Home | History | Annotate | Line # | Download | only in gcc
      1 /* RTL simplification functions for GNU compiler.
      2    Copyright (C) 1987-2022 Free Software Foundation, Inc.
      3 
      4 This file is part of GCC.
      5 
      6 GCC is free software; you can redistribute it and/or modify it under
      7 the terms of the GNU General Public License as published by the Free
      8 Software Foundation; either version 3, or (at your option) any later
      9 version.
     10 
     11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
     12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
     13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     14 for more details.
     15 
     16 You should have received a copy of the GNU General Public License
     17 along with GCC; see the file COPYING3.  If not see
     18 <http://www.gnu.org/licenses/>.  */
     19 
     20 
     21 #include "config.h"
     22 #include "system.h"
     23 #include "coretypes.h"
     24 #include "backend.h"
     25 #include "target.h"
     26 #include "rtl.h"
     27 #include "tree.h"
     28 #include "predict.h"
     29 #include "memmodel.h"
     30 #include "optabs.h"
     31 #include "emit-rtl.h"
     32 #include "recog.h"
     33 #include "diagnostic-core.h"
     34 #include "varasm.h"
     35 #include "flags.h"
     36 #include "selftest.h"
     37 #include "selftest-rtl.h"
     38 #include "rtx-vector-builder.h"
     39 #include "rtlanal.h"
     40 
     41 /* Simplification and canonicalization of RTL.  */
     42 
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
/* Yields all-ones when LOW's sign bit is set, zero otherwise, i.e. the
   HIGH half of the sign extension of LOW.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)

/* Forward declaration; the definition appears later in this file.  */
static bool plus_minus_operand_p (const_rtx);
     51 
/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.
   Returns the negated value as an immediate constant rtx of MODE.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  /* Convert I to a poly_wide_int, negate it, and re-wrap the result as
     an immediate constant in MODE.  */
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}
     60 
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  Returns false for
   non-integer modes, zero-precision modes, and non-constant X.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  /* Only scalar integer modes have a sign bit in this sense.  */
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  /* Single-HWI constant: the entire value fits in VAL.  */
  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      /* The constant must occupy exactly as many HWI elements as the
	 precision requires.  */
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      /* For a sign-bit value every element below the topmost one must
	 be zero.  */
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      /* Reduce WIDTH to the number of significant bits within the top
	 element (a full element if the precision is a multiple).  */
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      /* Double-int constant whose low half is zero: only the high half
	 needs checking.  */
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  /* Mask off bits beyond the precision, then require exactly the top
     bit of the (possibly reduced) width to be set.  */
  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
    113 
    114 /* Test whether VAL is equal to the most significant bit of mode MODE
    115    (after masking with the mode mask of MODE).  Returns false if the
    116    precision of MODE is too large to handle.  */
    117 
    118 bool
    119 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
    120 {
    121   unsigned int width;
    122   scalar_int_mode int_mode;
    123 
    124   if (!is_int_mode (mode, &int_mode))
    125     return false;
    126 
    127   width = GET_MODE_PRECISION (int_mode);
    128   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    129     return false;
    130 
    131   val &= GET_MODE_MASK (int_mode);
    132   return val == (HOST_WIDE_INT_1U << (width - 1));
    133 }
    134 
    135 /* Test whether the most significant bit of mode MODE is set in VAL.
    136    Returns false if the precision of MODE is too large to handle.  */
    137 bool
    138 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
    139 {
    140   unsigned int width;
    141 
    142   scalar_int_mode int_mode;
    143   if (!is_int_mode (mode, &int_mode))
    144     return false;
    145 
    146   width = GET_MODE_PRECISION (int_mode);
    147   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    148     return false;
    149 
    150   val &= HOST_WIDE_INT_1U << (width - 1);
    151   return val != 0;
    152 }
    153 
    154 /* Test whether the most significant bit of mode MODE is clear in VAL.
    155    Returns false if the precision of MODE is too large to handle.  */
    156 bool
    157 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
    158 {
    159   unsigned int width;
    160 
    161   scalar_int_mode int_mode;
    162   if (!is_int_mode (mode, &int_mode))
    163     return false;
    164 
    165   width = GET_MODE_PRECISION (int_mode);
    166   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    167     return false;
    168 
    169   val &= HOST_WIDE_INT_1U << (width - 1);
    170   return val == 0;
    171 }
    172 
    173 /* Make a binary operation by properly ordering the operands and
    175    seeing if the expression folds.  */
    176 
    177 rtx
    178 simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
    179 				       rtx op0, rtx op1)
    180 {
    181   rtx tem;
    182 
    183   /* If this simplifies, do it.  */
    184   tem = simplify_binary_operation (code, mode, op0, op1);
    185   if (tem)
    186     return tem;
    187 
    188   /* Put complex operands first and constants second if commutative.  */
    189   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
    190       && swap_commutative_operands_p (op0, op1))
    191     std::swap (op0, op1);
    192 
    193   return gen_rtx_fmt_ee (code, mode, op0, op1);
    194 }
    195 
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  Also looks through FLOAT_EXTEND of a constant
   pool reference.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      /* Anything other than a MEM or FLOAT_EXTEND is returned
	 unchanged.  */
      return x;
    }

  /* BLKmode accesses have no meaningful constant value here.  */
  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  /* Look through a LO_SUM to its symbolic operand.  */
  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
    261 
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  Attempts to rewrite X as a reference to the
   DECL_RTL of the variable named by its MEM_EXPR, adjusted by the
   known offset.  Returns X unchanged when that is not possible.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      /* Reduce DECL to an underlying VAR_DECL where possible,
	 accumulating the byte offset of component references into
	 OFFSET.  Any other tree code makes DECL unusable.  */
      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    /* Give up unless the reference covers a whole MODE-sized
	       piece at a byte boundary with a compile-time-constant
	       variable offset.  */
	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
	      decl = NULL;
	    else
	      offset += bytepos + toffset_val;
	    break;
	  }
	}

      /* Only usable if DECL is a static or thread-local variable whose
	 RTL has already been assigned and is itself a MEM.  */
      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
	      poly_int64 n_offset, o_offset;

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      n = strip_offset (n, &n_offset);
	      o = strip_offset (o, &o_offset);
	      if (!(known_eq (o_offset, n_offset + offset)
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && known_eq (offset, 0))
	    /* DECL's RTL is not a MEM (e.g. a register); usable only
	       when mode and offset match exactly.  */
	    x = newx;
	}
    }

  return x;
}
    353 
    354 /* Make a unary operation by first seeing if it folds and otherwise making
    356    the specified operation.  */
    357 
    358 rtx
    359 simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
    360 				      machine_mode op_mode)
    361 {
    362   rtx tem;
    363 
    364   /* If this simplifies, use it.  */
    365   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    366     return tem;
    367 
    368   return gen_rtx_fmt_e (code, mode, op);
    369 }
    370 
    371 /* Likewise for ternary operations.  */
    372 
    373 rtx
    374 simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
    375 					machine_mode op0_mode,
    376 					rtx op0, rtx op1, rtx op2)
    377 {
    378   rtx tem;
    379 
    380   /* If this simplifies, use it.  */
    381   if ((tem = simplify_ternary_operation (code, mode, op0_mode,
    382 					 op0, op1, op2)) != 0)
    383     return tem;
    384 
    385   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
    386 }
    387 
    388 /* Likewise, for relational operations.
    389    CMP_MODE specifies mode comparison is done in.  */
    390 
    391 rtx
    392 simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
    393 					   machine_mode cmp_mode,
    394 					   rtx op0, rtx op1)
    395 {
    396   rtx tem;
    397 
    398   if ((tem = simplify_relational_operation (code, mode, cmp_mode,
    399 					    op0, op1)) != 0)
    400     return tem;
    401 
    402   return gen_rtx_fmt_ee (code, mode, op0, op1);
    403 }
    404 
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.

   Returns X itself when nothing changed, so callers can test for sharing;
   sub-structure is only copied along paths that actually change.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  /* Apply the replacement at this node first: either via the callback
     or by direct comparison with OLD_RTX.  The callback path is hinted
     unlikely.  */
  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  /* Recurse into the operands by rtx class, re-simplifying when any
     operand changed; return X unchanged otherwise to preserve
     sharing.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      /* Capture the comparison mode before either operand is
	 replaced; fall back to OP1's mode if OP0 is VOIDmode.  */
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      /* If the original operand mode was VOIDmode, use the replaced
	 operand's mode instead.  */
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  /* simplify_gen_subreg may fail; keep the original X then.  */
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  /* Generic fallback: walk every 'e' and 'E' slot of X, copying X (and
     any rtvec) lazily only when a replacement actually occurs.  */
  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		/* First change in this vector: make shallow copies of
		   the vector (and of X itself if still shared).  */
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		/* First change: copy X before mutating it.  */
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
    561 
    562 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
    563    resulting RTX.  Return a new RTX which is as simplified as possible.  */
    564 
    565 rtx
    566 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
    567 {
    568   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
    569 }
    570 
    571 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
    573    Only handle cases where the truncated value is inherently an rvalue.
    574 
    575    RTL provides two ways of truncating a value:
    576 
    577    1. a lowpart subreg.  This form is only a truncation when both
    578       the outer and inner modes (here MODE and OP_MODE respectively)
    579       are scalar integers, and only then when the subreg is used as
    580       an rvalue.
    581 
    582       It is only valid to form such truncating subregs if the
    583       truncation requires no action by the target.  The onus for
    584       proving this is on the creator of the subreg -- e.g. the
    585       caller to simplify_subreg or simplify_gen_subreg -- and typically
    586       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
    587 
    588    2. a TRUNCATE.  This form handles both scalar and compound integers.
    589 
    590    The first form is preferred where valid.  However, the TRUNCATE
    591    handling in simplify_unary_operation turns the second form into the
    592    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
    593    so it is generally safe to form rvalue truncations using:
    594 
    595       simplify_gen_unary (TRUNCATE, ...)
    596 
    597    and leave simplify_unary_operation to work out which representation
    598    should be used.
    599 
    600    Because of the proof requirements on (1), simplify_truncation must
    601    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
    602    regardless of whether the outer truncation came from a SUBREG or a
    603    TRUNCATE.  For example, if the caller has proven that an SImode
    604    truncation of:
    605 
    606       (and:DI X Y)
    607 
    608    is a no-op and can be represented as a subreg, it does not follow
    609    that SImode truncations of X and Y are also no-ops.  On a target
    610    like 64-bit MIPS that requires SImode values to be stored in
    611    sign-extended form, an SImode truncation of:
    612 
    613       (and:DI (reg:DI X) (const_int 63))
    614 
    615    is trivially a no-op because only the lower 6 bits can be set.
    616    However, X is still an arbitrary 64-bit number and so we cannot
    617    assume that truncating it too is a no-op.  */
    618 
    619 rtx
    620 simplify_context::simplify_truncation (machine_mode mode, rtx op,
    621 				       machine_mode op_mode)
    622 {
    623   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
    624   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
    625   scalar_int_mode int_mode, int_op_mode, subreg_mode;
    626 
    627   gcc_assert (precision <= op_precision);
    628 
    629   /* Optimize truncations of zero and sign extended values.  */
    630   if (GET_CODE (op) == ZERO_EXTEND
    631       || GET_CODE (op) == SIGN_EXTEND)
    632     {
    633       /* There are three possibilities.  If MODE is the same as the
    634 	 origmode, we can omit both the extension and the subreg.
    635 	 If MODE is not larger than the origmode, we can apply the
    636 	 truncation without the extension.  Finally, if the outermode
    637 	 is larger than the origmode, we can just extend to the appropriate
    638 	 mode.  */
    639       machine_mode origmode = GET_MODE (XEXP (op, 0));
    640       if (mode == origmode)
    641 	return XEXP (op, 0);
    642       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
    643 	return simplify_gen_unary (TRUNCATE, mode,
    644 				   XEXP (op, 0), origmode);
    645       else
    646 	return simplify_gen_unary (GET_CODE (op), mode,
    647 				   XEXP (op, 0), origmode);
    648     }
    649 
    650   /* If the machine can perform operations in the truncated mode, distribute
    651      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
    652      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
    653   if (1
    654       && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
    655       && (GET_CODE (op) == PLUS
    656 	  || GET_CODE (op) == MINUS
    657 	  || GET_CODE (op) == MULT))
    658     {
    659       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
    660       if (op0)
    661 	{
    662 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
    663 	  if (op1)
    664 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
    665 	}
    666     }
    667 
    668   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
    669      to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
    670      the outer subreg is effectively a truncation to the original mode.  */
    671   if ((GET_CODE (op) == LSHIFTRT
    672        || GET_CODE (op) == ASHIFTRT)
    673       /* Ensure that OP_MODE is at least twice as wide as MODE
    674 	 to avoid the possibility that an outer LSHIFTRT shifts by more
    675 	 than the sign extension's sign_bit_copies and introduces zeros
    676 	 into the high bits of the result.  */
    677       && 2 * precision <= op_precision
    678       && CONST_INT_P (XEXP (op, 1))
    679       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
    680       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
    681       && UINTVAL (XEXP (op, 1)) < precision)
    682     return simplify_gen_binary (ASHIFTRT, mode,
    683 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
    684 
    685   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
    686      to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
    687      the outer subreg is effectively a truncation to the original mode.  */
    688   if ((GET_CODE (op) == LSHIFTRT
    689        || GET_CODE (op) == ASHIFTRT)
    690       && CONST_INT_P (XEXP (op, 1))
    691       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
    692       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
    693       && UINTVAL (XEXP (op, 1)) < precision)
    694     return simplify_gen_binary (LSHIFTRT, mode,
    695 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
    696 
    697   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
    698      to (ashift:QI (x:QI) C), where C is a suitable small constant and
    699      the outer subreg is effectively a truncation to the original mode.  */
    700   if (GET_CODE (op) == ASHIFT
    701       && CONST_INT_P (XEXP (op, 1))
    702       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
    703 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
    704       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
    705       && UINTVAL (XEXP (op, 1)) < precision)
    706     return simplify_gen_binary (ASHIFT, mode,
    707 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
    708 
    709   /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
    710      (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
    711      and C2.  */
    712   if (GET_CODE (op) == AND
    713       && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
    714 	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
    715       && CONST_INT_P (XEXP (XEXP (op, 0), 1))
    716       && CONST_INT_P (XEXP (op, 1)))
    717     {
    718       rtx op0 = (XEXP (XEXP (op, 0), 0));
    719       rtx shift_op = XEXP (XEXP (op, 0), 1);
    720       rtx mask_op = XEXP (op, 1);
    721       unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
    722       unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
    723 
    724       if (shift < precision
    725 	  /* If doing this transform works for an X with all bits set,
    726 	     it works for any X.  */
    727 	  && ((GET_MODE_MASK (mode) >> shift) & mask)
    728 	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
    729 	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
    730 	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
    731 	{
    732 	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
    733 	  return simplify_gen_binary (AND, mode, op0, mask_op);
    734 	}
    735     }
    736 
    737   /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
    738      (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
    739      changing len.  */
    740   if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
    741       && REG_P (XEXP (op, 0))
    742       && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
    743       && CONST_INT_P (XEXP (op, 1))
    744       && CONST_INT_P (XEXP (op, 2)))
    745     {
    746       rtx op0 = XEXP (op, 0);
    747       unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
    748       unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
    749       if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
    750 	{
    751 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
    752 	  if (op0)
    753 	    {
    754 	      pos -= op_precision - precision;
    755 	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
    756 					   XEXP (op, 1), GEN_INT (pos));
    757 	    }
    758 	}
    759       else if (!BITS_BIG_ENDIAN && precision >= len + pos)
    760 	{
    761 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
    762 	  if (op0)
    763 	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
    764 					 XEXP (op, 1), XEXP (op, 2));
    765 	}
    766     }
    767 
    768   /* Recognize a word extraction from a multi-word subreg.  */
    769   if ((GET_CODE (op) == LSHIFTRT
    770        || GET_CODE (op) == ASHIFTRT)
    771       && SCALAR_INT_MODE_P (mode)
    772       && SCALAR_INT_MODE_P (op_mode)
    773       && precision >= BITS_PER_WORD
    774       && 2 * precision <= op_precision
    775       && CONST_INT_P (XEXP (op, 1))
    776       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
    777       && UINTVAL (XEXP (op, 1)) < op_precision)
    778     {
    779       poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
    780       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
    781       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
    782 				  (WORDS_BIG_ENDIAN
    783 				   ? byte - shifted_bytes
    784 				   : byte + shifted_bytes));
    785     }
    786 
    787   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
    788      and try replacing the TRUNCATE and shift with it.  Don't do this
    789      if the MEM has a mode-dependent address.  */
    790   if ((GET_CODE (op) == LSHIFTRT
    791        || GET_CODE (op) == ASHIFTRT)
    792       && is_a <scalar_int_mode> (mode, &int_mode)
    793       && is_a <scalar_int_mode> (op_mode, &int_op_mode)
    794       && MEM_P (XEXP (op, 0))
    795       && CONST_INT_P (XEXP (op, 1))
    796       && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
    797       && INTVAL (XEXP (op, 1)) > 0
    798       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
    799       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
    800 				     MEM_ADDR_SPACE (XEXP (op, 0)))
    801       && ! MEM_VOLATILE_P (XEXP (op, 0))
    802       && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
    803 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    804     {
    805       poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
    806       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
    807       return adjust_address_nv (XEXP (op, 0), int_mode,
    808 				(WORDS_BIG_ENDIAN
    809 				 ? byte - shifted_bytes
    810 				 : byte + shifted_bytes));
    811     }
    812 
    813   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
    814      (OP:SI foo:SI) if OP is NEG or ABS.  */
    815   if ((GET_CODE (op) == ABS
    816        || GET_CODE (op) == NEG)
    817       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
    818 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
    819       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    820     return simplify_gen_unary (GET_CODE (op), mode,
    821 			       XEXP (XEXP (op, 0), 0), mode);
    822 
    823   /* Simplifications of (truncate:A (subreg:B X 0)).  */
    824   if (GET_CODE (op) == SUBREG
    825       && is_a <scalar_int_mode> (mode, &int_mode)
    826       && SCALAR_INT_MODE_P (op_mode)
    827       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
    828       && subreg_lowpart_p (op))
    829     {
    830       /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X).  */
    831       if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
    832 	{
    833 	  rtx inner = XEXP (SUBREG_REG (op), 0);
    834 	  if (GET_MODE_PRECISION (int_mode)
    835 	      <= GET_MODE_PRECISION (subreg_mode))
    836 	    return simplify_gen_unary (TRUNCATE, int_mode, inner,
    837 				       GET_MODE (inner));
    838 	  else
    839 	    /* If subreg above is paradoxical and C is narrower
    840 	       than A, return (subreg:A (truncate:C X) 0).  */
    841 	    return simplify_gen_subreg (int_mode, SUBREG_REG (op),
    842 					subreg_mode, 0);
    843 	}
    844 
    845       /* Simplifications of (truncate:A (subreg:B X:C 0)) with
    846 	 paradoxical subregs (B is wider than C).  */
    847       if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
    848 	{
    849 	  unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
    850 	  unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
    851 	  if (int_op_prec > subreg_prec)
    852 	    {
    853 	      if (int_mode == subreg_mode)
    854 		return SUBREG_REG (op);
    855 	      if (GET_MODE_PRECISION (int_mode) < subreg_prec)
    856 		return simplify_gen_unary (TRUNCATE, int_mode,
    857 					   SUBREG_REG (op), subreg_mode);
    858 	    }
    859 	  /* Simplification of (truncate:A (subreg:B X:C 0)) where
    860  	     A is narrower than B and B is narrower than C.  */
    861 	  else if (int_op_prec < subreg_prec
    862 		   && GET_MODE_PRECISION (int_mode) < int_op_prec)
    863 	    return simplify_gen_unary (TRUNCATE, int_mode,
    864 				       SUBREG_REG (op), subreg_mode);
    865 	}
    866     }
    867 
    868   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
    869   if (GET_CODE (op) == TRUNCATE)
    870     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
    871 			       GET_MODE (XEXP (op, 0)));
    872 
    873   /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
    874      in mode A.  */
    875   if (GET_CODE (op) == IOR
    876       && SCALAR_INT_MODE_P (mode)
    877       && SCALAR_INT_MODE_P (op_mode)
    878       && CONST_INT_P (XEXP (op, 1))
    879       && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    880     return constm1_rtx;
    881 
    882   return NULL_RTX;
    883 }
    884 
    885 /* Try to simplify a unary operation CODE whose output mode is to be
    887    MODE with input operand OP whose mode was originally OP_MODE.
    888    Return zero if no simplification can be made.  */
    889 rtx
    890 simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
    891 					    rtx op, machine_mode op_mode)
    892 {
    893   rtx trueop, tem;
    894 
    895   trueop = avoid_constant_pool_reference (op);
    896 
    897   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
    898   if (tem)
    899     return tem;
    900 
    901   return simplify_unary_operation_1 (code, mode, op);
    902 }
    903 
    904 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
    905    to be exact.  */
    906 
    907 static bool
    908 exact_int_to_float_conversion_p (const_rtx op)
    909 {
    910   machine_mode op0_mode = GET_MODE (XEXP (op, 0));
    911   /* Constants can reach here with -frounding-math, if they do then
    912      the conversion isn't exact.  */
    913   if (op0_mode == VOIDmode)
    914     return false;
    915   int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
    916   int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
    917   int in_bits = in_prec;
    918   if (HWI_COMPUTABLE_MODE_P (op0_mode))
    919     {
    920       unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
    921       if (GET_CODE (op) == FLOAT)
    922 	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
    923       else if (GET_CODE (op) == UNSIGNED_FLOAT)
    924 	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
    925       else
    926 	gcc_unreachable ();
    927       in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    928     }
    929   return in_bits <= out_bits;
    930 }
    931 
    932 /* Perform some simplifications we can do even if the operands
    933    aren't constant.  */
    934 rtx
    935 simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
    936 					      rtx op)
    937 {
    938   enum rtx_code reversed;
    939   rtx temp, elt, base, step;
    940   scalar_int_mode inner, int_mode, op_mode, op0_mode;
    941 
    942   switch (code)
    943     {
    944     case NOT:
    945       /* (not (not X)) == X.  */
    946       if (GET_CODE (op) == NOT)
    947 	return XEXP (op, 0);
    948 
    949       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
    950 	 comparison is all ones.   */
    951       if (COMPARISON_P (op)
    952 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
    953 	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
    954 	return simplify_gen_relational (reversed, mode, VOIDmode,
    955 					XEXP (op, 0), XEXP (op, 1));
    956 
    957       /* (not (plus X -1)) can become (neg X).  */
    958       if (GET_CODE (op) == PLUS
    959 	  && XEXP (op, 1) == constm1_rtx)
    960 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
    961 
    962       /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
    963 	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
    964 	 and MODE_VECTOR_INT.  */
    965       if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
    966 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
    967 				    CONSTM1_RTX (mode));
    968 
    969       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
    970       if (GET_CODE (op) == XOR
    971 	  && CONST_INT_P (XEXP (op, 1))
    972 	  && (temp = simplify_unary_operation (NOT, mode,
    973 					       XEXP (op, 1), mode)) != 0)
    974 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
    975 
    976       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
    977       if (GET_CODE (op) == PLUS
    978 	  && CONST_INT_P (XEXP (op, 1))
    979 	  && mode_signbit_p (mode, XEXP (op, 1))
    980 	  && (temp = simplify_unary_operation (NOT, mode,
    981 					       XEXP (op, 1), mode)) != 0)
    982 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
    983 
    984 
    985       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
    986 	 operands other than 1, but that is not valid.  We could do a
    987 	 similar simplification for (not (lshiftrt C X)) where C is
    988 	 just the sign bit, but this doesn't seem common enough to
    989 	 bother with.  */
    990       if (GET_CODE (op) == ASHIFT
    991 	  && XEXP (op, 0) == const1_rtx)
    992 	{
    993 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
    994 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
    995 	}
    996 
    997       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
    998 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
    999 	 so we can perform the above simplification.  */
   1000       if (STORE_FLAG_VALUE == -1
   1001 	  && is_a <scalar_int_mode> (mode, &int_mode)
   1002 	  && GET_CODE (op) == ASHIFTRT
   1003 	  && CONST_INT_P (XEXP (op, 1))
   1004 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
   1005 	return simplify_gen_relational (GE, int_mode, VOIDmode,
   1006 					XEXP (op, 0), const0_rtx);
   1007 
   1008 
   1009       if (partial_subreg_p (op)
   1010 	  && subreg_lowpart_p (op)
   1011 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
   1012 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
   1013 	{
   1014 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
   1015 	  rtx x;
   1016 
   1017 	  x = gen_rtx_ROTATE (inner_mode,
   1018 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
   1019 						  inner_mode),
   1020 			      XEXP (SUBREG_REG (op), 1));
   1021 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
   1022 	  if (temp)
   1023 	    return temp;
   1024 	}
   1025 
   1026       /* Apply De Morgan's laws to reduce number of patterns for machines
   1027 	 with negating logical insns (and-not, nand, etc.).  If result has
   1028 	 only one NOT, put it first, since that is how the patterns are
   1029 	 coded.  */
   1030       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
   1031 	{
   1032 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
   1033 	  machine_mode op_mode;
   1034 
   1035 	  op_mode = GET_MODE (in1);
   1036 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
   1037 
   1038 	  op_mode = GET_MODE (in2);
   1039 	  if (op_mode == VOIDmode)
   1040 	    op_mode = mode;
   1041 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
   1042 
   1043 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
   1044 	    std::swap (in1, in2);
   1045 
   1046 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
   1047 				 mode, in1, in2);
   1048 	}
   1049 
   1050       /* (not (bswap x)) -> (bswap (not x)).  */
   1051       if (GET_CODE (op) == BSWAP)
   1052 	{
   1053 	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
   1054 	  return simplify_gen_unary (BSWAP, mode, x, mode);
   1055 	}
   1056       break;
   1057 
   1058     case NEG:
   1059       /* (neg (neg X)) == X.  */
   1060       if (GET_CODE (op) == NEG)
   1061 	return XEXP (op, 0);
   1062 
   1063       /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
   1064 	 If comparison is not reversible use
   1065 	 x ? y : (neg y).  */
   1066       if (GET_CODE (op) == IF_THEN_ELSE)
   1067 	{
   1068 	  rtx cond = XEXP (op, 0);
   1069 	  rtx true_rtx = XEXP (op, 1);
   1070 	  rtx false_rtx = XEXP (op, 2);
   1071 
   1072 	  if ((GET_CODE (true_rtx) == NEG
   1073 	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
   1074 	       || (GET_CODE (false_rtx) == NEG
   1075 		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
   1076 	    {
   1077 	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
   1078 		temp = reversed_comparison (cond, mode);
   1079 	      else
   1080 		{
   1081 		  temp = cond;
   1082 		  std::swap (true_rtx, false_rtx);
   1083 		}
   1084 	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
   1085 					    mode, temp, true_rtx, false_rtx);
   1086 	    }
   1087 	}
   1088 
   1089       /* (neg (plus X 1)) can become (not X).  */
   1090       if (GET_CODE (op) == PLUS
   1091 	  && XEXP (op, 1) == const1_rtx)
   1092 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
   1093 
   1094       /* Similarly, (neg (not X)) is (plus X 1).  */
   1095       if (GET_CODE (op) == NOT)
   1096 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
   1097 				    CONST1_RTX (mode));
   1098 
   1099       /* (neg (minus X Y)) can become (minus Y X).  This transformation
   1100 	 isn't safe for modes with signed zeros, since if X and Y are
   1101 	 both +0, (minus Y X) is the same as (minus X Y).  If the
   1102 	 rounding mode is towards +infinity (or -infinity) then the two
   1103 	 expressions will be rounded differently.  */
   1104       if (GET_CODE (op) == MINUS
   1105 	  && !HONOR_SIGNED_ZEROS (mode)
   1106 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
   1107 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
   1108 
   1109       if (GET_CODE (op) == PLUS
   1110 	  && !HONOR_SIGNED_ZEROS (mode)
   1111 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
   1112 	{
   1113 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
   1114 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
   1115 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
   1116 	    {
   1117 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
   1118 	      if (temp)
   1119 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
   1120 	    }
   1121 
   1122 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
   1123 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
   1124 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
   1125 	}
   1126 
   1127       /* (neg (mult A B)) becomes (mult A (neg B)).
   1128 	 This works even for floating-point values.  */
   1129       if (GET_CODE (op) == MULT
   1130 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
   1131 	{
   1132 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
   1133 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
   1134 	}
   1135 
   1136       /* NEG commutes with ASHIFT since it is multiplication.  Only do
   1137 	 this if we can then eliminate the NEG (e.g., if the operand
   1138 	 is a constant).  */
   1139       if (GET_CODE (op) == ASHIFT)
   1140 	{
   1141 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
   1142 	  if (temp)
   1143 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
   1144 	}
   1145 
   1146       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
   1147 	 C is equal to the width of MODE minus 1.  */
   1148       if (GET_CODE (op) == ASHIFTRT
   1149 	  && CONST_INT_P (XEXP (op, 1))
   1150 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
   1151 	return simplify_gen_binary (LSHIFTRT, mode,
   1152 				    XEXP (op, 0), XEXP (op, 1));
   1153 
   1154       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
   1155 	 C is equal to the width of MODE minus 1.  */
   1156       if (GET_CODE (op) == LSHIFTRT
   1157 	  && CONST_INT_P (XEXP (op, 1))
   1158 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
   1159 	return simplify_gen_binary (ASHIFTRT, mode,
   1160 				    XEXP (op, 0), XEXP (op, 1));
   1161 
   1162       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
   1163       if (GET_CODE (op) == XOR
   1164 	  && XEXP (op, 1) == const1_rtx
   1165 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
   1166 	return plus_constant (mode, XEXP (op, 0), -1);
   1167 
   1168       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
   1169       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
   1170       if (GET_CODE (op) == LT
   1171 	  && XEXP (op, 1) == const0_rtx
   1172 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
   1173 	{
   1174 	  int_mode = as_a <scalar_int_mode> (mode);
   1175 	  int isize = GET_MODE_PRECISION (inner);
   1176 	  if (STORE_FLAG_VALUE == 1)
   1177 	    {
   1178 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
   1179 					  gen_int_shift_amount (inner,
   1180 								isize - 1));
   1181 	      if (int_mode == inner)
   1182 		return temp;
   1183 	      if (GET_MODE_PRECISION (int_mode) > isize)
   1184 		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
   1185 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
   1186 	    }
   1187 	  else if (STORE_FLAG_VALUE == -1)
   1188 	    {
   1189 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
   1190 					  gen_int_shift_amount (inner,
   1191 								isize - 1));
   1192 	      if (int_mode == inner)
   1193 		return temp;
   1194 	      if (GET_MODE_PRECISION (int_mode) > isize)
   1195 		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
   1196 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
   1197 	    }
   1198 	}
   1199 
   1200       if (vec_series_p (op, &base, &step))
   1201 	{
   1202 	  /* Only create a new series if we can simplify both parts.  In other
   1203 	     cases this isn't really a simplification, and it's not necessarily
   1204 	     a win to replace a vector operation with a scalar operation.  */
   1205 	  scalar_mode inner_mode = GET_MODE_INNER (mode);
   1206 	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
   1207 	  if (base)
   1208 	    {
   1209 	      step = simplify_unary_operation (NEG, inner_mode,
   1210 					       step, inner_mode);
   1211 	      if (step)
   1212 		return gen_vec_series (mode, base, step);
   1213 	    }
   1214 	}
   1215       break;
   1216 
   1217     case TRUNCATE:
   1218       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
   1219 	 with the umulXi3_highpart patterns.  */
   1220       if (GET_CODE (op) == LSHIFTRT
   1221 	  && GET_CODE (XEXP (op, 0)) == MULT)
   1222 	break;
   1223 
   1224       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
   1225 	{
   1226 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
   1227 	    {
   1228 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
   1229 	      if (temp)
   1230 		return temp;
   1231 	    }
   1232 	  /* We can't handle truncation to a partial integer mode here
   1233 	     because we don't know the real bitsize of the partial
   1234 	     integer mode.  */
   1235 	  break;
   1236 	}
   1237 
   1238       if (GET_MODE (op) != VOIDmode)
   1239 	{
   1240 	  temp = simplify_truncation (mode, op, GET_MODE (op));
   1241 	  if (temp)
   1242 	    return temp;
   1243 	}
   1244 
   1245       /* If we know that the value is already truncated, we can
   1246 	 replace the TRUNCATE with a SUBREG.  */
   1247       if (known_eq (GET_MODE_NUNITS (mode), 1)
   1248 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
   1249 	      || truncated_to_mode (mode, op)))
   1250 	{
   1251 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
   1252 	  if (temp)
   1253 	    return temp;
   1254 	}
   1255 
   1256       /* A truncate of a comparison can be replaced with a subreg if
   1257          STORE_FLAG_VALUE permits.  This is like the previous test,
   1258          but it works even if the comparison is done in a mode larger
   1259          than HOST_BITS_PER_WIDE_INT.  */
   1260       if (HWI_COMPUTABLE_MODE_P (mode)
   1261 	  && COMPARISON_P (op)
   1262 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
   1263 	  && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
   1264 	{
   1265 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
   1266 	  if (temp)
   1267 	    return temp;
   1268 	}
   1269 
   1270       /* A truncate of a memory is just loading the low part of the memory
   1271 	 if we are not changing the meaning of the address. */
   1272       if (GET_CODE (op) == MEM
   1273 	  && !VECTOR_MODE_P (mode)
   1274 	  && !MEM_VOLATILE_P (op)
   1275 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
   1276 	{
   1277 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
   1278 	  if (temp)
   1279 	    return temp;
   1280 	}
   1281 
   1282       /* Check for useless truncation.  */
   1283       if (GET_MODE (op) == mode)
   1284 	return op;
   1285       break;
   1286 
   1287     case FLOAT_TRUNCATE:
   1288       /* Check for useless truncation.  */
   1289       if (GET_MODE (op) == mode)
   1290 	return op;
   1291 
   1292       if (DECIMAL_FLOAT_MODE_P (mode))
   1293 	break;
   1294 
   1295       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
   1296       if (GET_CODE (op) == FLOAT_EXTEND
   1297 	  && GET_MODE (XEXP (op, 0)) == mode)
   1298 	return XEXP (op, 0);
   1299 
   1300       /* (float_truncate:SF (float_truncate:DF foo:XF))
   1301          = (float_truncate:SF foo:XF).
   1302 	 This may eliminate double rounding, so it is unsafe.
   1303 
   1304          (float_truncate:SF (float_extend:XF foo:DF))
   1305          = (float_truncate:SF foo:DF).
   1306 
   1307          (float_truncate:DF (float_extend:XF foo:SF))
   1308          = (float_extend:DF foo:SF).  */
   1309       if ((GET_CODE (op) == FLOAT_TRUNCATE
   1310 	   && flag_unsafe_math_optimizations)
   1311 	  || GET_CODE (op) == FLOAT_EXTEND)
   1312 	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
   1313 	  			   > GET_MODE_UNIT_SIZE (mode)
   1314 	  			   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
   1315 				   mode,
   1316 				   XEXP (op, 0), mode);
   1317 
   1318       /*  (float_truncate (float x)) is (float x)  */
   1319       if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
   1320 	  && (flag_unsafe_math_optimizations
   1321 	      || exact_int_to_float_conversion_p (op)))
   1322 	return simplify_gen_unary (GET_CODE (op), mode,
   1323 				   XEXP (op, 0),
   1324 				   GET_MODE (XEXP (op, 0)));
   1325 
   1326       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
   1327 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
   1328       if ((GET_CODE (op) == ABS
   1329 	   || GET_CODE (op) == NEG)
   1330 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
   1331 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
   1332 	return simplify_gen_unary (GET_CODE (op), mode,
   1333 				   XEXP (XEXP (op, 0), 0), mode);
   1334 
   1335       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
   1336 	 is (float_truncate:SF x).  */
   1337       if (GET_CODE (op) == SUBREG
   1338 	  && subreg_lowpart_p (op)
   1339 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
   1340 	return SUBREG_REG (op);
   1341       break;
   1342 
   1343     case FLOAT_EXTEND:
   1344       /* Check for useless extension.  */
   1345       if (GET_MODE (op) == mode)
   1346 	return op;
   1347 
   1348       if (DECIMAL_FLOAT_MODE_P (mode))
   1349 	break;
   1350 
   1351       /*  (float_extend (float_extend x)) is (float_extend x)
   1352 
   1353 	  (float_extend (float x)) is (float x) assuming that double
   1354 	  rounding can't happen.
   1355           */
   1356       if (GET_CODE (op) == FLOAT_EXTEND
   1357 	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
   1358 	      && exact_int_to_float_conversion_p (op)))
   1359 	return simplify_gen_unary (GET_CODE (op), mode,
   1360 				   XEXP (op, 0),
   1361 				   GET_MODE (XEXP (op, 0)));
   1362 
   1363       break;
   1364 
   1365     case ABS:
   1366       /* (abs (neg <foo>)) -> (abs <foo>) */
   1367       if (GET_CODE (op) == NEG)
   1368 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
   1369 				   GET_MODE (XEXP (op, 0)));
   1370 
   1371       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
   1372          do nothing.  */
   1373       if (GET_MODE (op) == VOIDmode)
   1374 	break;
   1375 
   1376       /* If operand is something known to be positive, ignore the ABS.  */
   1377       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
   1378 	  || val_signbit_known_clear_p (GET_MODE (op),
   1379 					nonzero_bits (op, GET_MODE (op))))
   1380 	return op;
   1381 
   1382       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
   1383       if (is_a <scalar_int_mode> (mode, &int_mode)
   1384 	  && (num_sign_bit_copies (op, int_mode)
   1385 	      == GET_MODE_PRECISION (int_mode)))
   1386 	return gen_rtx_NEG (int_mode, op);
   1387 
   1388       break;
   1389 
   1390     case FFS:
   1391       /* (ffs (*_extend <X>)) = (ffs <X>) */
   1392       if (GET_CODE (op) == SIGN_EXTEND
   1393 	  || GET_CODE (op) == ZERO_EXTEND)
   1394 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
   1395 				   GET_MODE (XEXP (op, 0)));
   1396       break;
   1397 
   1398     case POPCOUNT:
   1399       switch (GET_CODE (op))
   1400 	{
   1401 	case BSWAP:
   1402 	case ZERO_EXTEND:
   1403 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
   1404 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
   1405 				     GET_MODE (XEXP (op, 0)));
   1406 
   1407 	case ROTATE:
   1408 	case ROTATERT:
   1409 	  /* Rotations don't affect popcount.  */
   1410 	  if (!side_effects_p (XEXP (op, 1)))
   1411 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
   1412 				       GET_MODE (XEXP (op, 0)));
   1413 	  break;
   1414 
   1415 	default:
   1416 	  break;
   1417 	}
   1418       break;
   1419 
   1420     case PARITY:
   1421       switch (GET_CODE (op))
   1422 	{
   1423 	case NOT:
   1424 	case BSWAP:
   1425 	case ZERO_EXTEND:
   1426 	case SIGN_EXTEND:
   1427 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
   1428 				     GET_MODE (XEXP (op, 0)));
   1429 
   1430 	case ROTATE:
   1431 	case ROTATERT:
   1432 	  /* Rotations don't affect parity.  */
   1433 	  if (!side_effects_p (XEXP (op, 1)))
   1434 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
   1435 				       GET_MODE (XEXP (op, 0)));
   1436 	  break;
   1437 
   1438 	case PARITY:
   1439 	  /* (parity (parity x)) -> parity (x).  */
   1440 	  return op;
   1441 
   1442 	default:
   1443 	  break;
   1444 	}
   1445       break;
   1446 
   1447     case BSWAP:
   1448       /* (bswap (bswap x)) -> x.  */
   1449       if (GET_CODE (op) == BSWAP)
   1450 	return XEXP (op, 0);
   1451       break;
   1452 
   1453     case FLOAT:
   1454       /* (float (sign_extend <X>)) = (float <X>).  */
   1455       if (GET_CODE (op) == SIGN_EXTEND)
   1456 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
   1457 				   GET_MODE (XEXP (op, 0)));
   1458       break;
   1459 
   1460     case SIGN_EXTEND:
   1461       /* Check for useless extension.  */
   1462       if (GET_MODE (op) == mode)
   1463 	return op;
   1464 
   1465       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
   1466 	 becomes just the MINUS if its mode is MODE.  This allows
   1467 	 folding switch statements on machines using casesi (such as
   1468 	 the VAX).  */
   1469       if (GET_CODE (op) == TRUNCATE
   1470 	  && GET_MODE (XEXP (op, 0)) == mode
   1471 	  && GET_CODE (XEXP (op, 0)) == MINUS
   1472 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
   1473 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
   1474 	return XEXP (op, 0);
   1475 
   1476       /* Extending a widening multiplication should be canonicalized to
   1477 	 a wider widening multiplication.  */
   1478       if (GET_CODE (op) == MULT)
   1479 	{
   1480 	  rtx lhs = XEXP (op, 0);
   1481 	  rtx rhs = XEXP (op, 1);
   1482 	  enum rtx_code lcode = GET_CODE (lhs);
   1483 	  enum rtx_code rcode = GET_CODE (rhs);
   1484 
   1485 	  /* Widening multiplies usually extend both operands, but sometimes
   1486 	     they use a shift to extract a portion of a register.  */
   1487 	  if ((lcode == SIGN_EXTEND
   1488 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
   1489 	      && (rcode == SIGN_EXTEND
   1490 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
   1491 	    {
   1492 	      machine_mode lmode = GET_MODE (lhs);
   1493 	      machine_mode rmode = GET_MODE (rhs);
   1494 	      int bits;
   1495 
   1496 	      if (lcode == ASHIFTRT)
   1497 		/* Number of bits not shifted off the end.  */
   1498 		bits = (GET_MODE_UNIT_PRECISION (lmode)
   1499 			- INTVAL (XEXP (lhs, 1)));
   1500 	      else /* lcode == SIGN_EXTEND */
   1501 		/* Size of inner mode.  */
   1502 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
   1503 
   1504 	      if (rcode == ASHIFTRT)
   1505 		bits += (GET_MODE_UNIT_PRECISION (rmode)
   1506 			 - INTVAL (XEXP (rhs, 1)));
   1507 	      else /* rcode == SIGN_EXTEND */
   1508 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
   1509 
   1510 	      /* We can only widen multiplies if the result is mathematiclly
   1511 		 equivalent.  I.e. if overflow was impossible.  */
   1512 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
   1513 		return simplify_gen_binary
   1514 			 (MULT, mode,
   1515 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
   1516 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
   1517 	    }
   1518 	}
   1519 
   1520       /* Check for a sign extension of a subreg of a promoted
   1521 	 variable, where the promotion is sign-extended, and the
   1522 	 target mode is the same as the variable's promotion.  */
   1523       if (GET_CODE (op) == SUBREG
   1524 	  && SUBREG_PROMOTED_VAR_P (op)
   1525 	  && SUBREG_PROMOTED_SIGNED_P (op))
   1526 	{
   1527 	  rtx subreg = SUBREG_REG (op);
   1528 	  machine_mode subreg_mode = GET_MODE (subreg);
   1529 	  if (!paradoxical_subreg_p (mode, subreg_mode))
   1530 	    {
   1531 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
   1532 	      if (temp)
   1533 		{
   1534 		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
   1535 		  if (partial_subreg_p (temp))
   1536 		    {
   1537 		      SUBREG_PROMOTED_VAR_P (temp) = 1;
   1538 		      SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
   1539 		    }
   1540 		  return temp;
   1541 		}
   1542 	    }
   1543 	  else
   1544 	    /* Sign-extending a sign-extended subreg.  */
   1545 	    return simplify_gen_unary (SIGN_EXTEND, mode,
   1546 				       subreg, subreg_mode);
   1547 	}
   1548 
   1549       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
   1550 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
   1551       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
   1552 	{
   1553 	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
   1554 		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
   1555 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
   1556 				     GET_MODE (XEXP (op, 0)));
   1557 	}
   1558 
   1559       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
   1560 	 is (sign_extend:M (subreg:O <X>)) if there is mode with
   1561 	 GET_MODE_BITSIZE (N) - I bits.
   1562 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
   1563 	 is similarly (zero_extend:M (subreg:O <X>)).  */
   1564       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
   1565 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
   1566 	  && is_a <scalar_int_mode> (mode, &int_mode)
   1567 	  && CONST_INT_P (XEXP (op, 1))
   1568 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
   1569 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
   1570 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
   1571 	{
   1572 	  scalar_int_mode tmode;
   1573 	  gcc_assert (GET_MODE_PRECISION (int_mode)
   1574 		      > GET_MODE_PRECISION (op_mode));
   1575 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
   1576 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
   1577 	    {
   1578 	      rtx inner =
   1579 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
   1580 	      if (inner)
   1581 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
   1582 					   ? SIGN_EXTEND : ZERO_EXTEND,
   1583 					   int_mode, inner, tmode);
   1584 	    }
   1585 	}
   1586 
   1587       /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
   1588          (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
   1589       if (GET_CODE (op) == LSHIFTRT
   1590 	  && CONST_INT_P (XEXP (op, 1))
   1591 	  && XEXP (op, 1) != const0_rtx)
   1592 	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
   1593 
   1594       /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
   1595 	 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
   1596 	 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
   1597 	 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
   1598 	 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
   1599 	 wider than O.  */
   1600       if (GET_CODE (op) == TRUNCATE
   1601 	  && GET_CODE (XEXP (op, 0)) == LSHIFTRT
   1602 	  && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
   1603 	{
   1604 	  scalar_int_mode m_mode, n_mode, o_mode;
   1605 	  rtx old_shift = XEXP (op, 0);
   1606 	  if (is_a <scalar_int_mode> (mode, &m_mode)
   1607 	      && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
   1608 	      && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
   1609 	      && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
   1610 		 == INTVAL (XEXP (old_shift, 1)))
   1611 	    {
   1612 	      rtx new_shift = simplify_gen_binary (ASHIFTRT,
   1613 						   GET_MODE (old_shift),
   1614 						   XEXP (old_shift, 0),
   1615 						   XEXP (old_shift, 1));
   1616 	      if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
   1617 		return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
   1618 					   GET_MODE (new_shift));
   1619 	      if (mode != GET_MODE (new_shift))
   1620 		return simplify_gen_unary (TRUNCATE, mode, new_shift,
   1621 					   GET_MODE (new_shift));
   1622 	      return new_shift;
   1623 	    }
   1624 	}
   1625 
   1626 #if defined(POINTERS_EXTEND_UNSIGNED)
   1627       /* As we do not know which address space the pointer is referring to,
   1628 	 we can do this only if the target does not support different pointer
   1629 	 or address modes depending on the address space.  */
   1630       if (target_default_pointer_address_modes_p ()
   1631 	  && ! POINTERS_EXTEND_UNSIGNED
   1632 	  && mode == Pmode && GET_MODE (op) == ptr_mode
   1633 	  && (CONSTANT_P (op)
   1634 	      || (GET_CODE (op) == SUBREG
   1635 		  && REG_P (SUBREG_REG (op))
   1636 		  && REG_POINTER (SUBREG_REG (op))
   1637 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
   1638 	  && !targetm.have_ptr_extend ())
   1639 	{
   1640 	  temp
   1641 	    = convert_memory_address_addr_space_1 (Pmode, op,
   1642 						   ADDR_SPACE_GENERIC, false,
   1643 						   true);
   1644 	  if (temp)
   1645 	    return temp;
   1646 	}
   1647 #endif
   1648       break;
   1649 
   1650     case ZERO_EXTEND:
   1651       /* Check for useless extension.  */
   1652       if (GET_MODE (op) == mode)
   1653 	return op;
   1654 
   1655       /* Check for a zero extension of a subreg of a promoted
   1656 	 variable, where the promotion is zero-extended, and the
   1657 	 target mode is the same as the variable's promotion.  */
   1658       if (GET_CODE (op) == SUBREG
   1659 	  && SUBREG_PROMOTED_VAR_P (op)
   1660 	  && SUBREG_PROMOTED_UNSIGNED_P (op))
   1661 	{
   1662 	  rtx subreg = SUBREG_REG (op);
   1663 	  machine_mode subreg_mode = GET_MODE (subreg);
   1664 	  if (!paradoxical_subreg_p (mode, subreg_mode))
   1665 	    {
   1666 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
   1667 	      if (temp)
   1668 		{
   1669 		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
   1670 		  if (partial_subreg_p (temp))
   1671 		    {
   1672 		      SUBREG_PROMOTED_VAR_P (temp) = 1;
   1673 		      SUBREG_PROMOTED_SET (temp, SRP_UNSIGNED);
   1674 		    }
   1675 		  return temp;
   1676 		}
   1677 	    }
   1678 	  else
   1679 	    /* Zero-extending a zero-extended subreg.  */
   1680 	    return simplify_gen_unary (ZERO_EXTEND, mode,
   1681 				       subreg, subreg_mode);
   1682 	}
   1683 
   1684       /* Extending a widening multiplication should be canonicalized to
   1685 	 a wider widening multiplication.  */
   1686       if (GET_CODE (op) == MULT)
   1687 	{
   1688 	  rtx lhs = XEXP (op, 0);
   1689 	  rtx rhs = XEXP (op, 1);
   1690 	  enum rtx_code lcode = GET_CODE (lhs);
   1691 	  enum rtx_code rcode = GET_CODE (rhs);
   1692 
   1693 	  /* Widening multiplies usually extend both operands, but sometimes
   1694 	     they use a shift to extract a portion of a register.  */
   1695 	  if ((lcode == ZERO_EXTEND
   1696 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
   1697 	      && (rcode == ZERO_EXTEND
   1698 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
   1699 	    {
   1700 	      machine_mode lmode = GET_MODE (lhs);
   1701 	      machine_mode rmode = GET_MODE (rhs);
   1702 	      int bits;
   1703 
   1704 	      if (lcode == LSHIFTRT)
   1705 		/* Number of bits not shifted off the end.  */
   1706 		bits = (GET_MODE_UNIT_PRECISION (lmode)
   1707 			- INTVAL (XEXP (lhs, 1)));
   1708 	      else /* lcode == ZERO_EXTEND */
   1709 		/* Size of inner mode.  */
   1710 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
   1711 
   1712 	      if (rcode == LSHIFTRT)
   1713 		bits += (GET_MODE_UNIT_PRECISION (rmode)
   1714 			 - INTVAL (XEXP (rhs, 1)));
   1715 	      else /* rcode == ZERO_EXTEND */
   1716 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
   1717 
    1718 	      /* We can only widen multiplies if the result is mathematically
   1719 		 equivalent.  I.e. if overflow was impossible.  */
   1720 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
   1721 		return simplify_gen_binary
   1722 			 (MULT, mode,
   1723 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
   1724 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
   1725 	    }
   1726 	}
   1727 
   1728       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
   1729       if (GET_CODE (op) == ZERO_EXTEND)
   1730 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
   1731 				   GET_MODE (XEXP (op, 0)));
   1732 
   1733       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
   1734 	 is (zero_extend:M (subreg:O <X>)) if there is mode with
   1735 	 GET_MODE_PRECISION (N) - I bits.  */
   1736       if (GET_CODE (op) == LSHIFTRT
   1737 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
   1738 	  && is_a <scalar_int_mode> (mode, &int_mode)
   1739 	  && CONST_INT_P (XEXP (op, 1))
   1740 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
   1741 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
   1742 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
   1743 	{
   1744 	  scalar_int_mode tmode;
   1745 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
   1746 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
   1747 	    {
   1748 	      rtx inner =
   1749 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
   1750 	      if (inner)
   1751 		return simplify_gen_unary (ZERO_EXTEND, int_mode,
   1752 					   inner, tmode);
   1753 	    }
   1754 	}
   1755 
   1756       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
   1757 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
   1758 	 of mode N.  E.g.
   1759 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
   1760 	 (and:SI (reg:SI) (const_int 63)).  */
   1761       if (partial_subreg_p (op)
   1762 	  && is_a <scalar_int_mode> (mode, &int_mode)
   1763 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
   1764 	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
   1765 	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
   1766 	  && subreg_lowpart_p (op)
   1767 	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
   1768 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
   1769 	{
   1770 	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
   1771 	    return SUBREG_REG (op);
   1772 	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
   1773 				     op0_mode);
   1774 	}
   1775 
   1776 #if defined(POINTERS_EXTEND_UNSIGNED)
   1777       /* As we do not know which address space the pointer is referring to,
   1778 	 we can do this only if the target does not support different pointer
   1779 	 or address modes depending on the address space.  */
   1780       if (target_default_pointer_address_modes_p ()
   1781 	  && POINTERS_EXTEND_UNSIGNED > 0
   1782 	  && mode == Pmode && GET_MODE (op) == ptr_mode
   1783 	  && (CONSTANT_P (op)
   1784 	      || (GET_CODE (op) == SUBREG
   1785 		  && REG_P (SUBREG_REG (op))
   1786 		  && REG_POINTER (SUBREG_REG (op))
   1787 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
   1788 	  && !targetm.have_ptr_extend ())
   1789 	{
   1790 	  temp
   1791 	    = convert_memory_address_addr_space_1 (Pmode, op,
   1792 						   ADDR_SPACE_GENERIC, false,
   1793 						   true);
   1794 	  if (temp)
   1795 	    return temp;
   1796 	}
   1797 #endif
   1798       break;
   1799 
   1800     default:
   1801       break;
   1802     }
   1803 
   1804   if (VECTOR_MODE_P (mode)
   1805       && vec_duplicate_p (op, &elt)
   1806       && code != VEC_DUPLICATE)
   1807     {
   1808       if (code == SIGN_EXTEND || code == ZERO_EXTEND)
   1809 	/* Enforce a canonical order of VEC_DUPLICATE wrt other unary
   1810 	   operations by promoting VEC_DUPLICATE to the root of the expression
   1811 	   (as far as possible).  */
   1812 	temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
   1813 				   elt, GET_MODE_INNER (GET_MODE (op)));
   1814       else
   1815 	/* Try applying the operator to ELT and see if that simplifies.
   1816 	   We can duplicate the result if so.
   1817 
   1818 	   The reason we traditionally haven't used simplify_gen_unary
   1819 	   for these codes is that it didn't necessarily seem to be a
   1820 	   win to convert things like:
   1821 
   1822 	     (neg:V (vec_duplicate:V (reg:S R)))
   1823 
   1824 	   to:
   1825 
   1826 	     (vec_duplicate:V (neg:S (reg:S R)))
   1827 
   1828 	   The first might be done entirely in vector registers while the
   1829 	   second might need a move between register files.
   1830 
    1831 	   However, there are also cases where promoting the vec_duplicate is
   1832 	   more efficient, and there is definite value in having a canonical
   1833 	   form when matching instruction patterns.  We should consider
   1834 	   extending the simplify_gen_unary code above to more cases.  */
   1835 	temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
   1836 					 elt, GET_MODE_INNER (GET_MODE (op)));
   1837       if (temp)
   1838 	return gen_vec_duplicate (mode, temp);
   1839     }
   1840 
   1841   return 0;
   1842 }
   1843 
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
      {
	/* The element mode of the duplicate must agree with OP's mode
	   (or with OP's element mode if OP is itself a vector).  */
	if (!VECTOR_MODE_P (GET_MODE (op)))
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	else
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
      }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
	return gen_const_vec_duplicate (mode, op);
      if (GET_CODE (op) == CONST_VECTOR
	  && (CONST_VECTOR_DUPLICATE_P (op)
	      || CONST_VECTOR_NUNITS (op).is_constant ()))
	{
	  /* Duplicating a constant vector: re-encode its elements (or,
	     if OP is itself a duplicate, its patterns) in MODE.  */
	  unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
				    ? CONST_VECTOR_NPATTERNS (op)
				    : CONST_VECTOR_NUNITS (op).to_constant ());
	  gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
	  rtx_vector_builder builder (mode, npatterns, 1);
	  for (unsigned i = 0; i < npatterns; i++)
	    builder.quick_push (CONST_VECTOR_ELT (op, i));
	  return builder.build ();
	}
    }

  /* Fold a unary operation on a constant vector by applying it to each
     encoded element; fail if any element fails to fold.  */
  if (VECTOR_MODE_P (mode)
      && GET_CODE (op) == CONST_VECTOR
      && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
    {
      gcc_assert (GET_MODE (op) == op_mode);

      rtx_vector_builder builder;
      if (!builder.new_unary_operation (mode, op, false))
	return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (op_mode));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  builder.quick_push (x);
	}
      return builder.build ();
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);

      /* Avoid the folding if flag_rounding_math is on and the
	 conversion is not exact.  */
      if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* Round-trip the result back to the integer side; if the
	     value changed, the conversion was inexact and must be
	     left to run time.  */
	  bool fail = false;
	  wide_int w = real_to_integer (&d, &fail,
					GET_MODE_PRECISION
					  (as_a <scalar_int_mode> (op_mode)));
	  if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
	    return 0;
	}

      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);

      /* Avoid the folding if flag_rounding_math is on and the
	 conversion is not exact.  */
      if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* As above: only fold when the conversion round-trips.  */
	  bool fail = false;
	  wide_int w = real_to_integer (&d, &fail,
					GET_MODE_PRECISION
					  (as_a <scalar_int_mode> (op_mode)));
	  if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
	    return 0;
	}

      return const_double_from_real_value (d, mode);
    }

  /* Integer input, integer output: fold with the wide-int machinery.  */
  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      wide_int result;
      /* CONST_INTs come in with VOIDmode; interpret them in the
	 result mode in that case.  */
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, and so if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    /* CLZ of zero is target-defined; punt when undefined.  */
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    /* Likewise, CTZ of zero is target-defined.  */
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SS_NEG:
	  /* Signed saturating negation: negating the most negative
	     value saturates to the maximum.  */
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::neg (op0);
	  break;

	case SS_ABS:
	  /* Signed saturating absolute value: likewise saturates for
	     the most negative value.  */
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::abs (op0);
	  break;

	case SQRT:
	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }

  /* Floating-point input, floating-point output.  */
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* Or if flag_rounding_math is on and the truncation is not
	     exact.  */
	  if (HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	      && !exact_real_truncate (mode, &d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    /* Bitwise NOT of a float constant: complement the target
	       bit pattern and convert back.  */
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  /* Floating-point input, integer output: FIX and UNSIGNED_FIX.  */
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert (at) twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  /* NaN converts to zero; out-of-range values saturate.  */
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  /* NaN and negative values convert to zero.  */
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
   2230 
   2231 /* Subroutine of simplify_binary_operation to simplify a binary operation
   2233    CODE that can commute with byte swapping, with result mode MODE and
   2234    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   2235    Return zero if no simplification or canonicalization is possible.  */
   2236 
   2237 rtx
   2238 simplify_context::simplify_byte_swapping_operation (rtx_code code,
   2239 						    machine_mode mode,
   2240 						    rtx op0, rtx op1)
   2241 {
   2242   rtx tem;
   2243 
   2244   /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
   2245   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
   2246     {
   2247       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
   2248 				 simplify_gen_unary (BSWAP, mode, op1, mode));
   2249       return simplify_gen_unary (BSWAP, mode, tem, mode);
   2250     }
   2251 
   2252   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
   2253   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
   2254     {
   2255       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
   2256       return simplify_gen_unary (BSWAP, mode, tem, mode);
   2257     }
   2258 
   2259   return NULL_RTX;
   2260 }
   2261 
   2262 /* Subroutine of simplify_binary_operation to simplify a commutative,
   2263    associative binary operation CODE with result mode MODE, operating
   2264    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   2265    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   2266    canonicalization is possible.  */
   2267 
   2268 rtx
   2269 simplify_context::simplify_associative_operation (rtx_code code,
   2270 						  machine_mode mode,
   2271 						  rtx op0, rtx op1)
   2272 {
   2273   rtx tem;
   2274 
   2275   /* Normally expressions simplified by simplify-rtx.cc are combined
   2276      at most from a few machine instructions and therefore the
   2277      expressions should be fairly small.  During var-tracking
   2278      we can see arbitrarily large expressions though and reassociating
   2279      those can be quadratic, so punt after encountering max_assoc_count
   2280      simplify_associative_operation calls during outermost simplify_*
   2281      call.  */
   2282   if (++assoc_count >= max_assoc_count)
   2283     return NULL_RTX;
   2284 
   2285   /* Linearize the operator to the left.  */
   2286   if (GET_CODE (op1) == code)
   2287     {
   2288       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
   2289       if (GET_CODE (op0) == code)
   2290 	{
   2291 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
   2292 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
   2293 	}
   2294 
   2295       /* "a op (b op c)" becomes "(b op c) op a".  */
   2296       if (! swap_commutative_operands_p (op1, op0))
   2297 	return simplify_gen_binary (code, mode, op1, op0);
   2298 
   2299       std::swap (op0, op1);
   2300     }
   2301 
   2302   if (GET_CODE (op0) == code)
   2303     {
   2304       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
   2305       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
   2306 	{
   2307 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
   2308 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
   2309 	}
   2310 
   2311       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
   2312       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
   2313       if (tem != 0)
   2314         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
   2315 
   2316       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
   2317       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
   2318       if (tem != 0)
   2319         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
   2320     }
   2321 
   2322   return 0;
   2323 }
   2324 
   2325 /* Return a mask describing the COMPARISON.  */
   2326 static int
   2327 comparison_to_mask (enum rtx_code comparison)
   2328 {
   2329   switch (comparison)
   2330     {
   2331     case LT:
   2332       return 8;
   2333     case GT:
   2334       return 4;
   2335     case EQ:
   2336       return 2;
   2337     case UNORDERED:
   2338       return 1;
   2339 
   2340     case LTGT:
   2341       return 12;
   2342     case LE:
   2343       return 10;
   2344     case GE:
   2345       return 6;
   2346     case UNLT:
   2347       return 9;
   2348     case UNGT:
   2349       return 5;
   2350     case UNEQ:
   2351       return 3;
   2352 
   2353     case ORDERED:
   2354       return 14;
   2355     case NE:
   2356       return 13;
   2357     case UNLE:
   2358       return 11;
   2359     case UNGE:
   2360       return 7;
   2361 
   2362     default:
   2363       gcc_unreachable ();
   2364     }
   2365 }
   2366 
   2367 /* Return a comparison corresponding to the MASK.  */
   2368 static enum rtx_code
   2369 mask_to_comparison (int mask)
   2370 {
   2371   switch (mask)
   2372     {
   2373     case 8:
   2374       return LT;
   2375     case 4:
   2376       return GT;
   2377     case 2:
   2378       return EQ;
   2379     case 1:
   2380       return UNORDERED;
   2381 
   2382     case 12:
   2383       return LTGT;
   2384     case 10:
   2385       return LE;
   2386     case 6:
   2387       return GE;
   2388     case 9:
   2389       return UNLT;
   2390     case 5:
   2391       return UNGT;
   2392     case 3:
   2393       return UNEQ;
   2394 
   2395     case 14:
   2396       return ORDERED;
   2397     case 13:
   2398       return NE;
   2399     case 11:
   2400       return UNLE;
   2401     case 7:
   2402       return UNGE;
   2403 
   2404     default:
   2405       gcc_unreachable ();
   2406     }
   2407 }
   2408 
   2409 /* Return true if CODE is valid for comparisons of mode MODE, false
   2410    otherwise.
   2411 
   2412    It is always safe to return false, even if the code was valid for the
   2413    given mode as that will merely suppress optimizations.  */
   2414 
   2415 static bool
   2416 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
   2417 {
   2418   switch (code)
   2419     {
   2420       /* These are valid for integral, floating and vector modes.  */
   2421       case NE:
   2422       case EQ:
   2423       case GE:
   2424       case GT:
   2425       case LE:
   2426       case LT:
   2427 	return (INTEGRAL_MODE_P (mode)
   2428 		|| FLOAT_MODE_P (mode)
   2429 		|| VECTOR_MODE_P (mode));
   2430 
   2431       /* These are valid for floating point modes.  */
   2432       case LTGT:
   2433       case UNORDERED:
   2434       case ORDERED:
   2435       case UNEQ:
   2436       case UNGE:
   2437       case UNGT:
   2438       case UNLE:
   2439       case UNLT:
   2440 	return FLOAT_MODE_P (mode);
   2441 
   2442       /* These are filtered out in simplify_logical_operation, but
   2443 	 we check for them too as a matter of safety.   They are valid
   2444 	 for integral and vector modes.  */
   2445       case GEU:
   2446       case GTU:
   2447       case LEU:
   2448       case LTU:
   2449 	return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
   2450 
   2451       default:
   2452 	gcc_unreachable ();
   2453     }
   2454 }
   2455 
/* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
   false/true value of comparison with MODE where comparison operands
   have CMP_MODE.  Return NULL_RTX when the right value cannot be
   determined for MODE.  */

static rtx
relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
{
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      if (res == const0_rtx)
        return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
      /* The "true" value of a floating-point comparison result is
	 target-defined.  */
      REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
      return const_double_from_real_value (val, mode);
#else
      /* Without FLOAT_STORE_FLAG_VALUE we do not know what "true"
	 looks like in MODE.  */
      return NULL_RTX;
#endif
    }
  if (VECTOR_MODE_P (mode))
    {
      if (res == const0_rtx)
	return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
      /* The per-element "true" value of a vector comparison is also
	 target-defined; duplicate it across all elements.  */
      rtx val = VECTOR_STORE_FLAG_VALUE (mode);
      if (val == NULL_RTX)
	return NULL_RTX;
      if (val == const1_rtx)
	return CONST1_RTX (mode);

      return gen_const_vec_duplicate (mode, val);
#else
      return NULL_RTX;
#endif
    }
  /* For vector comparison with scalar int result, it is unknown
     if the target means here a comparison into an integral bitmask,
     or comparison where all comparisons true mean const_true_rtx
     whole result, or where any comparisons true mean const_true_rtx
     whole result.  For const0_rtx all the cases are the same.  */
  if (VECTOR_MODE_P (cmp_mode)
      && SCALAR_INT_MODE_P (mode)
      && res == const_true_rtx)
    return NULL_RTX;

  return res;
}
   2502 
   2503 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
   2504    and OP1, which should be both relational operations.  Return 0 if no such
   2505    simplification is possible.  */
   2506 rtx
   2507 simplify_context::simplify_logical_relational_operation (rtx_code code,
   2508 							 machine_mode mode,
   2509 							 rtx op0, rtx op1)
   2510 {
   2511   /* We only handle IOR of two relational operations.  */
   2512   if (code != IOR)
   2513     return 0;
   2514 
   2515   if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
   2516     return 0;
   2517 
   2518   if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
   2519 	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
   2520     return 0;
   2521 
   2522   enum rtx_code code0 = GET_CODE (op0);
   2523   enum rtx_code code1 = GET_CODE (op1);
   2524 
   2525   /* We don't handle unsigned comparisons currently.  */
   2526   if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
   2527     return 0;
   2528   if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
   2529     return 0;
   2530 
   2531   int mask0 = comparison_to_mask (code0);
   2532   int mask1 = comparison_to_mask (code1);
   2533 
   2534   int mask = mask0 | mask1;
   2535 
   2536   if (mask == 15)
   2537     return relational_result (mode, GET_MODE (op0), const_true_rtx);
   2538 
   2539   code = mask_to_comparison (mask);
   2540 
   2541   /* Many comparison codes are only valid for certain mode classes.  */
   2542   if (!comparison_code_valid_for_mode (code, mode))
   2543     return 0;
   2544 
   2545   op0 = XEXP (op1, 0);
   2546   op1 = XEXP (op1, 1);
   2547 
   2548   return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
   2549 }
   2550 
   2551 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
   2552    and OP1.  Return 0 if no simplification is possible.
   2553 
   2554    Don't use this for relational operations such as EQ or LT.
   2555    Use simplify_relational_operation instead.  */
   2556 rtx
   2557 simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
   2558 					     rtx op0, rtx op1)
   2559 {
   2560   rtx trueop0, trueop1;
   2561   rtx tem;
   2562 
   2563   /* Relational operations don't work here.  We must know the mode
   2564      of the operands in order to do the comparison correctly.
   2565      Assuming a full word can give incorrect results.
   2566      Consider comparing 128 with -128 in QImode.  */
   2567   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
   2568   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
   2569 
   2570   /* Make sure the constant is second.  */
   2571   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
   2572       && swap_commutative_operands_p (op0, op1))
   2573     std::swap (op0, op1);
   2574 
   2575   trueop0 = avoid_constant_pool_reference (op0);
   2576   trueop1 = avoid_constant_pool_reference (op1);
   2577 
   2578   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
   2579   if (tem)
   2580     return tem;
   2581   tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
   2582 
   2583   if (tem)
   2584     return tem;
   2585 
   2586   /* If the above steps did not result in a simplification and op0 or op1
   2587      were constant pool references, use the referenced constants directly.  */
   2588   if (trueop0 != op0 || trueop1 != op1)
   2589     return simplify_gen_binary (code, mode, trueop0, trueop1);
   2590 
   2591   return NULL_RTX;
   2592 }
   2593 
   2594 /* Subroutine of simplify_binary_operation_1 that looks for cases in
   2595    which OP0 and OP1 are both vector series or vector duplicates
   2596    (which are really just series with a step of 0).  If so, try to
   2597    form a new series by applying CODE to the bases and to the steps.
   2598    Return null if no simplification is possible.
   2599 
   2600    MODE is the mode of the operation and is known to be a vector
   2601    integer mode.  */
   2602 
   2603 rtx
   2604 simplify_context::simplify_binary_operation_series (rtx_code code,
   2605 						    machine_mode mode,
   2606 						    rtx op0, rtx op1)
   2607 {
   2608   rtx base0, step0;
   2609   if (vec_duplicate_p (op0, &base0))
   2610     step0 = const0_rtx;
   2611   else if (!vec_series_p (op0, &base0, &step0))
   2612     return NULL_RTX;
   2613 
   2614   rtx base1, step1;
   2615   if (vec_duplicate_p (op1, &base1))
   2616     step1 = const0_rtx;
   2617   else if (!vec_series_p (op1, &base1, &step1))
   2618     return NULL_RTX;
   2619 
   2620   /* Only create a new series if we can simplify both parts.  In other
   2621      cases this isn't really a simplification, and it's not necessarily
   2622      a win to replace a vector operation with a scalar operation.  */
   2623   scalar_mode inner_mode = GET_MODE_INNER (mode);
   2624   rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
   2625   if (!new_base)
   2626     return NULL_RTX;
   2627 
   2628   rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
   2629   if (!new_step)
   2630     return NULL_RTX;
   2631 
   2632   return gen_vec_series (mode, new_base, new_step);
   2633 }
   2634 
   2635 /* Subroutine of simplify_binary_operation_1.  Un-distribute a binary
   2636    operation CODE with result mode MODE, operating on OP0 and OP1.
   2637    e.g. simplify (xor (and A C) (and (B C)) to (and (xor (A B) C).
   2638    Returns NULL_RTX if no simplification is possible.  */
   2639 
   2640 rtx
   2641 simplify_context::simplify_distributive_operation (rtx_code code,
   2642 						   machine_mode mode,
   2643 						   rtx op0, rtx op1)
   2644 {
   2645   enum rtx_code op = GET_CODE (op0);
   2646   gcc_assert (GET_CODE (op1) == op);
   2647 
   2648   if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
   2649       && ! side_effects_p (XEXP (op0, 1)))
   2650     return simplify_gen_binary (op, mode,
   2651 				simplify_gen_binary (code, mode,
   2652 						     XEXP (op0, 0),
   2653 						     XEXP (op1, 0)),
   2654 				XEXP (op0, 1));
   2655 
   2656   if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
   2657     {
   2658       if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
   2659 	  && ! side_effects_p (XEXP (op0, 0)))
   2660 	return simplify_gen_binary (op, mode,
   2661 				    simplify_gen_binary (code, mode,
   2662 							 XEXP (op0, 1),
   2663 							 XEXP (op1, 1)),
   2664 				    XEXP (op0, 0));
   2665       if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
   2666 	  && ! side_effects_p (XEXP (op0, 0)))
   2667 	return simplify_gen_binary (op, mode,
   2668 				    simplify_gen_binary (code, mode,
   2669 							 XEXP (op0, 1),
   2670 							 XEXP (op1, 0)),
   2671 				    XEXP (op0, 0));
   2672       if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
   2673 	  && ! side_effects_p (XEXP (op0, 1)))
   2674 	return simplify_gen_binary (op, mode,
   2675 				    simplify_gen_binary (code, mode,
   2676 							 XEXP (op0, 0),
   2677 							 XEXP (op1, 1)),
   2678 				    XEXP (op0, 1));
   2679     }
   2680 
   2681   return NULL_RTX;
   2682 }
   2683 
   2684 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
   2685    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   2686    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   2687    actual constants.  */
   2688 
   2689 rtx
   2690 simplify_context::simplify_binary_operation_1 (rtx_code code,
   2691 					       machine_mode mode,
   2692 					       rtx op0, rtx op1,
   2693 					       rtx trueop0, rtx trueop1)
   2694 {
   2695   rtx tem, reversed, opleft, opright, elt0, elt1;
   2696   HOST_WIDE_INT val;
   2697   scalar_int_mode int_mode, inner_mode;
   2698   poly_int64 offset;
   2699 
   2700   /* Even if we can't compute a constant result,
   2701      there are some cases worth simplifying.  */
   2702 
   2703   switch (code)
   2704     {
   2705     case PLUS:
   2706       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
   2707 	 when x is NaN, infinite, or finite and nonzero.  They aren't
   2708 	 when x is -0 and the rounding mode is not towards -infinity,
   2709 	 since (-0) + 0 is then 0.  */
   2710       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
   2711 	return op0;
   2712 
   2713       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
   2714 	 transformations are safe even for IEEE.  */
   2715       if (GET_CODE (op0) == NEG)
   2716 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
   2717       else if (GET_CODE (op1) == NEG)
   2718 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
   2719 
   2720       /* (~a) + 1 -> -a */
   2721       if (INTEGRAL_MODE_P (mode)
   2722 	  && GET_CODE (op0) == NOT
   2723 	  && trueop1 == const1_rtx)
   2724 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
   2725 
   2726       /* Handle both-operands-constant cases.  We can only add
   2727 	 CONST_INTs to constants since the sum of relocatable symbols
   2728 	 can't be handled by most assemblers.  Don't add CONST_INT
   2729 	 to CONST_INT since overflow won't be computed properly if wider
   2730 	 than HOST_BITS_PER_WIDE_INT.  */
   2731 
   2732       if ((GET_CODE (op0) == CONST
   2733 	   || GET_CODE (op0) == SYMBOL_REF
   2734 	   || GET_CODE (op0) == LABEL_REF)
   2735 	  && poly_int_rtx_p (op1, &offset))
   2736 	return plus_constant (mode, op0, offset);
   2737       else if ((GET_CODE (op1) == CONST
   2738 		|| GET_CODE (op1) == SYMBOL_REF
   2739 		|| GET_CODE (op1) == LABEL_REF)
   2740 	       && poly_int_rtx_p (op0, &offset))
   2741 	return plus_constant (mode, op1, offset);
   2742 
   2743       /* See if this is something like X * C - X or vice versa or
   2744 	 if the multiplication is written as a shift.  If so, we can
   2745 	 distribute and make a new multiply, shift, or maybe just
   2746 	 have X (if C is 2 in the example above).  But don't make
   2747 	 something more expensive than we had before.  */
   2748 
   2749       if (is_a <scalar_int_mode> (mode, &int_mode))
   2750 	{
   2751 	  rtx lhs = op0, rhs = op1;
   2752 
   2753 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
   2754 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
   2755 
   2756 	  if (GET_CODE (lhs) == NEG)
   2757 	    {
   2758 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
   2759 	      lhs = XEXP (lhs, 0);
   2760 	    }
   2761 	  else if (GET_CODE (lhs) == MULT
   2762 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
   2763 	    {
   2764 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
   2765 	      lhs = XEXP (lhs, 0);
   2766 	    }
   2767 	  else if (GET_CODE (lhs) == ASHIFT
   2768 		   && CONST_INT_P (XEXP (lhs, 1))
   2769                    && INTVAL (XEXP (lhs, 1)) >= 0
   2770 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
   2771 	    {
   2772 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
   2773 					    GET_MODE_PRECISION (int_mode));
   2774 	      lhs = XEXP (lhs, 0);
   2775 	    }
   2776 
   2777 	  if (GET_CODE (rhs) == NEG)
   2778 	    {
   2779 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
   2780 	      rhs = XEXP (rhs, 0);
   2781 	    }
   2782 	  else if (GET_CODE (rhs) == MULT
   2783 		   && CONST_INT_P (XEXP (rhs, 1)))
   2784 	    {
   2785 	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
   2786 	      rhs = XEXP (rhs, 0);
   2787 	    }
   2788 	  else if (GET_CODE (rhs) == ASHIFT
   2789 		   && CONST_INT_P (XEXP (rhs, 1))
   2790 		   && INTVAL (XEXP (rhs, 1)) >= 0
   2791 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
   2792 	    {
   2793 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
   2794 					    GET_MODE_PRECISION (int_mode));
   2795 	      rhs = XEXP (rhs, 0);
   2796 	    }
   2797 
   2798 	  if (rtx_equal_p (lhs, rhs))
   2799 	    {
   2800 	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
   2801 	      rtx coeff;
   2802 	      bool speed = optimize_function_for_speed_p (cfun);
   2803 
   2804 	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
   2805 
   2806 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
   2807 	      return (set_src_cost (tem, int_mode, speed)
   2808 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
   2809 	    }
   2810 
   2811 	  /* Optimize (X - 1) * Y + Y to X * Y.  */
   2812 	  lhs = op0;
   2813 	  rhs = op1;
   2814 	  if (GET_CODE (op0) == MULT)
   2815 	    {
   2816 	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
   2817 		    && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
   2818 		   || (GET_CODE (XEXP (op0, 0)) == MINUS
   2819 		       && XEXP (XEXP (op0, 0), 1) == const1_rtx))
   2820 		  && rtx_equal_p (XEXP (op0, 1), op1))
   2821 		lhs = XEXP (XEXP (op0, 0), 0);
   2822 	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
   2823 			 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
   2824 			|| (GET_CODE (XEXP (op0, 1)) == MINUS
   2825 			    && XEXP (XEXP (op0, 1), 1) == const1_rtx))
   2826 		       && rtx_equal_p (XEXP (op0, 0), op1))
   2827 		lhs = XEXP (XEXP (op0, 1), 0);
   2828 	    }
   2829 	  else if (GET_CODE (op1) == MULT)
   2830 	    {
   2831 	      if (((GET_CODE (XEXP (op1, 0)) == PLUS
   2832 		    && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
   2833 		   || (GET_CODE (XEXP (op1, 0)) == MINUS
   2834 		       && XEXP (XEXP (op1, 0), 1) == const1_rtx))
   2835 		  && rtx_equal_p (XEXP (op1, 1), op0))
   2836 		rhs = XEXP (XEXP (op1, 0), 0);
   2837 	      else if (((GET_CODE (XEXP (op1, 1)) == PLUS
   2838 			 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
   2839 			|| (GET_CODE (XEXP (op1, 1)) == MINUS
   2840 			    && XEXP (XEXP (op1, 1), 1) == const1_rtx))
   2841 		       && rtx_equal_p (XEXP (op1, 0), op0))
   2842 		rhs = XEXP (XEXP (op1, 1), 0);
   2843 	    }
   2844 	  if (lhs != op0 || rhs != op1)
   2845 	    return simplify_gen_binary (MULT, int_mode, lhs, rhs);
   2846 	}
   2847 
   2848       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
   2849       if (CONST_SCALAR_INT_P (op1)
   2850 	  && GET_CODE (op0) == XOR
   2851 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
   2852 	  && mode_signbit_p (mode, op1))
   2853 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
   2854 				    simplify_gen_binary (XOR, mode, op1,
   2855 							 XEXP (op0, 1)));
   2856 
   2857       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
   2858       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
   2859 	  && GET_CODE (op0) == MULT
   2860 	  && GET_CODE (XEXP (op0, 0)) == NEG)
   2861 	{
   2862 	  rtx in1, in2;
   2863 
   2864 	  in1 = XEXP (XEXP (op0, 0), 0);
   2865 	  in2 = XEXP (op0, 1);
   2866 	  return simplify_gen_binary (MINUS, mode, op1,
   2867 				      simplify_gen_binary (MULT, mode,
   2868 							   in1, in2));
   2869 	}
   2870 
   2871       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
   2872 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
   2873 	 is 1.  */
   2874       if (COMPARISON_P (op0)
   2875 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
   2876 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
   2877 	  && (reversed = reversed_comparison (op0, mode)))
   2878 	return
   2879 	  simplify_gen_unary (NEG, mode, reversed, mode);
   2880 
   2881       /* If one of the operands is a PLUS or a MINUS, see if we can
   2882 	 simplify this by the associative law.
   2883 	 Don't use the associative law for floating point.
   2884 	 The inaccuracy makes it nonassociative,
   2885 	 and subtle programs can break if operations are associated.  */
   2886 
   2887       if (INTEGRAL_MODE_P (mode)
   2888 	  && (plus_minus_operand_p (op0)
   2889 	      || plus_minus_operand_p (op1))
   2890 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
   2891 	return tem;
   2892 
   2893       /* Reassociate floating point addition only when the user
   2894 	 specifies associative math operations.  */
   2895       if (FLOAT_MODE_P (mode)
   2896 	  && flag_associative_math)
   2897 	{
   2898 	  tem = simplify_associative_operation (code, mode, op0, op1);
   2899 	  if (tem)
   2900 	    return tem;
   2901 	}
   2902 
   2903       /* Handle vector series.  */
   2904       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
   2905 	{
   2906 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
   2907 	  if (tem)
   2908 	    return tem;
   2909 	}
   2910       break;
   2911 
   2912     case COMPARE:
   2913       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
   2914       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
   2915 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
   2916 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
   2917 	{
   2918 	  rtx xop00 = XEXP (op0, 0);
   2919 	  rtx xop10 = XEXP (op1, 0);
   2920 
   2921 	  if (REG_P (xop00) && REG_P (xop10)
   2922 	      && REGNO (xop00) == REGNO (xop10)
   2923 	      && GET_MODE (xop00) == mode
   2924 	      && GET_MODE (xop10) == mode
   2925 	      && GET_MODE_CLASS (mode) == MODE_CC)
   2926 	    return xop00;
   2927 	}
   2928       break;
   2929 
   2930     case MINUS:
   2931       /* We can't assume x-x is 0 even with non-IEEE floating point,
   2932 	 but since it is zero except in very strange circumstances, we
   2933 	 will treat it as zero with -ffinite-math-only.  */
   2934       if (rtx_equal_p (trueop0, trueop1)
   2935 	  && ! side_effects_p (op0)
   2936 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
   2937 	return CONST0_RTX (mode);
   2938 
   2939       /* Change subtraction from zero into negation.  (0 - x) is the
   2940 	 same as -x when x is NaN, infinite, or finite and nonzero.
   2941 	 But if the mode has signed zeros, and does not round towards
   2942 	 -infinity, then 0 - 0 is 0, not -0.  */
   2943       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
   2944 	return simplify_gen_unary (NEG, mode, op1, mode);
   2945 
   2946       /* (-1 - a) is ~a, unless the expression contains symbolic
   2947 	 constants, in which case not retaining additions and
   2948 	 subtractions could cause invalid assembly to be produced.  */
   2949       if (trueop0 == constm1_rtx
   2950 	  && !contains_symbolic_reference_p (op1))
   2951 	return simplify_gen_unary (NOT, mode, op1, mode);
   2952 
   2953       /* Subtracting 0 has no effect unless the mode has signalling NaNs,
   2954 	 or has signed zeros and supports rounding towards -infinity.
   2955 	 In such a case, 0 - 0 is -0.  */
   2956       if (!(HONOR_SIGNED_ZEROS (mode)
   2957 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
   2958 	  && !HONOR_SNANS (mode)
   2959 	  && trueop1 == CONST0_RTX (mode))
   2960 	return op0;
   2961 
   2962       /* See if this is something like X * C - X or vice versa or
   2963 	 if the multiplication is written as a shift.  If so, we can
   2964 	 distribute and make a new multiply, shift, or maybe just
   2965 	 have X (if C is 2 in the example above).  But don't make
   2966 	 something more expensive than we had before.  */
   2967 
   2968       if (is_a <scalar_int_mode> (mode, &int_mode))
   2969 	{
   2970 	  rtx lhs = op0, rhs = op1;
   2971 
   2972 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
   2973 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
   2974 
   2975 	  if (GET_CODE (lhs) == NEG)
   2976 	    {
   2977 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
   2978 	      lhs = XEXP (lhs, 0);
   2979 	    }
   2980 	  else if (GET_CODE (lhs) == MULT
   2981 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
   2982 	    {
   2983 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
   2984 	      lhs = XEXP (lhs, 0);
   2985 	    }
   2986 	  else if (GET_CODE (lhs) == ASHIFT
   2987 		   && CONST_INT_P (XEXP (lhs, 1))
   2988 		   && INTVAL (XEXP (lhs, 1)) >= 0
   2989 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
   2990 	    {
   2991 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
   2992 					    GET_MODE_PRECISION (int_mode));
   2993 	      lhs = XEXP (lhs, 0);
   2994 	    }
   2995 
   2996 	  if (GET_CODE (rhs) == NEG)
   2997 	    {
   2998 	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
   2999 	      rhs = XEXP (rhs, 0);
   3000 	    }
   3001 	  else if (GET_CODE (rhs) == MULT
   3002 		   && CONST_INT_P (XEXP (rhs, 1)))
   3003 	    {
   3004 	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
   3005 	      rhs = XEXP (rhs, 0);
   3006 	    }
   3007 	  else if (GET_CODE (rhs) == ASHIFT
   3008 		   && CONST_INT_P (XEXP (rhs, 1))
   3009 		   && INTVAL (XEXP (rhs, 1)) >= 0
   3010 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
   3011 	    {
   3012 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
   3013 					       GET_MODE_PRECISION (int_mode));
   3014 	      negcoeff1 = -negcoeff1;
   3015 	      rhs = XEXP (rhs, 0);
   3016 	    }
   3017 
   3018 	  if (rtx_equal_p (lhs, rhs))
   3019 	    {
   3020 	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
   3021 	      rtx coeff;
   3022 	      bool speed = optimize_function_for_speed_p (cfun);
   3023 
   3024 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
   3025 
   3026 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
   3027 	      return (set_src_cost (tem, int_mode, speed)
   3028 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
   3029 	    }
   3030 
   3031 	  /* Optimize (X + 1) * Y - Y to X * Y.  */
   3032 	  lhs = op0;
   3033 	  if (GET_CODE (op0) == MULT)
   3034 	    {
   3035 	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
   3036 		    && XEXP (XEXP (op0, 0), 1) == const1_rtx)
   3037 		   || (GET_CODE (XEXP (op0, 0)) == MINUS
   3038 		       && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
   3039 		  && rtx_equal_p (XEXP (op0, 1), op1))
   3040 		lhs = XEXP (XEXP (op0, 0), 0);
   3041 	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
   3042 			 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
   3043 			|| (GET_CODE (XEXP (op0, 1)) == MINUS
   3044 			    && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
   3045 		       && rtx_equal_p (XEXP (op0, 0), op1))
   3046 		lhs = XEXP (XEXP (op0, 1), 0);
   3047 	    }
   3048 	  if (lhs != op0)
   3049 	    return simplify_gen_binary (MULT, int_mode, lhs, op1);
   3050 	}
   3051 
   3052       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
   3053       if (GET_CODE (op1) == NEG)
   3054 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
   3055 
   3056       /* (-x - c) may be simplified as (-c - x).  */
   3057       if (GET_CODE (op0) == NEG
   3058 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
   3059 	{
   3060 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
   3061 	  if (tem)
   3062 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
   3063 	}
   3064 
   3065       if ((GET_CODE (op0) == CONST
   3066 	   || GET_CODE (op0) == SYMBOL_REF
   3067 	   || GET_CODE (op0) == LABEL_REF)
   3068 	  && poly_int_rtx_p (op1, &offset))
   3069 	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
   3070 
   3071       /* Don't let a relocatable value get a negative coeff.  */
   3072       if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
   3073 	return simplify_gen_binary (PLUS, mode,
   3074 				    op0,
   3075 				    neg_poly_int_rtx (mode, op1));
   3076 
   3077       /* (x - (x & y)) -> (x & ~y) */
   3078       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
   3079 	{
   3080 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
   3081 	    {
   3082 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
   3083 					GET_MODE (XEXP (op1, 1)));
   3084 	      return simplify_gen_binary (AND, mode, op0, tem);
   3085 	    }
   3086 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
   3087 	    {
   3088 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
   3089 					GET_MODE (XEXP (op1, 0)));
   3090 	      return simplify_gen_binary (AND, mode, op0, tem);
   3091 	    }
   3092 	}
   3093 
   3094       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
   3095 	 by reversing the comparison code if valid.  */
   3096       if (STORE_FLAG_VALUE == 1
   3097 	  && trueop0 == const1_rtx
   3098 	  && COMPARISON_P (op1)
   3099 	  && (reversed = reversed_comparison (op1, mode)))
   3100 	return reversed;
   3101 
   3102       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
   3103       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
   3104 	  && GET_CODE (op1) == MULT
   3105 	  && GET_CODE (XEXP (op1, 0)) == NEG)
   3106 	{
   3107 	  rtx in1, in2;
   3108 
   3109 	  in1 = XEXP (XEXP (op1, 0), 0);
   3110 	  in2 = XEXP (op1, 1);
   3111 	  return simplify_gen_binary (PLUS, mode,
   3112 				      simplify_gen_binary (MULT, mode,
   3113 							   in1, in2),
   3114 				      op0);
   3115 	}
   3116 
   3117       /* Canonicalize (minus (neg A) (mult B C)) to
   3118 	 (minus (mult (neg B) C) A).  */
   3119       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
   3120 	  && GET_CODE (op1) == MULT
   3121 	  && GET_CODE (op0) == NEG)
   3122 	{
   3123 	  rtx in1, in2;
   3124 
   3125 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
   3126 	  in2 = XEXP (op1, 1);
   3127 	  return simplify_gen_binary (MINUS, mode,
   3128 				      simplify_gen_binary (MULT, mode,
   3129 							   in1, in2),
   3130 				      XEXP (op0, 0));
   3131 	}
   3132 
   3133       /* If one of the operands is a PLUS or a MINUS, see if we can
   3134 	 simplify this by the associative law.  This will, for example,
   3135          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
   3136 	 Don't use the associative law for floating point.
   3137 	 The inaccuracy makes it nonassociative,
   3138 	 and subtle programs can break if operations are associated.  */
   3139 
   3140       if (INTEGRAL_MODE_P (mode)
   3141 	  && (plus_minus_operand_p (op0)
   3142 	      || plus_minus_operand_p (op1))
   3143 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
   3144 	return tem;
   3145 
   3146       /* Handle vector series.  */
   3147       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
   3148 	{
   3149 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
   3150 	  if (tem)
   3151 	    return tem;
   3152 	}
   3153       break;
   3154 
   3155     case MULT:
   3156       if (trueop1 == constm1_rtx)
   3157 	return simplify_gen_unary (NEG, mode, op0, mode);
   3158 
   3159       if (GET_CODE (op0) == NEG)
   3160 	{
   3161 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
   3162 	  /* If op1 is a MULT as well and simplify_unary_operation
   3163 	     just moved the NEG to the second operand, simplify_gen_binary
   3164 	     below could through simplify_associative_operation move
   3165 	     the NEG around again and recurse endlessly.  */
   3166 	  if (temp
   3167 	      && GET_CODE (op1) == MULT
   3168 	      && GET_CODE (temp) == MULT
   3169 	      && XEXP (op1, 0) == XEXP (temp, 0)
   3170 	      && GET_CODE (XEXP (temp, 1)) == NEG
   3171 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
   3172 	    temp = NULL_RTX;
   3173 	  if (temp)
   3174 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
   3175 	}
   3176       if (GET_CODE (op1) == NEG)
   3177 	{
   3178 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
   3179 	  /* If op0 is a MULT as well and simplify_unary_operation
   3180 	     just moved the NEG to the second operand, simplify_gen_binary
   3181 	     below could through simplify_associative_operation move
   3182 	     the NEG around again and recurse endlessly.  */
   3183 	  if (temp
   3184 	      && GET_CODE (op0) == MULT
   3185 	      && GET_CODE (temp) == MULT
   3186 	      && XEXP (op0, 0) == XEXP (temp, 0)
   3187 	      && GET_CODE (XEXP (temp, 1)) == NEG
   3188 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
   3189 	    temp = NULL_RTX;
   3190 	  if (temp)
   3191 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
   3192 	}
   3193 
   3194       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
   3195 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
   3196 	 when the mode has signed zeros, since multiplying a negative
   3197 	 number by 0 will give -0, not 0.  */
   3198       if (!HONOR_NANS (mode)
   3199 	  && !HONOR_SIGNED_ZEROS (mode)
   3200 	  && trueop1 == CONST0_RTX (mode)
   3201 	  && ! side_effects_p (op0))
   3202 	return op1;
   3203 
   3204       /* In IEEE floating point, x*1 is not equivalent to x for
   3205 	 signalling NaNs.  */
   3206       if (!HONOR_SNANS (mode)
   3207 	  && trueop1 == CONST1_RTX (mode))
   3208 	return op0;
   3209 
   3210       /* Convert multiply by constant power of two into shift.  */
   3211       if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
   3212 	{
   3213 	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
   3214 	  if (val >= 0)
   3215 	    return simplify_gen_binary (ASHIFT, mode, op0,
   3216 					gen_int_shift_amount (mode, val));
   3217 	}
   3218 
   3219       /* x*2 is x+x and x*(-1) is -x */
   3220       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
   3221 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
   3222 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
   3223 	  && GET_MODE (op0) == mode)
   3224 	{
   3225 	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
   3226 
   3227 	  if (real_equal (d1, &dconst2))
   3228 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
   3229 
   3230 	  if (!HONOR_SNANS (mode)
   3231 	      && real_equal (d1, &dconstm1))
   3232 	    return simplify_gen_unary (NEG, mode, op0, mode);
   3233 	}
   3234 
   3235       /* Optimize -x * -x as x * x.  */
   3236       if (FLOAT_MODE_P (mode)
   3237 	  && GET_CODE (op0) == NEG
   3238 	  && GET_CODE (op1) == NEG
   3239 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
   3240 	  && !side_effects_p (XEXP (op0, 0)))
   3241 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
   3242 
   3243       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
   3244       if (SCALAR_FLOAT_MODE_P (mode)
   3245 	  && GET_CODE (op0) == ABS
   3246 	  && GET_CODE (op1) == ABS
   3247 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
   3248 	  && !side_effects_p (XEXP (op0, 0)))
   3249 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
   3250 
   3251       /* Reassociate multiplication, but for floating point MULTs
   3252 	 only when the user specifies unsafe math optimizations.  */
   3253       if (! FLOAT_MODE_P (mode)
   3254 	  || flag_unsafe_math_optimizations)
   3255 	{
   3256 	  tem = simplify_associative_operation (code, mode, op0, op1);
   3257 	  if (tem)
   3258 	    return tem;
   3259 	}
   3260       break;
   3261 
   3262     case IOR:
   3263       if (trueop1 == CONST0_RTX (mode))
   3264 	return op0;
   3265       if (INTEGRAL_MODE_P (mode)
   3266 	  && trueop1 == CONSTM1_RTX (mode)
   3267 	  && !side_effects_p (op0))
   3268 	return op1;
   3269       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
   3270 	return op0;
   3271       /* A | (~A) -> -1 */
   3272       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
   3273 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
   3274 	  && ! side_effects_p (op0)
   3275 	  && SCALAR_INT_MODE_P (mode))
   3276 	return constm1_rtx;
   3277 
   3278       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
   3279       if (CONST_INT_P (op1)
   3280 	  && HWI_COMPUTABLE_MODE_P (mode)
   3281 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
   3282 	  && !side_effects_p (op0))
   3283 	return op1;
   3284 
   3285       /* Canonicalize (X & C1) | C2.  */
   3286       if (GET_CODE (op0) == AND
   3287 	  && CONST_INT_P (trueop1)
   3288 	  && CONST_INT_P (XEXP (op0, 1)))
   3289 	{
   3290 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
   3291 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
   3292 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
   3293 
   3294 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
   3295 	  if ((c1 & c2) == c1
   3296 	      && !side_effects_p (XEXP (op0, 0)))
   3297 	    return trueop1;
   3298 
   3299 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
   3300 	  if (((c1|c2) & mask) == mask)
   3301 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
   3302 	}
   3303 
   3304       /* Convert (A & B) | A to A.  */
   3305       if (GET_CODE (op0) == AND
   3306 	  && (rtx_equal_p (XEXP (op0, 0), op1)
   3307 	      || rtx_equal_p (XEXP (op0, 1), op1))
   3308 	  && ! side_effects_p (XEXP (op0, 0))
   3309 	  && ! side_effects_p (XEXP (op0, 1)))
   3310 	return op1;
   3311 
   3312       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
   3313          mode size to (rotate A CX).  */
   3314 
   3315       if (GET_CODE (op1) == ASHIFT
   3316           || GET_CODE (op1) == SUBREG)
   3317         {
   3318 	  opleft = op1;
   3319 	  opright = op0;
   3320 	}
   3321       else
   3322         {
   3323 	  opright = op1;
   3324 	  opleft = op0;
   3325 	}
   3326 
   3327       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
   3328           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
   3329           && CONST_INT_P (XEXP (opleft, 1))
   3330           && CONST_INT_P (XEXP (opright, 1))
   3331           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
   3332 	      == GET_MODE_UNIT_PRECISION (mode)))
   3333         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
   3334 
   3335       /* Same, but for ashift that has been "simplified" to a wider mode
   3336         by simplify_shift_const.  */
   3337 
   3338       if (GET_CODE (opleft) == SUBREG
   3339 	  && is_a <scalar_int_mode> (mode, &int_mode)
   3340 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
   3341 				     &inner_mode)
   3342           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
   3343           && GET_CODE (opright) == LSHIFTRT
   3344           && GET_CODE (XEXP (opright, 0)) == SUBREG
   3345 	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
   3346 	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
   3347           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
   3348                           SUBREG_REG (XEXP (opright, 0)))
   3349           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
   3350           && CONST_INT_P (XEXP (opright, 1))
   3351 	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
   3352 	      + INTVAL (XEXP (opright, 1))
   3353 	      == GET_MODE_PRECISION (int_mode)))
   3354 	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
   3355 			       XEXP (SUBREG_REG (opleft), 1));
   3356 
   3357       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
   3358          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
   3359 	 the PLUS does not affect any of the bits in OP1: then we can do
   3360 	 the IOR as a PLUS and we can associate.  This is valid if OP1
   3361          can be safely shifted left C bits.  */
   3362       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
   3363           && GET_CODE (XEXP (op0, 0)) == PLUS
   3364           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
   3365           && CONST_INT_P (XEXP (op0, 1))
   3366           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
   3367         {
   3368 	  int count = INTVAL (XEXP (op0, 1));
   3369 	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
   3370 
   3371           if (mask >> count == INTVAL (trueop1)
   3372 	      && trunc_int_for_mode (mask, mode) == mask
   3373               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
   3374 	    return simplify_gen_binary (ASHIFTRT, mode,
   3375 					plus_constant (mode, XEXP (op0, 0),
   3376 						       mask),
   3377 					XEXP (op0, 1));
   3378         }
   3379 
   3380       /* The following happens with bitfield merging.
   3381          (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
   3382       if (GET_CODE (op0) == AND
   3383 	  && GET_CODE (op1) == AND
   3384 	  && CONST_INT_P (XEXP (op0, 1))
   3385 	  && CONST_INT_P (XEXP (op1, 1))
   3386 	  && (INTVAL (XEXP (op0, 1))
   3387 	      == ~INTVAL (XEXP (op1, 1))))
   3388 	{
   3389 	  /* The IOR may be on both sides.  */
   3390 	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
   3391 	  if (GET_CODE (XEXP (op1, 0)) == IOR)
   3392 	    top0 = op0, top1 = op1;
   3393 	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
   3394 	    top0 = op1, top1 = op0;
   3395 	  if (top0 && top1)
   3396 	    {
   3397 	      /* X may be on either side of the inner IOR.  */
   3398 	      rtx tem = NULL_RTX;
   3399 	      if (rtx_equal_p (XEXP (top0, 0),
   3400 			       XEXP (XEXP (top1, 0), 0)))
   3401 		tem = XEXP (XEXP (top1, 0), 1);
   3402 	      else if (rtx_equal_p (XEXP (top0, 0),
   3403 				    XEXP (XEXP (top1, 0), 1)))
   3404 		tem = XEXP (XEXP (top1, 0), 0);
   3405 	      if (tem)
   3406 		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
   3407 					    simplify_gen_binary
   3408 					      (AND, mode, tem, XEXP (top1, 1)));
   3409 	    }
   3410 	}
   3411 
   3412       /* Convert (ior (and A C) (and B C)) into (and (ior A B) C).  */
   3413       if (GET_CODE (op0) == GET_CODE (op1)
   3414 	  && (GET_CODE (op0) == AND
   3415 	      || GET_CODE (op0) == IOR
   3416 	      || GET_CODE (op0) == LSHIFTRT
   3417 	      || GET_CODE (op0) == ASHIFTRT
   3418 	      || GET_CODE (op0) == ASHIFT
   3419 	      || GET_CODE (op0) == ROTATE
   3420 	      || GET_CODE (op0) == ROTATERT))
   3421 	{
   3422 	  tem = simplify_distributive_operation (code, mode, op0, op1);
   3423 	  if (tem)
   3424 	    return tem;
   3425 	}
   3426 
   3427       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
   3428       if (tem)
   3429 	return tem;
   3430 
   3431       tem = simplify_associative_operation (code, mode, op0, op1);
   3432       if (tem)
   3433 	return tem;
   3434 
   3435       tem = simplify_logical_relational_operation (code, mode, op0, op1);
   3436       if (tem)
   3437 	return tem;
   3438       break;
   3439 
   3440     case XOR:
   3441       if (trueop1 == CONST0_RTX (mode))
   3442 	return op0;
   3443       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
   3444 	return simplify_gen_unary (NOT, mode, op0, mode);
   3445       if (rtx_equal_p (trueop0, trueop1)
   3446 	  && ! side_effects_p (op0)
   3447 	  && GET_MODE_CLASS (mode) != MODE_CC)
   3448 	 return CONST0_RTX (mode);
   3449 
   3450       /* Canonicalize XOR of the most significant bit to PLUS.  */
   3451       if (CONST_SCALAR_INT_P (op1)
   3452 	  && mode_signbit_p (mode, op1))
   3453 	return simplify_gen_binary (PLUS, mode, op0, op1);
   3454       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
   3455       if (CONST_SCALAR_INT_P (op1)
   3456 	  && GET_CODE (op0) == PLUS
   3457 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
   3458 	  && mode_signbit_p (mode, XEXP (op0, 1)))
   3459 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
   3460 				    simplify_gen_binary (XOR, mode, op1,
   3461 							 XEXP (op0, 1)));
   3462 
   3463       /* If we are XORing two things that have no bits in common,
   3464 	 convert them into an IOR.  This helps to detect rotation encoded
   3465 	 using those methods and possibly other simplifications.  */
   3466 
   3467       if (HWI_COMPUTABLE_MODE_P (mode)
   3468 	  && (nonzero_bits (op0, mode)
   3469 	      & nonzero_bits (op1, mode)) == 0)
   3470 	return (simplify_gen_binary (IOR, mode, op0, op1));
   3471 
   3472       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
   3473 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
   3474 	 (NOT y).  */
   3475       {
   3476 	int num_negated = 0;
   3477 
   3478 	if (GET_CODE (op0) == NOT)
   3479 	  num_negated++, op0 = XEXP (op0, 0);
   3480 	if (GET_CODE (op1) == NOT)
   3481 	  num_negated++, op1 = XEXP (op1, 0);
   3482 
   3483 	if (num_negated == 2)
   3484 	  return simplify_gen_binary (XOR, mode, op0, op1);
   3485 	else if (num_negated == 1)
   3486 	  return simplify_gen_unary (NOT, mode,
   3487 				     simplify_gen_binary (XOR, mode, op0, op1),
   3488 				     mode);
   3489       }
   3490 
   3491       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
   3492 	 correspond to a machine insn or result in further simplifications
   3493 	 if B is a constant.  */
   3494 
   3495       if (GET_CODE (op0) == AND
   3496 	  && rtx_equal_p (XEXP (op0, 1), op1)
   3497 	  && ! side_effects_p (op1))
   3498 	return simplify_gen_binary (AND, mode,
   3499 				    simplify_gen_unary (NOT, mode,
   3500 							XEXP (op0, 0), mode),
   3501 				    op1);
   3502 
   3503       else if (GET_CODE (op0) == AND
   3504 	       && rtx_equal_p (XEXP (op0, 0), op1)
   3505 	       && ! side_effects_p (op1))
   3506 	return simplify_gen_binary (AND, mode,
   3507 				    simplify_gen_unary (NOT, mode,
   3508 							XEXP (op0, 1), mode),
   3509 				    op1);
   3510 
   3511       /* Given (xor (ior (xor A B) C) D), where B, C and D are
   3512 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
   3513 	 out bits inverted twice and not set by C.  Similarly, given
   3514 	 (xor (and (xor A B) C) D), simplify without inverting C in
   3515 	 the xor operand: (xor (and A C) (B&C)^D).
   3516       */
   3517       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
   3518 	       && GET_CODE (XEXP (op0, 0)) == XOR
   3519 	       && CONST_INT_P (op1)
   3520 	       && CONST_INT_P (XEXP (op0, 1))
   3521 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
   3522 	{
   3523 	  enum rtx_code op = GET_CODE (op0);
   3524 	  rtx a = XEXP (XEXP (op0, 0), 0);
   3525 	  rtx b = XEXP (XEXP (op0, 0), 1);
   3526 	  rtx c = XEXP (op0, 1);
   3527 	  rtx d = op1;
   3528 	  HOST_WIDE_INT bval = INTVAL (b);
   3529 	  HOST_WIDE_INT cval = INTVAL (c);
   3530 	  HOST_WIDE_INT dval = INTVAL (d);
   3531 	  HOST_WIDE_INT xcval;
   3532 
   3533 	  if (op == IOR)
   3534 	    xcval = ~cval;
   3535 	  else
   3536 	    xcval = cval;
   3537 
   3538 	  return simplify_gen_binary (XOR, mode,
   3539 				      simplify_gen_binary (op, mode, a, c),
   3540 				      gen_int_mode ((bval & xcval) ^ dval,
   3541 						    mode));
   3542 	}
   3543 
   3544       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
   3545 	 we can transform like this:
   3546             (A&B)^C == ~(A&B)&C | ~C&(A&B)
   3547                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
   3548                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
   3549 	 Attempt a few simplifications when B and C are both constants.  */
   3550       if (GET_CODE (op0) == AND
   3551 	  && CONST_INT_P (op1)
   3552 	  && CONST_INT_P (XEXP (op0, 1)))
   3553 	{
   3554 	  rtx a = XEXP (op0, 0);
   3555 	  rtx b = XEXP (op0, 1);
   3556 	  rtx c = op1;
   3557 	  HOST_WIDE_INT bval = INTVAL (b);
   3558 	  HOST_WIDE_INT cval = INTVAL (c);
   3559 
   3560 	  /* Instead of computing ~A&C, we compute its negated value,
   3561 	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
   3562 	     optimize for sure.  If it does not simplify, we still try
   3563 	     to compute ~A&C below, but since that always allocates
   3564 	     RTL, we don't try that before committing to returning a
   3565 	     simplified expression.  */
   3566 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
   3567 						  GEN_INT (~cval));
   3568 
   3569 	  if ((~cval & bval) == 0)
   3570 	    {
   3571 	      rtx na_c = NULL_RTX;
   3572 	      if (n_na_c)
   3573 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
   3574 	      else
   3575 		{
   3576 		  /* If ~A does not simplify, don't bother: we don't
   3577 		     want to simplify 2 operations into 3, and if na_c
   3578 		     were to simplify with na, n_na_c would have
   3579 		     simplified as well.  */
   3580 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
   3581 		  if (na)
   3582 		    na_c = simplify_gen_binary (AND, mode, na, c);
   3583 		}
   3584 
   3585 	      /* Try to simplify ~A&C | ~B&C.  */
   3586 	      if (na_c != NULL_RTX)
   3587 		return simplify_gen_binary (IOR, mode, na_c,
   3588 					    gen_int_mode (~bval & cval, mode));
   3589 	    }
   3590 	  else
   3591 	    {
   3592 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
   3593 	      if (n_na_c == CONSTM1_RTX (mode))
   3594 		{
   3595 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
   3596 						    gen_int_mode (~cval & bval,
   3597 								  mode));
   3598 		  return simplify_gen_binary (IOR, mode, a_nc_b,
   3599 					      gen_int_mode (~bval & cval,
   3600 							    mode));
   3601 		}
   3602 	    }
   3603 	}
   3604 
   3605       /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
   3606 	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
   3607 	 machines, and also has shorter instruction path length.  */
   3608       if (GET_CODE (op0) == AND
   3609 	  && GET_CODE (XEXP (op0, 0)) == XOR
   3610 	  && CONST_INT_P (XEXP (op0, 1))
   3611 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
   3612 	{
   3613 	  rtx a = trueop1;
   3614 	  rtx b = XEXP (XEXP (op0, 0), 1);
   3615 	  rtx c = XEXP (op0, 1);
   3616 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
   3617 	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
   3618 	  rtx bc = simplify_gen_binary (AND, mode, b, c);
   3619 	  return simplify_gen_binary (IOR, mode, a_nc, bc);
   3620 	}
   3621       /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
   3622       else if (GET_CODE (op0) == AND
   3623 	  && GET_CODE (XEXP (op0, 0)) == XOR
   3624 	  && CONST_INT_P (XEXP (op0, 1))
   3625 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
   3626 	{
   3627 	  rtx a = XEXP (XEXP (op0, 0), 0);
   3628 	  rtx b = trueop1;
   3629 	  rtx c = XEXP (op0, 1);
   3630 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
   3631 	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
   3632 	  rtx ac = simplify_gen_binary (AND, mode, a, c);
   3633 	  return simplify_gen_binary (IOR, mode, ac, b_nc);
   3634 	}
   3635 
   3636       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
   3637 	 comparison if STORE_FLAG_VALUE is 1.  */
   3638       if (STORE_FLAG_VALUE == 1
   3639 	  && trueop1 == const1_rtx
   3640 	  && COMPARISON_P (op0)
   3641 	  && (reversed = reversed_comparison (op0, mode)))
   3642 	return reversed;
   3643 
   3644       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
   3645 	 is (lt foo (const_int 0)), so we can perform the above
   3646 	 simplification if STORE_FLAG_VALUE is 1.  */
   3647 
   3648       if (is_a <scalar_int_mode> (mode, &int_mode)
   3649 	  && STORE_FLAG_VALUE == 1
   3650 	  && trueop1 == const1_rtx
   3651 	  && GET_CODE (op0) == LSHIFTRT
   3652 	  && CONST_INT_P (XEXP (op0, 1))
   3653 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
   3654 	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
   3655 
   3656       /* (xor (comparison foo bar) (const_int sign-bit))
   3657 	 when STORE_FLAG_VALUE is the sign bit.  */
   3658       if (is_a <scalar_int_mode> (mode, &int_mode)
   3659 	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
   3660 	  && trueop1 == const_true_rtx
   3661 	  && COMPARISON_P (op0)
   3662 	  && (reversed = reversed_comparison (op0, int_mode)))
   3663 	return reversed;
   3664 
   3665       /* Convert (xor (and A C) (and B C)) into (and (xor A B) C).  */
   3666       if (GET_CODE (op0) == GET_CODE (op1)
   3667 	  && (GET_CODE (op0) == AND
   3668 	      || GET_CODE (op0) == LSHIFTRT
   3669 	      || GET_CODE (op0) == ASHIFTRT
   3670 	      || GET_CODE (op0) == ASHIFT
   3671 	      || GET_CODE (op0) == ROTATE
   3672 	      || GET_CODE (op0) == ROTATERT))
   3673 	{
   3674 	  tem = simplify_distributive_operation (code, mode, op0, op1);
   3675 	  if (tem)
   3676 	    return tem;
   3677 	}
   3678 
   3679       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
   3680       if (tem)
   3681 	return tem;
   3682 
   3683       tem = simplify_associative_operation (code, mode, op0, op1);
   3684       if (tem)
   3685 	return tem;
   3686       break;
   3687 
   3688     case AND:
   3689       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
   3690 	return trueop1;
   3691       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
   3692 	return op0;
   3693       if (HWI_COMPUTABLE_MODE_P (mode))
   3694 	{
   3695 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
   3696 	  HOST_WIDE_INT nzop1;
   3697 	  if (CONST_INT_P (trueop1))
   3698 	    {
   3699 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
   3700 	      /* If we are turning off bits already known off in OP0, we need
   3701 		 not do an AND.  */
   3702 	      if ((nzop0 & ~val1) == 0)
   3703 		return op0;
   3704 	    }
   3705 	  nzop1 = nonzero_bits (trueop1, mode);
   3706 	  /* If we are clearing all the nonzero bits, the result is zero.  */
   3707 	  if ((nzop1 & nzop0) == 0
   3708 	      && !side_effects_p (op0) && !side_effects_p (op1))
   3709 	    return CONST0_RTX (mode);
   3710 	}
   3711       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
   3712 	  && GET_MODE_CLASS (mode) != MODE_CC)
   3713 	return op0;
   3714       /* A & (~A) -> 0 */
   3715       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
   3716 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
   3717 	  && ! side_effects_p (op0)
   3718 	  && GET_MODE_CLASS (mode) != MODE_CC)
   3719 	return CONST0_RTX (mode);
   3720 
   3721       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
   3722 	 there are no nonzero bits of C outside of X's mode.  */
   3723       if ((GET_CODE (op0) == SIGN_EXTEND
   3724 	   || GET_CODE (op0) == ZERO_EXTEND)
   3725 	  && CONST_INT_P (trueop1)
   3726 	  && HWI_COMPUTABLE_MODE_P (mode)
   3727 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
   3728 	      & UINTVAL (trueop1)) == 0)
   3729 	{
   3730 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
   3731 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
   3732 				     gen_int_mode (INTVAL (trueop1),
   3733 						   imode));
   3734 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
   3735 	}
   3736 
   3737       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
   3738 	 we might be able to further simplify the AND with X and potentially
   3739 	 remove the truncation altogether.  */
   3740       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
   3741 	{
   3742 	  rtx x = XEXP (op0, 0);
   3743 	  machine_mode xmode = GET_MODE (x);
   3744 	  tem = simplify_gen_binary (AND, xmode, x,
   3745 				     gen_int_mode (INTVAL (trueop1), xmode));
   3746 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
   3747 	}
   3748 
   3749       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
   3750       if (GET_CODE (op0) == IOR
   3751 	  && CONST_INT_P (trueop1)
   3752 	  && CONST_INT_P (XEXP (op0, 1)))
   3753 	{
   3754 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
   3755 	  return simplify_gen_binary (IOR, mode,
   3756 				      simplify_gen_binary (AND, mode,
   3757 							   XEXP (op0, 0), op1),
   3758 				      gen_int_mode (tmp, mode));
   3759 	}
   3760 
   3761       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
   3762 	 insn (and may simplify more).  */
   3763       if (GET_CODE (op0) == XOR
   3764 	  && rtx_equal_p (XEXP (op0, 0), op1)
   3765 	  && ! side_effects_p (op1))
   3766 	return simplify_gen_binary (AND, mode,
   3767 				    simplify_gen_unary (NOT, mode,
   3768 							XEXP (op0, 1), mode),
   3769 				    op1);
   3770 
   3771       if (GET_CODE (op0) == XOR
   3772 	  && rtx_equal_p (XEXP (op0, 1), op1)
   3773 	  && ! side_effects_p (op1))
   3774 	return simplify_gen_binary (AND, mode,
   3775 				    simplify_gen_unary (NOT, mode,
   3776 							XEXP (op0, 0), mode),
   3777 				    op1);
   3778 
   3779       /* Similarly for (~(A ^ B)) & A.  */
   3780       if (GET_CODE (op0) == NOT
   3781 	  && GET_CODE (XEXP (op0, 0)) == XOR
   3782 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
   3783 	  && ! side_effects_p (op1))
   3784 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
   3785 
   3786       if (GET_CODE (op0) == NOT
   3787 	  && GET_CODE (XEXP (op0, 0)) == XOR
   3788 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
   3789 	  && ! side_effects_p (op1))
   3790 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
   3791 
   3792       /* Convert (A | B) & A to A.  */
   3793       if (GET_CODE (op0) == IOR
   3794 	  && (rtx_equal_p (XEXP (op0, 0), op1)
   3795 	      || rtx_equal_p (XEXP (op0, 1), op1))
   3796 	  && ! side_effects_p (XEXP (op0, 0))
   3797 	  && ! side_effects_p (XEXP (op0, 1)))
   3798 	return op1;
   3799 
   3800       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   3801 	 ((A & N) + B) & M -> (A + B) & M
   3802 	 Similarly if (N & M) == 0,
   3803 	 ((A | N) + B) & M -> (A + B) & M
   3804 	 and for - instead of + and/or ^ instead of |.
   3805          Also, if (N & M) == 0, then
   3806 	 (A +- N) & M -> A & M.  */
   3807       if (CONST_INT_P (trueop1)
   3808 	  && HWI_COMPUTABLE_MODE_P (mode)
   3809 	  && ~UINTVAL (trueop1)
   3810 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
   3811 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
   3812 	{
   3813 	  rtx pmop[2];
   3814 	  int which;
   3815 
   3816 	  pmop[0] = XEXP (op0, 0);
   3817 	  pmop[1] = XEXP (op0, 1);
   3818 
   3819 	  if (CONST_INT_P (pmop[1])
   3820 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
   3821 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
   3822 
   3823 	  for (which = 0; which < 2; which++)
   3824 	    {
   3825 	      tem = pmop[which];
   3826 	      switch (GET_CODE (tem))
   3827 		{
   3828 		case AND:
   3829 		  if (CONST_INT_P (XEXP (tem, 1))
   3830 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
   3831 		      == UINTVAL (trueop1))
   3832 		    pmop[which] = XEXP (tem, 0);
   3833 		  break;
   3834 		case IOR:
   3835 		case XOR:
   3836 		  if (CONST_INT_P (XEXP (tem, 1))
   3837 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
   3838 		    pmop[which] = XEXP (tem, 0);
   3839 		  break;
   3840 		default:
   3841 		  break;
   3842 		}
   3843 	    }
   3844 
   3845 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
   3846 	    {
   3847 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
   3848 					 pmop[0], pmop[1]);
   3849 	      return simplify_gen_binary (code, mode, tem, op1);
   3850 	    }
   3851 	}
   3852 
    3853 	      /* (and X (ior (not X) Y)) -> (and X Y) */
   3854       if (GET_CODE (op1) == IOR
   3855 	  && GET_CODE (XEXP (op1, 0)) == NOT
   3856 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
   3857        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
   3858 
   3859       /* (and (ior (not X) Y) X) -> (and X Y) */
   3860       if (GET_CODE (op0) == IOR
   3861 	  && GET_CODE (XEXP (op0, 0)) == NOT
   3862 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
   3863 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
   3864 
    3865 	      /* (and X (ior Y (not X))) -> (and X Y) */
   3866       if (GET_CODE (op1) == IOR
   3867 	  && GET_CODE (XEXP (op1, 1)) == NOT
   3868 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
   3869        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
   3870 
   3871       /* (and (ior Y (not X)) X) -> (and X Y) */
   3872       if (GET_CODE (op0) == IOR
   3873 	  && GET_CODE (XEXP (op0, 1)) == NOT
   3874 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
   3875 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
   3876 
   3877       /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C).  */
   3878       if (GET_CODE (op0) == GET_CODE (op1)
   3879 	  && (GET_CODE (op0) == AND
   3880 	      || GET_CODE (op0) == IOR
   3881 	      || GET_CODE (op0) == LSHIFTRT
   3882 	      || GET_CODE (op0) == ASHIFTRT
   3883 	      || GET_CODE (op0) == ASHIFT
   3884 	      || GET_CODE (op0) == ROTATE
   3885 	      || GET_CODE (op0) == ROTATERT))
   3886 	{
   3887 	  tem = simplify_distributive_operation (code, mode, op0, op1);
   3888 	  if (tem)
   3889 	    return tem;
   3890 	}
   3891 
   3892       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
   3893       if (tem)
   3894 	return tem;
   3895 
   3896       tem = simplify_associative_operation (code, mode, op0, op1);
   3897       if (tem)
   3898 	return tem;
   3899       break;
   3900 
   3901     case UDIV:
   3902       /* 0/x is 0 (or x&0 if x has side-effects).  */
   3903       if (trueop0 == CONST0_RTX (mode)
   3904 	  && !cfun->can_throw_non_call_exceptions)
   3905 	{
   3906 	  if (side_effects_p (op1))
   3907 	    return simplify_gen_binary (AND, mode, op1, trueop0);
   3908 	  return trueop0;
   3909 	}
   3910       /* x/1 is x.  */
   3911       if (trueop1 == CONST1_RTX (mode))
   3912 	{
   3913 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
   3914 	  if (tem)
   3915 	    return tem;
   3916 	}
   3917       /* Convert divide by power of two into shift.  */
   3918       if (CONST_INT_P (trueop1)
   3919 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
   3920 	return simplify_gen_binary (LSHIFTRT, mode, op0,
   3921 				    gen_int_shift_amount (mode, val));
   3922       break;
   3923 
   3924     case DIV:
   3925       /* Handle floating point and integers separately.  */
   3926       if (SCALAR_FLOAT_MODE_P (mode))
   3927 	{
   3928 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
   3929 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
   3930 	     NaN rather than 0.0.  Nor is it safe for modes with signed
   3931 	     zeros, since dividing 0 by a negative number gives -0.0  */
   3932 	  if (trueop0 == CONST0_RTX (mode)
   3933 	      && !HONOR_NANS (mode)
   3934 	      && !HONOR_SIGNED_ZEROS (mode)
   3935 	      && ! side_effects_p (op1))
   3936 	    return op0;
   3937 	  /* x/1.0 is x.  */
   3938 	  if (trueop1 == CONST1_RTX (mode)
   3939 	      && !HONOR_SNANS (mode))
   3940 	    return op0;
   3941 
   3942 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
   3943 	      && trueop1 != CONST0_RTX (mode))
   3944 	    {
   3945 	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
   3946 
   3947 	      /* x/-1.0 is -x.  */
   3948 	      if (real_equal (d1, &dconstm1)
   3949 		  && !HONOR_SNANS (mode))
   3950 		return simplify_gen_unary (NEG, mode, op0, mode);
   3951 
   3952 	      /* Change FP division by a constant into multiplication.
   3953 		 Only do this with -freciprocal-math.  */
   3954 	      if (flag_reciprocal_math
   3955 		  && !real_equal (d1, &dconst0))
   3956 		{
   3957 		  REAL_VALUE_TYPE d;
   3958 		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
   3959 		  tem = const_double_from_real_value (d, mode);
   3960 		  return simplify_gen_binary (MULT, mode, op0, tem);
   3961 		}
   3962 	    }
   3963 	}
   3964       else if (SCALAR_INT_MODE_P (mode))
   3965 	{
   3966 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
   3967 	  if (trueop0 == CONST0_RTX (mode)
   3968 	      && !cfun->can_throw_non_call_exceptions)
   3969 	    {
   3970 	      if (side_effects_p (op1))
   3971 		return simplify_gen_binary (AND, mode, op1, trueop0);
   3972 	      return trueop0;
   3973 	    }
   3974 	  /* x/1 is x.  */
   3975 	  if (trueop1 == CONST1_RTX (mode))
   3976 	    {
   3977 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
   3978 	      if (tem)
   3979 		return tem;
   3980 	    }
   3981 	  /* x/-1 is -x.  */
   3982 	  if (trueop1 == constm1_rtx)
   3983 	    {
   3984 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
   3985 	      if (x)
   3986 		return simplify_gen_unary (NEG, mode, x, mode);
   3987 	    }
   3988 	}
   3989       break;
   3990 
   3991     case UMOD:
   3992       /* 0%x is 0 (or x&0 if x has side-effects).  */
   3993       if (trueop0 == CONST0_RTX (mode))
   3994 	{
   3995 	  if (side_effects_p (op1))
   3996 	    return simplify_gen_binary (AND, mode, op1, trueop0);
   3997 	  return trueop0;
   3998 	}
    3999 	      /* x%1 is 0 (or x&0 if x has side-effects).  */
   4000       if (trueop1 == CONST1_RTX (mode))
   4001 	{
   4002 	  if (side_effects_p (op0))
   4003 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
   4004 	  return CONST0_RTX (mode);
   4005 	}
   4006       /* Implement modulus by power of two as AND.  */
   4007       if (CONST_INT_P (trueop1)
   4008 	  && exact_log2 (UINTVAL (trueop1)) > 0)
   4009 	return simplify_gen_binary (AND, mode, op0,
   4010 				    gen_int_mode (UINTVAL (trueop1) - 1,
   4011 						  mode));
   4012       break;
   4013 
   4014     case MOD:
   4015       /* 0%x is 0 (or x&0 if x has side-effects).  */
   4016       if (trueop0 == CONST0_RTX (mode))
   4017 	{
   4018 	  if (side_effects_p (op1))
   4019 	    return simplify_gen_binary (AND, mode, op1, trueop0);
   4020 	  return trueop0;
   4021 	}
   4022       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
   4023       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
   4024 	{
   4025 	  if (side_effects_p (op0))
   4026 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
   4027 	  return CONST0_RTX (mode);
   4028 	}
   4029       break;
   4030 
   4031     case ROTATERT:
   4032     case ROTATE:
   4033       if (trueop1 == CONST0_RTX (mode))
   4034 	return op0;
   4035       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
   4036 	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
   4037 	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
   4038 	 amount instead.  */
   4039 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
   4040       if (CONST_INT_P (trueop1)
   4041 	  && IN_RANGE (INTVAL (trueop1),
   4042 		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
   4043 		       GET_MODE_UNIT_PRECISION (mode) - 1))
   4044 	{
   4045 	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
   4046 	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
   4047 	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
   4048 				      mode, op0, new_amount_rtx);
   4049 	}
   4050 #endif
   4051       /* FALLTHRU */
   4052     case ASHIFTRT:
   4053       if (trueop1 == CONST0_RTX (mode))
   4054 	return op0;
   4055       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
   4056 	return op0;
   4057       /* Rotating ~0 always results in ~0.  */
   4058       if (CONST_INT_P (trueop0)
   4059 	  && HWI_COMPUTABLE_MODE_P (mode)
   4060 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
   4061 	  && ! side_effects_p (op1))
   4062 	return op0;
   4063 
   4064     canonicalize_shift:
   4065       /* Given:
   4066 	 scalar modes M1, M2
   4067 	 scalar constants c1, c2
   4068 	 size (M2) > size (M1)
   4069 	 c1 == size (M2) - size (M1)
   4070 	 optimize:
   4071 	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
   4072 				 <low_part>)
   4073 		      (const_int <c2>))
   4074 	 to:
   4075 	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
   4076 		    <low_part>).  */
   4077       if ((code == ASHIFTRT || code == LSHIFTRT)
   4078 	  && is_a <scalar_int_mode> (mode, &int_mode)
   4079 	  && SUBREG_P (op0)
   4080 	  && CONST_INT_P (op1)
   4081 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
   4082 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
   4083 				     &inner_mode)
   4084 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
   4085 	  && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
   4086 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
   4087 	      == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
   4088 	  && subreg_lowpart_p (op0))
   4089 	{
   4090 	  rtx tmp = gen_int_shift_amount
   4091 	    (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
   4092 
   4093 	 /* Combine would usually zero out the value when combining two
   4094 	    local shifts and the range becomes larger or equal to the mode.
   4095 	    However since we fold away one of the shifts here combine won't
   4096 	    see it so we should immediately zero the result if it's out of
   4097 	    range.  */
   4098 	 if (code == LSHIFTRT
   4099 	     && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
   4100 	  tmp = const0_rtx;
   4101 	 else
   4102 	   tmp = simplify_gen_binary (code,
   4103 				      inner_mode,
   4104 				      XEXP (SUBREG_REG (op0), 0),
   4105 				      tmp);
   4106 
   4107 	  return lowpart_subreg (int_mode, tmp, inner_mode);
   4108 	}
   4109 
   4110       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
   4111 	{
   4112 	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
   4113 	  if (val != INTVAL (op1))
   4114 	    return simplify_gen_binary (code, mode, op0,
   4115 					gen_int_shift_amount (mode, val));
   4116 	}
   4117       break;
   4118 
   4119     case SS_ASHIFT:
   4120       if (CONST_INT_P (trueop0)
   4121 	  && HWI_COMPUTABLE_MODE_P (mode)
   4122 	  && (UINTVAL (trueop0) == (GET_MODE_MASK (mode) >> 1)
   4123 	      || mode_signbit_p (mode, trueop0))
   4124 	  && ! side_effects_p (op1))
   4125 	return op0;
   4126       goto simplify_ashift;
   4127 
   4128     case US_ASHIFT:
   4129       if (CONST_INT_P (trueop0)
   4130 	  && HWI_COMPUTABLE_MODE_P (mode)
   4131 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
   4132 	  && ! side_effects_p (op1))
   4133 	return op0;
   4134       /* FALLTHRU */
   4135 
   4136     case ASHIFT:
   4137 simplify_ashift:
   4138       if (trueop1 == CONST0_RTX (mode))
   4139 	return op0;
   4140       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
   4141 	return op0;
   4142       if (mem_depth
   4143 	  && code == ASHIFT
   4144 	  && CONST_INT_P (trueop1)
   4145 	  && is_a <scalar_int_mode> (mode, &int_mode)
   4146 	  && IN_RANGE (UINTVAL (trueop1),
   4147 		       1, GET_MODE_PRECISION (int_mode) - 1))
   4148 	{
   4149 	  auto c = (wi::one (GET_MODE_PRECISION (int_mode))
   4150 		    << UINTVAL (trueop1));
   4151 	  rtx new_op1 = immed_wide_int_const (c, int_mode);
   4152 	  return simplify_gen_binary (MULT, int_mode, op0, new_op1);
   4153 	}
   4154       goto canonicalize_shift;
   4155 
   4156     case LSHIFTRT:
   4157       if (trueop1 == CONST0_RTX (mode))
   4158 	return op0;
   4159       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
   4160 	return op0;
   4161       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
   4162       if (GET_CODE (op0) == CLZ
   4163 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
   4164 	  && CONST_INT_P (trueop1)
   4165 	  && STORE_FLAG_VALUE == 1
   4166 	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
   4167 	{
   4168 	  unsigned HOST_WIDE_INT zero_val = 0;
   4169 
   4170 	  if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
   4171 	      && zero_val == GET_MODE_PRECISION (inner_mode)
   4172 	      && INTVAL (trueop1) == exact_log2 (zero_val))
   4173 	    return simplify_gen_relational (EQ, mode, inner_mode,
   4174 					    XEXP (op0, 0), const0_rtx);
   4175 	}
   4176       goto canonicalize_shift;
   4177 
   4178     case SMIN:
   4179       if (HWI_COMPUTABLE_MODE_P (mode)
   4180 	  && mode_signbit_p (mode, trueop1)
   4181 	  && ! side_effects_p (op0))
   4182 	return op1;
   4183       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
   4184 	return op0;
   4185       tem = simplify_associative_operation (code, mode, op0, op1);
   4186       if (tem)
   4187 	return tem;
   4188       break;
   4189 
   4190     case SMAX:
   4191       if (HWI_COMPUTABLE_MODE_P (mode)
   4192 	  && CONST_INT_P (trueop1)
   4193 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
   4194 	  && ! side_effects_p (op0))
   4195 	return op1;
   4196       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
   4197 	return op0;
   4198       tem = simplify_associative_operation (code, mode, op0, op1);
   4199       if (tem)
   4200 	return tem;
   4201       break;
   4202 
   4203     case UMIN:
   4204       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
   4205 	return op1;
   4206       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
   4207 	return op0;
   4208       tem = simplify_associative_operation (code, mode, op0, op1);
   4209       if (tem)
   4210 	return tem;
   4211       break;
   4212 
   4213     case UMAX:
   4214       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
   4215 	return op1;
   4216       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
   4217 	return op0;
   4218       tem = simplify_associative_operation (code, mode, op0, op1);
   4219       if (tem)
   4220 	return tem;
   4221       break;
   4222 
   4223     case SS_PLUS:
   4224     case US_PLUS:
   4225     case SS_MINUS:
   4226     case US_MINUS:
   4227       /* Simplify x +/- 0 to x, if possible.  */
   4228       if (trueop1 == CONST0_RTX (mode))
   4229 	return op0;
   4230       return 0;
   4231 
   4232     case SS_MULT:
   4233     case US_MULT:
   4234       /* Simplify x * 0 to 0, if possible.  */
   4235       if (trueop1 == CONST0_RTX (mode)
   4236 	  && !side_effects_p (op0))
   4237 	return op1;
   4238 
   4239       /* Simplify x * 1 to x, if possible.  */
   4240       if (trueop1 == CONST1_RTX (mode))
   4241 	return op0;
   4242       return 0;
   4243 
   4244     case SMUL_HIGHPART:
   4245     case UMUL_HIGHPART:
   4246       /* Simplify x * 0 to 0, if possible.  */
   4247       if (trueop1 == CONST0_RTX (mode)
   4248 	  && !side_effects_p (op0))
   4249 	return op1;
   4250       return 0;
   4251 
   4252     case SS_DIV:
   4253     case US_DIV:
   4254       /* Simplify x / 1 to x, if possible.  */
   4255       if (trueop1 == CONST1_RTX (mode))
   4256 	return op0;
   4257       return 0;
   4258 
   4259     case VEC_SERIES:
   4260       if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
   4261 	return gen_vec_duplicate (mode, op0);
   4262       if (valid_for_const_vector_p (mode, op0)
   4263 	  && valid_for_const_vector_p (mode, op1))
   4264 	return gen_const_vec_series (mode, op0, op1);
   4265       return 0;
   4266 
   4267     case VEC_SELECT:
   4268       if (!VECTOR_MODE_P (mode))
   4269 	{
   4270 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
   4271 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
   4272 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
   4273 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
   4274 
   4275 	  /* We can't reason about selections made at runtime.  */
   4276 	  if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
   4277 	    return 0;
   4278 
   4279 	  if (vec_duplicate_p (trueop0, &elt0))
   4280 	    return elt0;
   4281 
   4282 	  if (GET_CODE (trueop0) == CONST_VECTOR)
   4283 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
   4284 						      (trueop1, 0, 0)));
   4285 
   4286 	  /* Extract a scalar element from a nested VEC_SELECT expression
   4287 	     (with optional nested VEC_CONCAT expression).  Some targets
   4288 	     (i386) extract scalar element from a vector using chain of
   4289 	     nested VEC_SELECT expressions.  When input operand is a memory
   4290 	     operand, this operation can be simplified to a simple scalar
	     4291 	     load from an offset memory address.  */
   4292 	  int n_elts;
   4293 	  if (GET_CODE (trueop0) == VEC_SELECT
   4294 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
   4295 		  .is_constant (&n_elts)))
   4296 	    {
   4297 	      rtx op0 = XEXP (trueop0, 0);
   4298 	      rtx op1 = XEXP (trueop0, 1);
   4299 
   4300 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
   4301 	      int elem;
   4302 
   4303 	      rtvec vec;
   4304 	      rtx tmp_op, tmp;
   4305 
   4306 	      gcc_assert (GET_CODE (op1) == PARALLEL);
   4307 	      gcc_assert (i < n_elts);
   4308 
   4309 	      /* Select element, pointed by nested selector.  */
   4310 	      elem = INTVAL (XVECEXP (op1, 0, i));
   4311 
   4312 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
   4313 	      if (GET_CODE (op0) == VEC_CONCAT)
   4314 		{
   4315 		  rtx op00 = XEXP (op0, 0);
   4316 		  rtx op01 = XEXP (op0, 1);
   4317 
   4318 		  machine_mode mode00, mode01;
   4319 		  int n_elts00, n_elts01;
   4320 
   4321 		  mode00 = GET_MODE (op00);
   4322 		  mode01 = GET_MODE (op01);
   4323 
   4324 		  /* Find out the number of elements of each operand.
   4325 		     Since the concatenated result has a constant number
   4326 		     of elements, the operands must too.  */
   4327 		  n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
   4328 		  n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
   4329 
   4330 		  gcc_assert (n_elts == n_elts00 + n_elts01);
   4331 
   4332 		  /* Select correct operand of VEC_CONCAT
   4333 		     and adjust selector. */
   4334 		  if (elem < n_elts01)
   4335 		    tmp_op = op00;
   4336 		  else
   4337 		    {
   4338 		      tmp_op = op01;
   4339 		      elem -= n_elts00;
   4340 		    }
   4341 		}
   4342 	      else
   4343 		tmp_op = op0;
   4344 
   4345 	      vec = rtvec_alloc (1);
   4346 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
   4347 
   4348 	      tmp = gen_rtx_fmt_ee (code, mode,
   4349 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
   4350 	      return tmp;
   4351 	    }
   4352 	}
   4353       else
   4354 	{
   4355 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
   4356 	  gcc_assert (GET_MODE_INNER (mode)
   4357 		      == GET_MODE_INNER (GET_MODE (trueop0)));
   4358 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
   4359 
   4360 	  if (vec_duplicate_p (trueop0, &elt0))
   4361 	    /* It doesn't matter which elements are selected by trueop1,
   4362 	       because they are all the same.  */
   4363 	    return gen_vec_duplicate (mode, elt0);
   4364 
   4365 	  if (GET_CODE (trueop0) == CONST_VECTOR)
   4366 	    {
   4367 	      unsigned n_elts = XVECLEN (trueop1, 0);
   4368 	      rtvec v = rtvec_alloc (n_elts);
   4369 	      unsigned int i;
   4370 
   4371 	      gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
   4372 	      for (i = 0; i < n_elts; i++)
   4373 		{
   4374 		  rtx x = XVECEXP (trueop1, 0, i);
   4375 
   4376 		  if (!CONST_INT_P (x))
   4377 		    return 0;
   4378 
   4379 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
   4380 						       INTVAL (x));
   4381 		}
   4382 
   4383 	      return gen_rtx_CONST_VECTOR (mode, v);
   4384 	    }
   4385 
   4386 	  /* Recognize the identity.  */
   4387 	  if (GET_MODE (trueop0) == mode)
   4388 	    {
   4389 	      bool maybe_ident = true;
   4390 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
   4391 		{
   4392 		  rtx j = XVECEXP (trueop1, 0, i);
   4393 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
   4394 		    {
   4395 		      maybe_ident = false;
   4396 		      break;
   4397 		    }
   4398 		}
   4399 	      if (maybe_ident)
   4400 		return trueop0;
   4401 	    }
   4402 
   4403 	  /* If we select a low-part subreg, return that.  */
   4404 	  if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
   4405 	    {
   4406 	      rtx new_rtx = lowpart_subreg (mode, trueop0,
   4407 					    GET_MODE (trueop0));
   4408 	      if (new_rtx != NULL_RTX)
   4409 		return new_rtx;
   4410 	    }
   4411 
   4412 	  /* If we build {a,b} then permute it, build the result directly.  */
   4413 	  if (XVECLEN (trueop1, 0) == 2
   4414 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
   4415 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
   4416 	      && GET_CODE (trueop0) == VEC_CONCAT
   4417 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
   4418 	      && GET_MODE (XEXP (trueop0, 0)) == mode
   4419 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
   4420 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
   4421 	    {
   4422 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
   4423 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
   4424 	      rtx subop0, subop1;
   4425 
   4426 	      gcc_assert (i0 < 4 && i1 < 4);
   4427 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
   4428 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
   4429 
   4430 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
   4431 	    }
   4432 
   4433 	  if (XVECLEN (trueop1, 0) == 2
   4434 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
   4435 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
   4436 	      && GET_CODE (trueop0) == VEC_CONCAT
   4437 	      && GET_MODE (trueop0) == mode)
   4438 	    {
   4439 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
   4440 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
   4441 	      rtx subop0, subop1;
   4442 
   4443 	      gcc_assert (i0 < 2 && i1 < 2);
   4444 	      subop0 = XEXP (trueop0, i0);
   4445 	      subop1 = XEXP (trueop0, i1);
   4446 
   4447 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
   4448 	    }
   4449 
   4450 	  /* If we select one half of a vec_concat, return that.  */
   4451 	  int l0, l1;
   4452 	  if (GET_CODE (trueop0) == VEC_CONCAT
   4453 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
   4454 		  .is_constant (&l0))
   4455 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
   4456 		  .is_constant (&l1))
   4457 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
   4458 	    {
   4459 	      rtx subop0 = XEXP (trueop0, 0);
   4460 	      rtx subop1 = XEXP (trueop0, 1);
   4461 	      machine_mode mode0 = GET_MODE (subop0);
   4462 	      machine_mode mode1 = GET_MODE (subop1);
   4463 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
   4464 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
   4465 		{
   4466 		  bool success = true;
   4467 		  for (int i = 1; i < l0; ++i)
   4468 		    {
   4469 		      rtx j = XVECEXP (trueop1, 0, i);
   4470 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
   4471 			{
   4472 			  success = false;
   4473 			  break;
   4474 			}
   4475 		    }
   4476 		  if (success)
   4477 		    return subop0;
   4478 		}
   4479 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
   4480 		{
   4481 		  bool success = true;
   4482 		  for (int i = 1; i < l1; ++i)
   4483 		    {
   4484 		      rtx j = XVECEXP (trueop1, 0, i);
   4485 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
   4486 			{
   4487 			  success = false;
   4488 			  break;
   4489 			}
   4490 		    }
   4491 		  if (success)
   4492 		    return subop1;
   4493 		}
   4494 	    }
   4495 
   4496 	  /* Simplify vec_select of a subreg of X to just a vec_select of X
   4497 	     when X has same component mode as vec_select.  */
   4498 	  unsigned HOST_WIDE_INT subreg_offset = 0;
   4499 	  if (GET_CODE (trueop0) == SUBREG
   4500 	      && GET_MODE_INNER (mode)
   4501 		 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
   4502 	      && GET_MODE_NUNITS (mode).is_constant (&l1)
   4503 	      && constant_multiple_p (subreg_memory_offset (trueop0),
   4504 				      GET_MODE_UNIT_BITSIZE (mode),
   4505 				      &subreg_offset))
   4506 	    {
   4507 	      poly_uint64 nunits
   4508 		= GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
   4509 	      bool success = true;
   4510 	      for (int i = 0; i != l1; i++)
   4511 		{
   4512 		  rtx idx = XVECEXP (trueop1, 0, i);
   4513 		  if (!CONST_INT_P (idx)
   4514 		      || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
   4515 		    {
   4516 		      success = false;
   4517 		      break;
   4518 		    }
   4519 		}
   4520 
   4521 	      if (success)
   4522 		{
   4523 		  rtx par = trueop1;
   4524 		  if (subreg_offset)
   4525 		    {
   4526 		      rtvec vec = rtvec_alloc (l1);
   4527 		      for (int i = 0; i < l1; i++)
   4528 			RTVEC_ELT (vec, i)
   4529 			  = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
   4530 				     + subreg_offset);
   4531 		      par = gen_rtx_PARALLEL (VOIDmode, vec);
   4532 		    }
   4533 		  return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
   4534 		}
   4535 	    }
   4536 	}
   4537 
   4538       if (XVECLEN (trueop1, 0) == 1
   4539 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
   4540 	  && GET_CODE (trueop0) == VEC_CONCAT)
   4541 	{
   4542 	  rtx vec = trueop0;
   4543 	  offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
   4544 
   4545 	  /* Try to find the element in the VEC_CONCAT.  */
   4546 	  while (GET_MODE (vec) != mode
   4547 		 && GET_CODE (vec) == VEC_CONCAT)
   4548 	    {
   4549 	      poly_int64 vec_size;
   4550 
   4551 	      if (CONST_INT_P (XEXP (vec, 0)))
   4552 	        {
   4553 	          /* vec_concat of two const_ints doesn't make sense with
   4554 	             respect to modes.  */
   4555 	          if (CONST_INT_P (XEXP (vec, 1)))
   4556 	            return 0;
   4557 
   4558 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
   4559 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
   4560 	        }
   4561 	      else
   4562 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
   4563 
   4564 	      if (known_lt (offset, vec_size))
   4565 		vec = XEXP (vec, 0);
   4566 	      else if (known_ge (offset, vec_size))
   4567 		{
   4568 		  offset -= vec_size;
   4569 		  vec = XEXP (vec, 1);
   4570 		}
   4571 	      else
   4572 		break;
   4573 	      vec = avoid_constant_pool_reference (vec);
   4574 	    }
   4575 
   4576 	  if (GET_MODE (vec) == mode)
   4577 	    return vec;
   4578 	}
   4579 
   4580       /* If we select elements in a vec_merge that all come from the same
   4581 	 operand, select from that operand directly.  */
   4582       if (GET_CODE (op0) == VEC_MERGE)
   4583 	{
   4584 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
   4585 	  if (CONST_INT_P (trueop02))
   4586 	    {
   4587 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
   4588 	      bool all_operand0 = true;
   4589 	      bool all_operand1 = true;
   4590 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
   4591 		{
   4592 		  rtx j = XVECEXP (trueop1, 0, i);
   4593 		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
   4594 		    all_operand1 = false;
   4595 		  else
   4596 		    all_operand0 = false;
   4597 		}
   4598 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
   4599 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
   4600 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
   4601 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
   4602 	    }
   4603 	}
   4604 
   4605       /* If we have two nested selects that are inverses of each
   4606 	 other, replace them with the source operand.  */
   4607       if (GET_CODE (trueop0) == VEC_SELECT
   4608 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
   4609 	{
   4610 	  rtx op0_subop1 = XEXP (trueop0, 1);
   4611 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
   4612 	  gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
   4613 
   4614 	  /* Apply the outer ordering vector to the inner one.  (The inner
   4615 	     ordering vector is expressly permitted to be of a different
   4616 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
   4617 	     then the two VEC_SELECTs cancel.  */
   4618 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
   4619 	    {
   4620 	      rtx x = XVECEXP (trueop1, 0, i);
   4621 	      if (!CONST_INT_P (x))
   4622 		return 0;
   4623 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
   4624 	      if (!CONST_INT_P (y) || i != INTVAL (y))
   4625 		return 0;
   4626 	    }
   4627 	  return XEXP (trueop0, 0);
   4628 	}
   4629 
   4630       return 0;
   4631     case VEC_CONCAT:
   4632       {
   4633 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
   4634 				      ? GET_MODE (trueop0)
   4635 				      : GET_MODE_INNER (mode));
   4636 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
   4637 				      ? GET_MODE (trueop1)
   4638 				      : GET_MODE_INNER (mode));
   4639 
   4640 	gcc_assert (VECTOR_MODE_P (mode));
   4641 	gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
   4642 			      + GET_MODE_SIZE (op1_mode),
   4643 			      GET_MODE_SIZE (mode)));
   4644 
   4645 	if (VECTOR_MODE_P (op0_mode))
   4646 	  gcc_assert (GET_MODE_INNER (mode)
   4647 		      == GET_MODE_INNER (op0_mode));
   4648 	else
   4649 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
   4650 
   4651 	if (VECTOR_MODE_P (op1_mode))
   4652 	  gcc_assert (GET_MODE_INNER (mode)
   4653 		      == GET_MODE_INNER (op1_mode));
   4654 	else
   4655 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
   4656 
   4657 	unsigned int n_elts, in_n_elts;
   4658 	if ((GET_CODE (trueop0) == CONST_VECTOR
   4659 	     || CONST_SCALAR_INT_P (trueop0)
   4660 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
   4661 	    && (GET_CODE (trueop1) == CONST_VECTOR
   4662 		|| CONST_SCALAR_INT_P (trueop1)
   4663 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1))
   4664 	    && GET_MODE_NUNITS (mode).is_constant (&n_elts)
   4665 	    && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
   4666 	  {
   4667 	    rtvec v = rtvec_alloc (n_elts);
   4668 	    unsigned int i;
   4669 	    for (i = 0; i < n_elts; i++)
   4670 	      {
   4671 		if (i < in_n_elts)
   4672 		  {
   4673 		    if (!VECTOR_MODE_P (op0_mode))
   4674 		      RTVEC_ELT (v, i) = trueop0;
   4675 		    else
   4676 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
   4677 		  }
   4678 		else
   4679 		  {
   4680 		    if (!VECTOR_MODE_P (op1_mode))
   4681 		      RTVEC_ELT (v, i) = trueop1;
   4682 		    else
   4683 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
   4684 							   i - in_n_elts);
   4685 		  }
   4686 	      }
   4687 
   4688 	    return gen_rtx_CONST_VECTOR (mode, v);
   4689 	  }
   4690 
   4691 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
   4692 	   Restrict the transformation to avoid generating a VEC_SELECT with a
   4693 	   mode unrelated to its operand.  */
   4694 	if (GET_CODE (trueop0) == VEC_SELECT
   4695 	    && GET_CODE (trueop1) == VEC_SELECT
   4696 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
   4697 	    && GET_MODE_INNER (GET_MODE (XEXP (trueop0, 0)))
   4698 	       == GET_MODE_INNER(mode))
   4699 	  {
   4700 	    rtx par0 = XEXP (trueop0, 1);
   4701 	    rtx par1 = XEXP (trueop1, 1);
   4702 	    int len0 = XVECLEN (par0, 0);
   4703 	    int len1 = XVECLEN (par1, 0);
   4704 	    rtvec vec = rtvec_alloc (len0 + len1);
   4705 	    for (int i = 0; i < len0; i++)
   4706 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
   4707 	    for (int i = 0; i < len1; i++)
   4708 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
   4709 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
   4710 					gen_rtx_PARALLEL (VOIDmode, vec));
   4711 	  }
   4712       }
   4713       return 0;
   4714 
   4715     default:
   4716       gcc_unreachable ();
   4717     }
   4718 
   4719   if (mode == GET_MODE (op0)
   4720       && mode == GET_MODE (op1)
   4721       && vec_duplicate_p (op0, &elt0)
   4722       && vec_duplicate_p (op1, &elt1))
   4723     {
   4724       /* Try applying the operator to ELT and see if that simplifies.
   4725 	 We can duplicate the result if so.
   4726 
   4727 	 The reason we don't use simplify_gen_binary is that it isn't
   4728 	 necessarily a win to convert things like:
   4729 
   4730 	   (plus:V (vec_duplicate:V (reg:S R1))
   4731 		   (vec_duplicate:V (reg:S R2)))
   4732 
   4733 	 to:
   4734 
   4735 	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
   4736 
   4737 	 The first might be done entirely in vector registers while the
   4738 	 second might need a move between register files.  */
   4739       tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
   4740 				       elt0, elt1);
   4741       if (tem)
   4742 	return gen_vec_duplicate (mode, tem);
   4743     }
   4744 
   4745   return 0;
   4746 }
   4747 
   4748 /* Return true if binary operation OP distributes over addition in operand
   4749    OPNO, with the other operand being held constant.  OPNO counts from 1.  */
   4750 
   4751 static bool
   4752 distributes_over_addition_p (rtx_code op, int opno)
   4753 {
   4754   switch (op)
   4755     {
   4756     case PLUS:
   4757     case MINUS:
   4758     case MULT:
   4759       return true;
   4760 
   4761     case ASHIFT:
   4762       return opno == 1;
   4763 
   4764     default:
   4765       return false;
   4766     }
   4767 }
   4768 
   4769 rtx
   4770 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
   4771 				 rtx op0, rtx op1)
   4772 {
   4773   if (VECTOR_MODE_P (mode)
   4774       && code != VEC_CONCAT
   4775       && GET_CODE (op0) == CONST_VECTOR
   4776       && GET_CODE (op1) == CONST_VECTOR)
   4777     {
   4778       bool step_ok_p;
   4779       if (CONST_VECTOR_STEPPED_P (op0)
   4780 	  && CONST_VECTOR_STEPPED_P (op1))
   4781 	/* We can operate directly on the encoding if:
   4782 
   4783 	      a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
   4784 	    implies
   4785 	      (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
   4786 
   4787 	   Addition and subtraction are the supported operators
   4788 	   for which this is true.  */
   4789 	step_ok_p = (code == PLUS || code == MINUS);
   4790       else if (CONST_VECTOR_STEPPED_P (op0))
   4791 	/* We can operate directly on stepped encodings if:
   4792 
   4793 	     a3 - a2 == a2 - a1
   4794 	   implies:
   4795 	     (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
   4796 
   4797 	   which is true if (x -> x op c) distributes over addition.  */
   4798 	step_ok_p = distributes_over_addition_p (code, 1);
   4799       else
   4800 	/* Similarly in reverse.  */
   4801 	step_ok_p = distributes_over_addition_p (code, 2);
   4802       rtx_vector_builder builder;
   4803       if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
   4804 	return 0;
   4805 
   4806       unsigned int count = builder.encoded_nelts ();
   4807       for (unsigned int i = 0; i < count; i++)
   4808 	{
   4809 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
   4810 					     CONST_VECTOR_ELT (op0, i),
   4811 					     CONST_VECTOR_ELT (op1, i));
   4812 	  if (!x || !valid_for_const_vector_p (mode, x))
   4813 	    return 0;
   4814 	  builder.quick_push (x);
   4815 	}
   4816       return builder.build ();
   4817     }
   4818 
   4819   if (VECTOR_MODE_P (mode)
   4820       && code == VEC_CONCAT
   4821       && (CONST_SCALAR_INT_P (op0)
   4822 	  || CONST_FIXED_P (op0)
   4823 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
   4824       && (CONST_SCALAR_INT_P (op1)
   4825 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
   4826 	  || CONST_FIXED_P (op1)))
   4827     {
   4828       /* Both inputs have a constant number of elements, so the result
   4829 	 must too.  */
   4830       unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
   4831       rtvec v = rtvec_alloc (n_elts);
   4832 
   4833       gcc_assert (n_elts >= 2);
   4834       if (n_elts == 2)
   4835 	{
   4836 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
   4837 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
   4838 
   4839 	  RTVEC_ELT (v, 0) = op0;
   4840 	  RTVEC_ELT (v, 1) = op1;
   4841 	}
   4842       else
   4843 	{
   4844 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
   4845 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
   4846 	  unsigned i;
   4847 
   4848 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
   4849 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
   4850 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
   4851 
   4852 	  for (i = 0; i < op0_n_elts; ++i)
   4853 	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
   4854 	  for (i = 0; i < op1_n_elts; ++i)
   4855 	    RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
   4856 	}
   4857 
   4858       return gen_rtx_CONST_VECTOR (mode, v);
   4859     }
   4860 
   4861   if (SCALAR_FLOAT_MODE_P (mode)
   4862       && CONST_DOUBLE_AS_FLOAT_P (op0)
   4863       && CONST_DOUBLE_AS_FLOAT_P (op1)
   4864       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
   4865     {
   4866       if (code == AND
   4867 	  || code == IOR
   4868 	  || code == XOR)
   4869 	{
   4870 	  long tmp0[4];
   4871 	  long tmp1[4];
   4872 	  REAL_VALUE_TYPE r;
   4873 	  int i;
   4874 
   4875 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
   4876 			  GET_MODE (op0));
   4877 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
   4878 			  GET_MODE (op1));
   4879 	  for (i = 0; i < 4; i++)
   4880 	    {
   4881 	      switch (code)
   4882 	      {
   4883 	      case AND:
   4884 		tmp0[i] &= tmp1[i];
   4885 		break;
   4886 	      case IOR:
   4887 		tmp0[i] |= tmp1[i];
   4888 		break;
   4889 	      case XOR:
   4890 		tmp0[i] ^= tmp1[i];
   4891 		break;
   4892 	      default:
   4893 		gcc_unreachable ();
   4894 	      }
   4895 	    }
   4896 	   real_from_target (&r, tmp0, mode);
   4897 	   return const_double_from_real_value (r, mode);
   4898 	}
   4899       else
   4900 	{
   4901 	  REAL_VALUE_TYPE f0, f1, value, result;
   4902 	  const REAL_VALUE_TYPE *opr0, *opr1;
   4903 	  bool inexact;
   4904 
   4905 	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
   4906 	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);
   4907 
   4908 	  if (HONOR_SNANS (mode)
   4909 	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
   4910 	          || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
   4911 	    return 0;
   4912 
   4913 	  real_convert (&f0, mode, opr0);
   4914 	  real_convert (&f1, mode, opr1);
   4915 
   4916 	  if (code == DIV
   4917 	      && real_equal (&f1, &dconst0)
   4918 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
   4919 	    return 0;
   4920 
   4921 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
   4922 	      && flag_trapping_math
   4923 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
   4924 	    {
   4925 	      int s0 = REAL_VALUE_NEGATIVE (f0);
   4926 	      int s1 = REAL_VALUE_NEGATIVE (f1);
   4927 
   4928 	      switch (code)
   4929 		{
   4930 		case PLUS:
   4931 		  /* Inf + -Inf = NaN plus exception.  */
   4932 		  if (s0 != s1)
   4933 		    return 0;
   4934 		  break;
   4935 		case MINUS:
   4936 		  /* Inf - Inf = NaN plus exception.  */
   4937 		  if (s0 == s1)
   4938 		    return 0;
   4939 		  break;
   4940 		case DIV:
   4941 		  /* Inf / Inf = NaN plus exception.  */
   4942 		  return 0;
   4943 		default:
   4944 		  break;
   4945 		}
   4946 	    }
   4947 
   4948 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
   4949 	      && flag_trapping_math
   4950 	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
   4951 		  || (REAL_VALUE_ISINF (f1)
   4952 		      && real_equal (&f0, &dconst0))))
   4953 	    /* Inf * 0 = NaN plus exception.  */
   4954 	    return 0;
   4955 
   4956 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
   4957 				     &f0, &f1);
   4958 	  real_convert (&result, mode, &value);
   4959 
   4960 	  /* Don't constant fold this floating point operation if
   4961 	     the result has overflowed and flag_trapping_math.  */
   4962 
   4963 	  if (flag_trapping_math
   4964 	      && MODE_HAS_INFINITIES (mode)
   4965 	      && REAL_VALUE_ISINF (result)
   4966 	      && !REAL_VALUE_ISINF (f0)
   4967 	      && !REAL_VALUE_ISINF (f1))
   4968 	    /* Overflow plus exception.  */
   4969 	    return 0;
   4970 
   4971 	  /* Don't constant fold this floating point operation if the
    4972 	     result may depend upon the run-time rounding mode and
   4973 	     flag_rounding_math is set, or if GCC's software emulation
   4974 	     is unable to accurately represent the result.  */
   4975 
   4976 	  if ((flag_rounding_math
   4977 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
   4978 	      && (inexact || !real_identical (&result, &value)))
   4979 	    return NULL_RTX;
   4980 
   4981 	  return const_double_from_real_value (result, mode);
   4982 	}
   4983     }
   4984 
   4985   /* We can fold some multi-word operations.  */
   4986   scalar_int_mode int_mode;
   4987   if (is_a <scalar_int_mode> (mode, &int_mode)
   4988       && CONST_SCALAR_INT_P (op0)
   4989       && CONST_SCALAR_INT_P (op1)
   4990       && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
   4991     {
   4992       wide_int result;
   4993       wi::overflow_type overflow;
   4994       rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
   4995       rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
   4996 
   4997 #if TARGET_SUPPORTS_WIDE_INT == 0
   4998       /* This assert keeps the simplification from producing a result
   4999 	 that cannot be represented in a CONST_DOUBLE but a lot of
   5000 	 upstream callers expect that this function never fails to
    5001 	 simplify something and so if you added this to the test
   5002 	 above the code would die later anyway.  If this assert
   5003 	 happens, you just need to make the port support wide int.  */
   5004       gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
   5005 #endif
   5006       switch (code)
   5007 	{
   5008 	case MINUS:
   5009 	  result = wi::sub (pop0, pop1);
   5010 	  break;
   5011 
   5012 	case PLUS:
   5013 	  result = wi::add (pop0, pop1);
   5014 	  break;
   5015 
   5016 	case MULT:
   5017 	  result = wi::mul (pop0, pop1);
   5018 	  break;
   5019 
   5020 	case DIV:
   5021 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
   5022 	  if (overflow)
   5023 	    return NULL_RTX;
   5024 	  break;
   5025 
   5026 	case MOD:
   5027 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
   5028 	  if (overflow)
   5029 	    return NULL_RTX;
   5030 	  break;
   5031 
   5032 	case UDIV:
   5033 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
   5034 	  if (overflow)
   5035 	    return NULL_RTX;
   5036 	  break;
   5037 
   5038 	case UMOD:
   5039 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
   5040 	  if (overflow)
   5041 	    return NULL_RTX;
   5042 	  break;
   5043 
   5044 	case AND:
   5045 	  result = wi::bit_and (pop0, pop1);
   5046 	  break;
   5047 
   5048 	case IOR:
   5049 	  result = wi::bit_or (pop0, pop1);
   5050 	  break;
   5051 
   5052 	case XOR:
   5053 	  result = wi::bit_xor (pop0, pop1);
   5054 	  break;
   5055 
   5056 	case SMIN:
   5057 	  result = wi::smin (pop0, pop1);
   5058 	  break;
   5059 
   5060 	case SMAX:
   5061 	  result = wi::smax (pop0, pop1);
   5062 	  break;
   5063 
   5064 	case UMIN:
   5065 	  result = wi::umin (pop0, pop1);
   5066 	  break;
   5067 
   5068 	case UMAX:
   5069 	  result = wi::umax (pop0, pop1);
   5070 	  break;
   5071 
   5072 	case LSHIFTRT:
   5073 	case ASHIFTRT:
   5074 	case ASHIFT:
   5075 	case SS_ASHIFT:
   5076 	case US_ASHIFT:
   5077 	  {
   5078 	    /* The shift count might be in SImode while int_mode might
   5079 	       be narrower.  On IA-64 it is even DImode.  If the shift
   5080 	       count is too large and doesn't fit into int_mode, we'd
   5081 	       ICE.  So, if int_mode is narrower than word, use
   5082 	       word_mode for the shift count.  */
   5083 	    if (GET_MODE (op1) == VOIDmode
   5084 		&& GET_MODE_PRECISION (int_mode) < BITS_PER_WORD)
   5085 	      pop1 = rtx_mode_t (op1, word_mode);
   5086 
   5087 	    wide_int wop1 = pop1;
   5088 	    if (SHIFT_COUNT_TRUNCATED)
   5089 	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
   5090 	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
   5091 	      return NULL_RTX;
   5092 
   5093 	    switch (code)
   5094 	      {
   5095 	      case LSHIFTRT:
   5096 		result = wi::lrshift (pop0, wop1);
   5097 		break;
   5098 
   5099 	      case ASHIFTRT:
   5100 		result = wi::arshift (pop0, wop1);
   5101 		break;
   5102 
   5103 	      case ASHIFT:
   5104 		result = wi::lshift (pop0, wop1);
   5105 		break;
   5106 
   5107 	      case SS_ASHIFT:
   5108 		if (wi::leu_p (wop1, wi::clrsb (pop0)))
   5109 		  result = wi::lshift (pop0, wop1);
   5110 		else if (wi::neg_p (pop0))
   5111 		  result = wi::min_value (int_mode, SIGNED);
   5112 		else
   5113 		  result = wi::max_value (int_mode, SIGNED);
   5114 		break;
   5115 
   5116 	      case US_ASHIFT:
   5117 		if (wi::eq_p (pop0, 0))
   5118 		  result = pop0;
   5119 		else if (wi::leu_p (wop1, wi::clz (pop0)))
   5120 		  result = wi::lshift (pop0, wop1);
   5121 		else
   5122 		  result = wi::max_value (int_mode, UNSIGNED);
   5123 		break;
   5124 
   5125 	      default:
   5126 		gcc_unreachable ();
   5127 	      }
   5128 	    break;
   5129 	  }
   5130 	case ROTATE:
   5131 	case ROTATERT:
   5132 	  {
   5133 	    /* The rotate count might be in SImode while int_mode might
   5134 	       be narrower.  On IA-64 it is even DImode.  If the shift
   5135 	       count is too large and doesn't fit into int_mode, we'd
   5136 	       ICE.  So, if int_mode is narrower than word, use
   5137 	       word_mode for the shift count.  */
   5138 	    if (GET_MODE (op1) == VOIDmode
   5139 		&& GET_MODE_PRECISION (int_mode) < BITS_PER_WORD)
   5140 	      pop1 = rtx_mode_t (op1, word_mode);
   5141 
   5142 	    if (wi::neg_p (pop1))
   5143 	      return NULL_RTX;
   5144 
   5145 	    switch (code)
   5146 	      {
   5147 	      case ROTATE:
   5148 		result = wi::lrotate (pop0, pop1);
   5149 		break;
   5150 
   5151 	      case ROTATERT:
   5152 		result = wi::rrotate (pop0, pop1);
   5153 		break;
   5154 
   5155 	      default:
   5156 		gcc_unreachable ();
   5157 	      }
   5158 	    break;
   5159 	  }
   5160 
   5161 	case SS_PLUS:
   5162 	  result = wi::add (pop0, pop1, SIGNED, &overflow);
   5163  clamp_signed_saturation:
   5164 	  if (overflow == wi::OVF_OVERFLOW)
   5165 	    result = wi::max_value (GET_MODE_PRECISION (int_mode), SIGNED);
   5166 	  else if (overflow == wi::OVF_UNDERFLOW)
   5167 	    result = wi::min_value (GET_MODE_PRECISION (int_mode), SIGNED);
   5168 	  else if (overflow != wi::OVF_NONE)
   5169 	    return NULL_RTX;
   5170 	  break;
   5171 
   5172 	case US_PLUS:
   5173 	  result = wi::add (pop0, pop1, UNSIGNED, &overflow);
   5174  clamp_unsigned_saturation:
   5175 	  if (overflow != wi::OVF_NONE)
   5176 	    result = wi::max_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
   5177 	  break;
   5178 
   5179 	case SS_MINUS:
   5180 	  result = wi::sub (pop0, pop1, SIGNED, &overflow);
   5181 	  goto clamp_signed_saturation;
   5182 
   5183 	case US_MINUS:
   5184 	  result = wi::sub (pop0, pop1, UNSIGNED, &overflow);
   5185 	  if (overflow != wi::OVF_NONE)
   5186 	    result = wi::min_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
   5187 	  break;
   5188 
   5189 	case SS_MULT:
   5190 	  result = wi::mul (pop0, pop1, SIGNED, &overflow);
   5191 	  goto clamp_signed_saturation;
   5192 
   5193 	case US_MULT:
   5194 	  result = wi::mul (pop0, pop1, UNSIGNED, &overflow);
   5195 	  goto clamp_unsigned_saturation;
   5196 
   5197 	case SMUL_HIGHPART:
   5198 	  result = wi::mul_high (pop0, pop1, SIGNED);
   5199 	  break;
   5200 
   5201 	case UMUL_HIGHPART:
   5202 	  result = wi::mul_high (pop0, pop1, UNSIGNED);
   5203 	  break;
   5204 
   5205 	default:
   5206 	  return NULL_RTX;
   5207 	}
   5208       return immed_wide_int_const (result, int_mode);
   5209     }
   5210 
   5211   /* Handle polynomial integers.  */
   5212   if (NUM_POLY_INT_COEFFS > 1
   5213       && is_a <scalar_int_mode> (mode, &int_mode)
   5214       && poly_int_rtx_p (op0)
   5215       && poly_int_rtx_p (op1))
   5216     {
   5217       poly_wide_int result;
   5218       switch (code)
   5219 	{
   5220 	case PLUS:
   5221 	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
   5222 	  break;
   5223 
   5224 	case MINUS:
   5225 	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
   5226 	  break;
   5227 
   5228 	case MULT:
   5229 	  if (CONST_SCALAR_INT_P (op1))
   5230 	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
   5231 	  else
   5232 	    return NULL_RTX;
   5233 	  break;
   5234 
   5235 	case ASHIFT:
   5236 	  if (CONST_SCALAR_INT_P (op1))
   5237 	    {
   5238 	      wide_int shift
   5239 		= rtx_mode_t (op1,
   5240 			      GET_MODE (op1) == VOIDmode
   5241 			      && GET_MODE_PRECISION (int_mode) < BITS_PER_WORD
   5242 			      ? word_mode : mode);
   5243 	      if (SHIFT_COUNT_TRUNCATED)
   5244 		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
   5245 	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
   5246 		return NULL_RTX;
   5247 	      result = wi::to_poly_wide (op0, mode) << shift;
   5248 	    }
   5249 	  else
   5250 	    return NULL_RTX;
   5251 	  break;
   5252 
   5253 	case IOR:
   5254 	  if (!CONST_SCALAR_INT_P (op1)
   5255 	      || !can_ior_p (wi::to_poly_wide (op0, mode),
   5256 			     rtx_mode_t (op1, mode), &result))
   5257 	    return NULL_RTX;
   5258 	  break;
   5259 
   5260 	default:
   5261 	  return NULL_RTX;
   5262 	}
   5263       return immed_wide_int_const (result, int_mode);
   5264     }
   5265 
   5266   return NULL_RTX;
   5267 }
   5268 
   5269 
   5270 
   5271 /* Return a positive integer if X should sort after Y.  The value
   5273    returned is 1 if and only if X and Y are both regs.  */
   5274 
   5275 static int
   5276 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
   5277 {
   5278   int result;
   5279 
   5280   result = (commutative_operand_precedence (y)
   5281 	    - commutative_operand_precedence (x));
   5282   if (result)
   5283     return result + result;
   5284 
   5285   /* Group together equal REGs to do more simplification.  */
   5286   if (REG_P (x) && REG_P (y))
   5287     return REGNO (x) > REGNO (y);
   5288 
   5289   return 0;
   5290 }
   5291 
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

rtx
simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
				       rtx op0, rtx op1)
{
  /* One entry of the flattened operand list: the operand itself and
     whether it is subtracted (negated) in the overall sum.  */
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  /* Flatten nested PLUS/MINUS/NEG/NOT/CONST structure into a flat
     (op, neg) list, iterating until a fixed point is reached.  */
  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      /* Split (plus A B) / (minus A B) into two entries,
		 propagating the negation into the second operand.  */
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      /* (neg A) becomes A with the negation flag flipped.  */
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      /* Unwrap (const (plus A B)) with constant A and B into
		 two separate entries.  */
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    CASE_CONST_SCALAR_INT:
	    case CONST_POLY_INT:
	      /* Fold a negation directly into an integer constant.  */
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_poly_int_rtx (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* Two or more constants can always be combined, so that alone
     counts as a canonicalization.  */
  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j--
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}

      /* Try to combine every pair (j, i), j < i.  A successful
	 combination stores the result at index i and clears slot j.  */
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    /* Strip CONST wrappers before combining, and re-wrap
		       a non-constant result afterwards.  */
		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (poly_int_rtx_p (tem) && lneg)
		      tem = neg_poly_int_rtx (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization only if all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && poly_int_rtx_p (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_poly_int_rtx (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    /* Everything is negated: fold the negation into the first operand.  */
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      /* Swap the first non-negated operand into slot 0.  */
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
   5626 
   5627 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
   5628 static bool
   5629 plus_minus_operand_p (const_rtx x)
   5630 {
   5631   return GET_CODE (x) == PLUS
   5632          || GET_CODE (x) == MINUS
   5633 	 || (GET_CODE (x) == CONST
   5634 	     && GET_CODE (XEXP (x, 0)) == PLUS
   5635 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
   5636 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
   5637 }
   5638 
   5639 /* Like simplify_binary_operation except used for relational operators.
   5640    MODE is the mode of the result. If MODE is VOIDmode, both operands must
   5641    not also be VOIDmode.
   5642 
   5643    CMP_MODE specifies in which mode the comparison is done in, so it is
   5644    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   5645    the operands or, if both are VOIDmode, the operands are compared in
   5646    "infinite precision".  */
   5647 rtx
   5648 simplify_context::simplify_relational_operation (rtx_code code,
   5649 						 machine_mode mode,
   5650 						 machine_mode cmp_mode,
   5651 						 rtx op0, rtx op1)
   5652 {
   5653   rtx tem, trueop0, trueop1;
   5654 
   5655   if (cmp_mode == VOIDmode)
   5656     cmp_mode = GET_MODE (op0);
   5657   if (cmp_mode == VOIDmode)
   5658     cmp_mode = GET_MODE (op1);
   5659 
   5660   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
   5661   if (tem)
   5662     return relational_result (mode, cmp_mode, tem);
   5663 
   5664   /* For the following tests, ensure const0_rtx is op1.  */
   5665   if (swap_commutative_operands_p (op0, op1)
   5666       || (op0 == const0_rtx && op1 != const0_rtx))
   5667     std::swap (op0, op1), code = swap_condition (code);
   5668 
   5669   /* If op0 is a compare, extract the comparison arguments from it.  */
   5670   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
   5671     return simplify_gen_relational (code, mode, VOIDmode,
   5672 				    XEXP (op0, 0), XEXP (op0, 1));
   5673 
   5674   if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
   5675     return NULL_RTX;
   5676 
   5677   trueop0 = avoid_constant_pool_reference (op0);
   5678   trueop1 = avoid_constant_pool_reference (op1);
   5679   return simplify_relational_operation_1 (code, mode, cmp_mode,
   5680 		  			  trueop0, trueop1);
   5681 }
   5682 
   5683 /* This part of simplify_relational_operation is only used when CMP_MODE
   5684    is not in class MODE_CC (i.e. it is a real comparison).
   5685 
   5686    MODE is the mode of the result, while CMP_MODE specifies in which
   5687    mode the comparison is done in, so it is the mode of the operands.  */
   5688 
   5689 rtx
   5690 simplify_context::simplify_relational_operation_1 (rtx_code code,
   5691 						   machine_mode mode,
   5692 						   machine_mode cmp_mode,
   5693 						   rtx op0, rtx op1)
   5694 {
   5695   enum rtx_code op0code = GET_CODE (op0);
   5696 
   5697   if (op1 == const0_rtx && COMPARISON_P (op0))
   5698     {
   5699       /* If op0 is a comparison, extract the comparison arguments
   5700          from it.  */
   5701       if (code == NE)
   5702 	{
   5703 	  if (GET_MODE (op0) == mode)
   5704 	    return simplify_rtx (op0);
   5705 	  else
   5706 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
   5707 					    XEXP (op0, 0), XEXP (op0, 1));
   5708 	}
   5709       else if (code == EQ)
   5710 	{
   5711 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
   5712 	  if (new_code != UNKNOWN)
   5713 	    return simplify_gen_relational (new_code, mode, VOIDmode,
   5714 					    XEXP (op0, 0), XEXP (op0, 1));
   5715 	}
   5716     }
   5717 
   5718   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
   5719      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
   5720   if ((code == LTU || code == GEU)
   5721       && GET_CODE (op0) == PLUS
   5722       && CONST_INT_P (XEXP (op0, 1))
   5723       && (rtx_equal_p (op1, XEXP (op0, 0))
   5724 	  || rtx_equal_p (op1, XEXP (op0, 1)))
   5725       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
   5726       && XEXP (op0, 1) != const0_rtx)
   5727     {
   5728       rtx new_cmp
   5729 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
   5730       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
   5731 				      cmp_mode, XEXP (op0, 0), new_cmp);
   5732     }
   5733 
   5734   /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
   5735      transformed into (LTU a -C).  */
   5736   if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
   5737       && CONST_INT_P (XEXP (op0, 1))
   5738       && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
   5739       && XEXP (op0, 1) != const0_rtx)
   5740     {
   5741       rtx new_cmp
   5742 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
   5743       return simplify_gen_relational (LTU, mode, cmp_mode,
   5744 				       XEXP (op0, 0), new_cmp);
   5745     }
   5746 
   5747   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
   5748   if ((code == LTU || code == GEU)
   5749       && GET_CODE (op0) == PLUS
   5750       && rtx_equal_p (op1, XEXP (op0, 1))
   5751       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
   5752       && !rtx_equal_p (op1, XEXP (op0, 0)))
   5753     return simplify_gen_relational (code, mode, cmp_mode, op0,
   5754 				    copy_rtx (XEXP (op0, 0)));
   5755 
   5756   if (op1 == const0_rtx)
   5757     {
   5758       /* Canonicalize (GTU x 0) as (NE x 0).  */
   5759       if (code == GTU)
   5760         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
   5761       /* Canonicalize (LEU x 0) as (EQ x 0).  */
   5762       if (code == LEU)
   5763         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
   5764     }
   5765   else if (op1 == const1_rtx)
   5766     {
   5767       switch (code)
   5768         {
   5769         case GE:
   5770 	  /* Canonicalize (GE x 1) as (GT x 0).  */
   5771 	  return simplify_gen_relational (GT, mode, cmp_mode,
   5772 					  op0, const0_rtx);
   5773 	case GEU:
   5774 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
   5775 	  return simplify_gen_relational (NE, mode, cmp_mode,
   5776 					  op0, const0_rtx);
   5777 	case LT:
   5778 	  /* Canonicalize (LT x 1) as (LE x 0).  */
   5779 	  return simplify_gen_relational (LE, mode, cmp_mode,
   5780 					  op0, const0_rtx);
   5781 	case LTU:
   5782 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
   5783 	  return simplify_gen_relational (EQ, mode, cmp_mode,
   5784 					  op0, const0_rtx);
   5785 	default:
   5786 	  break;
   5787 	}
   5788     }
   5789   else if (op1 == constm1_rtx)
   5790     {
   5791       /* Canonicalize (LE x -1) as (LT x 0).  */
   5792       if (code == LE)
   5793         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
   5794       /* Canonicalize (GT x -1) as (GE x 0).  */
   5795       if (code == GT)
   5796         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
   5797     }
   5798 
   5799   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
   5800   if ((code == EQ || code == NE)
   5801       && (op0code == PLUS || op0code == MINUS)
   5802       && CONSTANT_P (op1)
   5803       && CONSTANT_P (XEXP (op0, 1))
   5804       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
   5805     {
   5806       rtx x = XEXP (op0, 0);
   5807       rtx c = XEXP (op0, 1);
   5808       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
   5809       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
   5810 
   5811       /* Detect an infinite recursive condition, where we oscillate at this
   5812 	 simplification case between:
   5813 	    A + B == C  <--->  C - B == A,
   5814 	 where A, B, and C are all constants with non-simplifiable expressions,
   5815 	 usually SYMBOL_REFs.  */
   5816       if (GET_CODE (tem) == invcode
   5817 	  && CONSTANT_P (x)
   5818 	  && rtx_equal_p (c, XEXP (tem, 1)))
   5819 	return NULL_RTX;
   5820 
   5821       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
   5822     }
   5823 
   5824   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
   5825      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
   5826   scalar_int_mode int_mode, int_cmp_mode;
   5827   if (code == NE
   5828       && op1 == const0_rtx
   5829       && is_int_mode (mode, &int_mode)
   5830       && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
   5831       /* ??? Work-around BImode bugs in the ia64 backend.  */
   5832       && int_mode != BImode
   5833       && int_cmp_mode != BImode
   5834       && nonzero_bits (op0, int_cmp_mode) == 1
   5835       && STORE_FLAG_VALUE == 1)
   5836     return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
   5837 	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
   5838 	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
   5839 
   5840   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
   5841   if ((code == EQ || code == NE)
   5842       && op1 == const0_rtx
   5843       && op0code == XOR)
   5844     return simplify_gen_relational (code, mode, cmp_mode,
   5845 				    XEXP (op0, 0), XEXP (op0, 1));
   5846 
   5847   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
   5848   if ((code == EQ || code == NE)
   5849       && op0code == XOR
   5850       && rtx_equal_p (XEXP (op0, 0), op1)
   5851       && !side_effects_p (XEXP (op0, 0)))
   5852     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
   5853 				    CONST0_RTX (mode));
   5854 
   5855   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
   5856   if ((code == EQ || code == NE)
   5857       && op0code == XOR
   5858       && rtx_equal_p (XEXP (op0, 1), op1)
   5859       && !side_effects_p (XEXP (op0, 1)))
   5860     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
   5861 				    CONST0_RTX (mode));
   5862 
   5863   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
   5864   if ((code == EQ || code == NE)
   5865       && op0code == XOR
   5866       && CONST_SCALAR_INT_P (op1)
   5867       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
   5868     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
   5869 				    simplify_gen_binary (XOR, cmp_mode,
   5870 							 XEXP (op0, 1), op1));
   5871 
   5872   /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
   5873      constant folding if x/y is a constant.  */
   5874   if ((code == EQ || code == NE)
   5875       && (op0code == AND || op0code == IOR)
   5876       && !side_effects_p (op1)
   5877       && op1 != CONST0_RTX (cmp_mode))
   5878     {
   5879       /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
   5880 	 (eq/ne (and (not y) x) 0).  */
   5881       if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
   5882 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
   5883 	{
   5884 	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
   5885 					  cmp_mode);
   5886 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
   5887 
   5888 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
   5889 					  CONST0_RTX (cmp_mode));
   5890 	}
   5891 
   5892       /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
   5893 	 (eq/ne (and (not x) y) 0).  */
   5894       if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
   5895 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
   5896 	{
   5897 	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
   5898 					  cmp_mode);
   5899 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
   5900 
   5901 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
   5902 					  CONST0_RTX (cmp_mode));
   5903 	}
   5904     }
   5905 
   5906   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
   5907   if ((code == EQ || code == NE)
   5908       && GET_CODE (op0) == BSWAP
   5909       && CONST_SCALAR_INT_P (op1))
   5910     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
   5911 				    simplify_gen_unary (BSWAP, cmp_mode,
   5912 							op1, cmp_mode));
   5913 
   5914   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
   5915   if ((code == EQ || code == NE)
   5916       && GET_CODE (op0) == BSWAP
   5917       && GET_CODE (op1) == BSWAP)
   5918     return simplify_gen_relational (code, mode, cmp_mode,
   5919 				    XEXP (op0, 0), XEXP (op1, 0));
   5920 
   5921   if (op0code == POPCOUNT && op1 == const0_rtx)
   5922     switch (code)
   5923       {
   5924       case EQ:
   5925       case LE:
   5926       case LEU:
   5927 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
   5928 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
   5929 					XEXP (op0, 0), const0_rtx);
   5930 
   5931       case NE:
   5932       case GT:
   5933       case GTU:
   5934 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
   5935 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
   5936 					XEXP (op0, 0), const0_rtx);
   5937 
   5938       default:
   5939 	break;
   5940       }
   5941 
   5942   return NULL_RTX;
   5943 }
   5944 
/* Bit-flags recording which of the basic relations between two compared
   values are known to hold.  comparison_result below consumes a mask of
   these to answer an arbitrary comparison code.  */
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
   5953 
   5954 
   5955 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   5956    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
   5957    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   5958    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   5959    For floating-point comparisons, assume that the operands were ordered.  */
   5960 
   5961 static rtx
   5962 comparison_result (enum rtx_code code, int known_results)
   5963 {
   5964   switch (code)
   5965     {
   5966     case EQ:
   5967     case UNEQ:
   5968       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
   5969     case NE:
   5970     case LTGT:
   5971       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
   5972 
   5973     case LT:
   5974     case UNLT:
   5975       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
   5976     case GE:
   5977     case UNGE:
   5978       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
   5979 
   5980     case GT:
   5981     case UNGT:
   5982       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
   5983     case LE:
   5984     case UNLE:
   5985       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
   5986 
   5987     case LTU:
   5988       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
   5989     case GEU:
   5990       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
   5991 
   5992     case GTU:
   5993       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
   5994     case LEU:
   5995       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
   5996 
   5997     case ORDERED:
   5998       return const_true_rtx;
   5999     case UNORDERED:
   6000       return const0_rtx;
   6001     default:
   6002       gcc_unreachable ();
   6003     }
   6004 }
   6005 
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      /* Re-derive the comparison mode from whichever argument has one;
	 with neither carrying a mode we cannot reason further.  */
      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  /* When NaNs cannot occur, ORDERED/UNORDERED are known at compile time.  */
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  /* Record both the signed and the unsigned relation; the
	     requested CODE picks which one matters.  */
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
  scalar_int_mode int_mode;
  if (CONST_INT_P (trueop1)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && HWI_COMPUTABLE_MODE_P (int_mode)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      /* Unsigned comparison codes want unsigned mode bounds; everything
	 else uses signed bounds.  */
      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies
		= num_sign_bit_copies (trueop0, int_mode);

	      /* Each known extra copy of the sign bit halves the signed
		 range, so shrink the mode bounds accordingly.  */
	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && trueop1 == const0_rtx
      && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      /* IOR with a nonzero constant is itself nonzero, so EQ/LEU
		 against zero are false and NE/GTU true.  For the signed
		 codes we also need to know whether the constant forces
		 the sign bit on.
		 NOTE(review): the >= test admits
		 sign_bitnum == HOST_BITS_PER_WIDE_INT, for which the
		 shift below would be out of range; presumably no integer
		 mode reaching here has precision
		 HOST_BITS_PER_WIDE_INT + 1 — confirm.  */
	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
   6353 
   6354 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   6355    where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   6356    or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
   6357    can be simplified to that or NULL_RTX if not.
   6358    Assume X is compared against zero with CMP_CODE and the true
   6359    arm is TRUE_VAL and the false arm is FALSE_VAL.  */
   6360 
   6361 rtx
   6362 simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
   6363 					 rtx true_val, rtx false_val)
   6364 {
   6365   if (cmp_code != EQ && cmp_code != NE)
   6366     return NULL_RTX;
   6367 
   6368   /* Result on X == 0 and X !=0 respectively.  */
   6369   rtx on_zero, on_nonzero;
   6370   if (cmp_code == EQ)
   6371     {
   6372       on_zero = true_val;
   6373       on_nonzero = false_val;
   6374     }
   6375   else
   6376     {
   6377       on_zero = false_val;
   6378       on_nonzero = true_val;
   6379     }
   6380 
   6381   rtx_code op_code = GET_CODE (on_nonzero);
   6382   if ((op_code != CLZ && op_code != CTZ)
   6383       || !rtx_equal_p (XEXP (on_nonzero, 0), x)
   6384       || !CONST_INT_P (on_zero))
   6385     return NULL_RTX;
   6386 
   6387   HOST_WIDE_INT op_val;
   6388   scalar_int_mode mode ATTRIBUTE_UNUSED
   6389     = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
   6390   if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
   6391        || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
   6392       && op_val == INTVAL (on_zero))
   6393     return on_nonzero;
   6394 
   6395   return NULL_RTX;
   6396 }
   6397 
   6398 /* Try to simplify X given that it appears within operand OP of a
   6399    VEC_MERGE operation whose mask is MASK.  X need not use the same
   6400    vector mode as the VEC_MERGE, but it must have the same number of
   6401    elements.
   6402 
   6403    Return the simplified X on success, otherwise return NULL_RTX.  */
   6404 
   6405 rtx
   6406 simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
   6407 {
   6408   gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
   6409   poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
   6410   if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
   6411     {
   6412       if (side_effects_p (XEXP (x, 1 - op)))
   6413 	return NULL_RTX;
   6414 
   6415       return XEXP (x, op);
   6416     }
   6417   if (UNARY_P (x)
   6418       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
   6419       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
   6420     {
   6421       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
   6422       if (top0)
   6423 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
   6424 				   GET_MODE (XEXP (x, 0)));
   6425     }
   6426   if (BINARY_P (x)
   6427       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
   6428       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
   6429       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
   6430       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
   6431     {
   6432       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
   6433       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
   6434       if (top0 || top1)
   6435 	{
   6436 	  if (COMPARISON_P (x))
   6437 	    return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
   6438 					    GET_MODE (XEXP (x, 0)) != VOIDmode
   6439 					    ? GET_MODE (XEXP (x, 0))
   6440 					    : GET_MODE (XEXP (x, 1)),
   6441 					    top0 ? top0 : XEXP (x, 0),
   6442 					    top1 ? top1 : XEXP (x, 1));
   6443 	  else
   6444 	    return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
   6445 					top0 ? top0 : XEXP (x, 0),
   6446 					top1 ? top1 : XEXP (x, 1));
   6447 	}
   6448     }
   6449   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
   6450       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
   6451       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
   6452       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
   6453       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
   6454       && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
   6455       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
   6456     {
   6457       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
   6458       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
   6459       rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
   6460       if (top0 || top1 || top2)
   6461 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
   6462 				     GET_MODE (XEXP (x, 0)),
   6463 				     top0 ? top0 : XEXP (x, 0),
   6464 				     top1 ? top1 : XEXP (x, 1),
   6465 				     top2 ? top2 : XEXP (x, 2));
   6466     }
   6467   return NULL_RTX;
   6468 }
   6469 
   6470 
   6471 /* Simplify CODE, an operation with result mode MODE and three operands,
   6473    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   6474    a constant.  Return 0 if no simplifications is possible.  */
   6475 
   6476 rtx
   6477 simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
   6478 					      machine_mode op0_mode,
   6479 					      rtx op0, rtx op1, rtx op2)
   6480 {
   6481   bool any_change = false;
   6482   rtx tem, trueop2;
   6483   scalar_int_mode int_mode, int_op0_mode;
   6484   unsigned int n_elts;
   6485 
   6486   switch (code)
   6487     {
   6488     case FMA:
   6489       /* Simplify negations around the multiplication.  */
   6490       /* -a * -b + c  =>  a * b + c.  */
   6491       if (GET_CODE (op0) == NEG)
   6492 	{
   6493 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
   6494 	  if (tem)
   6495 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
   6496 	}
   6497       else if (GET_CODE (op1) == NEG)
   6498 	{
   6499 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
   6500 	  if (tem)
   6501 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
   6502 	}
   6503 
   6504       /* Canonicalize the two multiplication operands.  */
   6505       /* a * -b + c  =>  -b * a + c.  */
   6506       if (swap_commutative_operands_p (op0, op1))
   6507 	std::swap (op0, op1), any_change = true;
   6508 
   6509       if (any_change)
   6510 	return gen_rtx_FMA (mode, op0, op1, op2);
   6511       return NULL_RTX;
   6512 
   6513     case SIGN_EXTRACT:
   6514     case ZERO_EXTRACT:
   6515       if (CONST_INT_P (op0)
   6516 	  && CONST_INT_P (op1)
   6517 	  && CONST_INT_P (op2)
   6518 	  && is_a <scalar_int_mode> (mode, &int_mode)
   6519 	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
   6520 	  && HWI_COMPUTABLE_MODE_P (int_mode))
   6521 	{
   6522 	  /* Extracting a bit-field from a constant */
   6523 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
   6524 	  HOST_WIDE_INT op1val = INTVAL (op1);
   6525 	  HOST_WIDE_INT op2val = INTVAL (op2);
   6526 	  if (!BITS_BIG_ENDIAN)
   6527 	    val >>= op2val;
   6528 	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
   6529 	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
   6530 	  else
   6531 	    /* Not enough information to calculate the bit position.  */
   6532 	    break;
   6533 
   6534 	  if (HOST_BITS_PER_WIDE_INT != op1val)
   6535 	    {
   6536 	      /* First zero-extend.  */
   6537 	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
   6538 	      /* If desired, propagate sign bit.  */
   6539 	      if (code == SIGN_EXTRACT
   6540 		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
   6541 		     != 0)
   6542 		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
   6543 	    }
   6544 
   6545 	  return gen_int_mode (val, int_mode);
   6546 	}
   6547       break;
   6548 
   6549     case IF_THEN_ELSE:
   6550       if (CONST_INT_P (op0))
   6551 	return op0 != const0_rtx ? op1 : op2;
   6552 
   6553       /* Convert c ? a : a into "a".  */
   6554       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
   6555 	return op1;
   6556 
   6557       /* Convert a != b ? a : b into "a".  */
   6558       if (GET_CODE (op0) == NE
   6559 	  && ! side_effects_p (op0)
   6560 	  && ! HONOR_NANS (mode)
   6561 	  && ! HONOR_SIGNED_ZEROS (mode)
   6562 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
   6563 	       && rtx_equal_p (XEXP (op0, 1), op2))
   6564 	      || (rtx_equal_p (XEXP (op0, 0), op2)
   6565 		  && rtx_equal_p (XEXP (op0, 1), op1))))
   6566 	return op1;
   6567 
   6568       /* Convert a == b ? a : b into "b".  */
   6569       if (GET_CODE (op0) == EQ
   6570 	  && ! side_effects_p (op0)
   6571 	  && ! HONOR_NANS (mode)
   6572 	  && ! HONOR_SIGNED_ZEROS (mode)
   6573 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
   6574 	       && rtx_equal_p (XEXP (op0, 1), op2))
   6575 	      || (rtx_equal_p (XEXP (op0, 0), op2)
   6576 		  && rtx_equal_p (XEXP (op0, 1), op1))))
   6577 	return op2;
   6578 
   6579       /* Convert (!c) != {0,...,0} ? a : b into
   6580          c != {0,...,0} ? b : a for vector modes.  */
   6581       if (VECTOR_MODE_P (GET_MODE (op1))
   6582 	  && GET_CODE (op0) == NE
   6583 	  && GET_CODE (XEXP (op0, 0)) == NOT
   6584 	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
   6585 	{
   6586 	  rtx cv = XEXP (op0, 1);
   6587 	  int nunits;
   6588 	  bool ok = true;
   6589 	  if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
   6590 	    ok = false;
   6591 	  else
   6592 	    for (int i = 0; i < nunits; ++i)
   6593 	      if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
   6594 		{
   6595 		  ok = false;
   6596 		  break;
   6597 		}
   6598 	  if (ok)
   6599 	    {
   6600 	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
   6601 					XEXP (XEXP (op0, 0), 0),
   6602 					XEXP (op0, 1));
   6603 	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
   6604 	      return retval;
   6605 	    }
   6606 	}
   6607 
   6608       /* Convert x == 0 ? N : clz (x) into clz (x) when
   6609 	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
   6610 	 Similarly for ctz (x).  */
   6611       if (COMPARISON_P (op0) && !side_effects_p (op0)
   6612 	  && XEXP (op0, 1) == const0_rtx)
   6613 	{
   6614 	  rtx simplified
   6615 	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
   6616 				     op1, op2);
   6617 	  if (simplified)
   6618 	    return simplified;
   6619 	}
   6620 
   6621       if (COMPARISON_P (op0) && ! side_effects_p (op0))
   6622 	{
   6623 	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
   6624 					? GET_MODE (XEXP (op0, 1))
   6625 					: GET_MODE (XEXP (op0, 0)));
   6626 	  rtx temp;
   6627 
   6628 	  /* Look for happy constants in op1 and op2.  */
   6629 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
   6630 	    {
   6631 	      HOST_WIDE_INT t = INTVAL (op1);
   6632 	      HOST_WIDE_INT f = INTVAL (op2);
   6633 
   6634 	      if (t == STORE_FLAG_VALUE && f == 0)
   6635 	        code = GET_CODE (op0);
   6636 	      else if (t == 0 && f == STORE_FLAG_VALUE)
   6637 		{
   6638 		  enum rtx_code tmp;
   6639 		  tmp = reversed_comparison_code (op0, NULL);
   6640 		  if (tmp == UNKNOWN)
   6641 		    break;
   6642 		  code = tmp;
   6643 		}
   6644 	      else
   6645 		break;
   6646 
   6647 	      return simplify_gen_relational (code, mode, cmp_mode,
   6648 					      XEXP (op0, 0), XEXP (op0, 1));
   6649 	    }
   6650 
   6651 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
   6652 			  			cmp_mode, XEXP (op0, 0),
   6653 						XEXP (op0, 1));
   6654 
   6655 	  /* See if any simplifications were possible.  */
   6656 	  if (temp)
   6657 	    {
   6658 	      if (CONST_INT_P (temp))
   6659 		return temp == const0_rtx ? op2 : op1;
   6660 	      else if (temp)
   6661 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
   6662 	    }
   6663 	}
   6664       break;
   6665 
   6666     case VEC_MERGE:
   6667       gcc_assert (GET_MODE (op0) == mode);
   6668       gcc_assert (GET_MODE (op1) == mode);
   6669       gcc_assert (VECTOR_MODE_P (mode));
   6670       trueop2 = avoid_constant_pool_reference (op2);
   6671       if (CONST_INT_P (trueop2)
   6672 	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
   6673 	{
   6674 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
   6675 	  unsigned HOST_WIDE_INT mask;
   6676 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
   6677 	    mask = -1;
   6678 	  else
   6679 	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;
   6680 
   6681 	  if (!(sel & mask) && !side_effects_p (op0))
   6682 	    return op1;
   6683 	  if ((sel & mask) == mask && !side_effects_p (op1))
   6684 	    return op0;
   6685 
   6686 	  rtx trueop0 = avoid_constant_pool_reference (op0);
   6687 	  rtx trueop1 = avoid_constant_pool_reference (op1);
   6688 	  if (GET_CODE (trueop0) == CONST_VECTOR
   6689 	      && GET_CODE (trueop1) == CONST_VECTOR)
   6690 	    {
   6691 	      rtvec v = rtvec_alloc (n_elts);
   6692 	      unsigned int i;
   6693 
   6694 	      for (i = 0; i < n_elts; i++)
   6695 		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
   6696 				    ? CONST_VECTOR_ELT (trueop0, i)
   6697 				    : CONST_VECTOR_ELT (trueop1, i));
   6698 	      return gen_rtx_CONST_VECTOR (mode, v);
   6699 	    }
   6700 
   6701 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
   6702 	     if no element from a appears in the result.  */
   6703 	  if (GET_CODE (op0) == VEC_MERGE)
   6704 	    {
   6705 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
   6706 	      if (CONST_INT_P (tem))
   6707 		{
   6708 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
   6709 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
   6710 		    return simplify_gen_ternary (code, mode, mode,
   6711 						 XEXP (op0, 1), op1, op2);
   6712 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
   6713 		    return simplify_gen_ternary (code, mode, mode,
   6714 						 XEXP (op0, 0), op1, op2);
   6715 		}
   6716 	    }
   6717 	  if (GET_CODE (op1) == VEC_MERGE)
   6718 	    {
   6719 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
   6720 	      if (CONST_INT_P (tem))
   6721 		{
   6722 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
   6723 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
   6724 		    return simplify_gen_ternary (code, mode, mode,
   6725 						 op0, XEXP (op1, 1), op2);
   6726 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
   6727 		    return simplify_gen_ternary (code, mode, mode,
   6728 						 op0, XEXP (op1, 0), op2);
   6729 		}
   6730 	    }
   6731 
   6732 	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
   6733 	     with a.  */
   6734 	  if (GET_CODE (op0) == VEC_DUPLICATE
   6735 	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
   6736 	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
   6737 	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
   6738 	    {
   6739 	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
   6740 	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
   6741 		{
   6742 		  if (XEXP (XEXP (op0, 0), 0) == op1
   6743 		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
   6744 		    return op1;
   6745 		}
   6746 	    }
   6747 	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
   6748 	     (const_int N))
   6749 	     with (vec_concat (X) (B)) if N == 1 or
   6750 	     (vec_concat (A) (X)) if N == 2.  */
   6751 	  if (GET_CODE (op0) == VEC_DUPLICATE
   6752 	      && GET_CODE (op1) == CONST_VECTOR
   6753 	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
   6754 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
   6755 	      && IN_RANGE (sel, 1, 2))
   6756 	    {
   6757 	      rtx newop0 = XEXP (op0, 0);
   6758 	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
   6759 	      if (sel == 2)
   6760 		std::swap (newop0, newop1);
   6761 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
   6762 	    }
   6763 	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
   6764 	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
   6765 	     Only applies for vectors of two elements.  */
   6766 	  if (GET_CODE (op0) == VEC_DUPLICATE
   6767 	      && GET_CODE (op1) == VEC_CONCAT
   6768 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
   6769 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
   6770 	      && IN_RANGE (sel, 1, 2))
   6771 	    {
   6772 	      rtx newop0 = XEXP (op0, 0);
   6773 	      rtx newop1 = XEXP (op1, 2 - sel);
   6774 	      rtx otherop = XEXP (op1, sel - 1);
   6775 	      if (sel == 2)
   6776 		std::swap (newop0, newop1);
   6777 	      /* Don't want to throw away the other part of the vec_concat if
   6778 		 it has side-effects.  */
   6779 	      if (!side_effects_p (otherop))
   6780 		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
   6781 	    }
   6782 
   6783 	  /* Replace:
   6784 
   6785 	      (vec_merge:outer (vec_duplicate:outer x:inner)
   6786 			       (subreg:outer y:inner 0)
   6787 			       (const_int N))
   6788 
   6789 	     with (vec_concat:outer x:inner y:inner) if N == 1,
   6790 	     or (vec_concat:outer y:inner x:inner) if N == 2.
   6791 
   6792 	     Implicitly, this means we have a paradoxical subreg, but such
   6793 	     a check is cheap, so make it anyway.
   6794 
   6795 	     Only applies for vectors of two elements.  */
   6796 	  if (GET_CODE (op0) == VEC_DUPLICATE
   6797 	      && GET_CODE (op1) == SUBREG
   6798 	      && GET_MODE (op1) == GET_MODE (op0)
   6799 	      && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
   6800 	      && paradoxical_subreg_p (op1)
   6801 	      && subreg_lowpart_p (op1)
   6802 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
   6803 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
   6804 	      && IN_RANGE (sel, 1, 2))
   6805 	    {
   6806 	      rtx newop0 = XEXP (op0, 0);
   6807 	      rtx newop1 = SUBREG_REG (op1);
   6808 	      if (sel == 2)
   6809 		std::swap (newop0, newop1);
   6810 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
   6811 	    }
   6812 
   6813 	  /* Same as above but with switched operands:
   6814 		Replace (vec_merge:outer (subreg:outer x:inner 0)
   6815 					 (vec_duplicate:outer y:inner)
   6816 			       (const_int N))
   6817 
   6818 	     with (vec_concat:outer x:inner y:inner) if N == 1,
   6819 	     or (vec_concat:outer y:inner x:inner) if N == 2.  */
   6820 	  if (GET_CODE (op1) == VEC_DUPLICATE
   6821 	      && GET_CODE (op0) == SUBREG
   6822 	      && GET_MODE (op0) == GET_MODE (op1)
   6823 	      && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
   6824 	      && paradoxical_subreg_p (op0)
   6825 	      && subreg_lowpart_p (op0)
   6826 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
   6827 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
   6828 	      && IN_RANGE (sel, 1, 2))
   6829 	    {
   6830 	      rtx newop0 = SUBREG_REG (op0);
   6831 	      rtx newop1 = XEXP (op1, 0);
   6832 	      if (sel == 2)
   6833 		std::swap (newop0, newop1);
   6834 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
   6835 	    }
   6836 
   6837 	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
   6838 				 (const_int n))
   6839 	     with (vec_concat x y) or (vec_concat y x) depending on value
   6840 	     of N.  */
   6841 	  if (GET_CODE (op0) == VEC_DUPLICATE
   6842 	      && GET_CODE (op1) == VEC_DUPLICATE
   6843 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
   6844 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
   6845 	      && IN_RANGE (sel, 1, 2))
   6846 	    {
   6847 	      rtx newop0 = XEXP (op0, 0);
   6848 	      rtx newop1 = XEXP (op1, 0);
   6849 	      if (sel == 2)
   6850 		std::swap (newop0, newop1);
   6851 
   6852 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
   6853 	    }
   6854 	}
   6855 
   6856       if (rtx_equal_p (op0, op1)
   6857 	  && !side_effects_p (op2) && !side_effects_p (op1))
   6858 	return op0;
   6859 
   6860       if (!side_effects_p (op2))
   6861 	{
   6862 	  rtx top0
   6863 	    = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
   6864 	  rtx top1
   6865 	    = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
   6866 	  if (top0 || top1)
   6867 	    return simplify_gen_ternary (code, mode, mode,
   6868 					 top0 ? top0 : op0,
   6869 					 top1 ? top1 : op1, op2);
   6870 	}
   6871 
   6872       break;
   6873 
   6874     default:
   6875       gcc_unreachable ();
   6876     }
   6877 
   6878   return 0;
   6879 }
   6880 
   6881 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
   6882    starting at byte FIRST_BYTE.  Return true on success and add the
   6883    bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
   6884    that the bytes follow target memory order.  Leave BYTES unmodified
   6885    on failure.
   6886 
   6887    MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
   6888    BYTES before calling this function.  */
   6889 
bool
native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
		   unsigned int first_byte, unsigned int num_bytes)
{
  /* Check the mode is sensible.  */
  gcc_assert (GET_MODE (x) == VOIDmode
	      ? is_a <scalar_int_mode> (mode)
	      : mode == GET_MODE (x));

  if (GET_CODE (x) == CONST_VECTOR)
    {
      /* CONST_VECTOR_ELT follows target memory order, so no shuffling
	 is necessary.  The only complication is that MODE_VECTOR_BOOL
	 vectors can have several elements per byte.  */
      unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
						   GET_MODE_NUNITS (mode));
      /* Index of the first element that FIRST_BYTE covers.  */
      unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
      if (elt_bits < BITS_PER_UNIT)
	{
	  /* This is the only case in which elements can be smaller than
	     a byte.  */
	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
	  auto mask = GET_MODE_MASK (GET_MODE_INNER (mode));
	  for (unsigned int i = 0; i < num_bytes; ++i)
	    {
	      /* Pack BITS_PER_UNIT / ELT_BITS elements into one target
		 byte, lowest-numbered element in the least significant
		 bits.  */
	      target_unit value = 0;
	      for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
		{
		  value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & mask) << j;
		  elt += 1;
		}
	      bytes.quick_push (value);
	    }
	  return true;
	}

      /* Remember the length on entry so that we can undo any partial
	 pushes if encoding an element fails below.  */
      unsigned int start = bytes.length ();
      unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
      /* Make FIRST_BYTE relative to ELT.  */
      first_byte %= elt_bytes;
      while (num_bytes > 0)
	{
	  /* Work out how many bytes we want from element ELT.  */
	  unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
	  if (!native_encode_rtx (GET_MODE_INNER (mode),
				  CONST_VECTOR_ELT (x, elt), bytes,
				  first_byte, chunk_bytes))
	    {
	      /* Leave BYTES unmodified, as the function contract
		 promises.  */
	      bytes.truncate (start);
	      return false;
	    }
	  elt += 1;
	  first_byte = 0;
	  num_bytes -= chunk_bytes;
	}
      return true;
    }

  /* All subsequent cases are limited to scalars.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  /* Make sure that the region is in range.  */
  unsigned int end_byte = first_byte + num_bytes;
  unsigned int mode_bytes = GET_MODE_SIZE (smode);
  gcc_assert (end_byte <= mode_bytes);

  if (CONST_SCALAR_INT_P (x))
    {
      /* The target memory layout is affected by both BYTES_BIG_ENDIAN
	 and WORDS_BIG_ENDIAN.  Use the subreg machinery to get the lsb
	 position of each byte.  */
      rtx_mode_t value (x, smode);
      wide_int_ref value_wi (value);
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  /* Operate directly on the encoding rather than using
	     wi::extract_uhwi, so that we preserve the sign or zero
	     extension for modes that are not a whole number of bits in
	     size.  (Zero extension is only used for the combination of
	     innermode == BImode && STORE_FLAG_VALUE == 1).  */
	  unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
	  unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
	  unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
	  bytes.quick_push (uhwi >> shift);
	}
      return true;
    }

  if (CONST_DOUBLE_P (x))
    {
      /* real_to_target produces an array of integers in target memory order.
	 All integers before the last one have 32 bits; the last one may
	 have 32 bits or fewer, depending on whether the mode bitsize
	 is divisible by 32.  Each of these integers is then laid out
	 in target memory as any other integer would be.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
	 handling above.  */
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  unsigned int index = byte / bytes_per_el32;
	  unsigned int subbyte = byte % bytes_per_el32;
	  /* The final el32 element may cover fewer than 32 bits.  */
	  unsigned int int_bytes = MIN (bytes_per_el32,
					mode_bytes - index * bytes_per_el32);
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
	  bytes.quick_push ((unsigned long) el32[index] >> lsb);
	}
      return true;
    }

  if (GET_CODE (x) == CONST_FIXED)
    {
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  /* The fixed-point value is stored as a (low, high) pair of
	     HOST_WIDE_INTs; pick the half that holds this byte.  */
	  unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
	  if (lsb >= HOST_BITS_PER_WIDE_INT)
	    {
	      lsb -= HOST_BITS_PER_WIDE_INT;
	      piece = CONST_FIXED_VALUE_HIGH (x);
	    }
	  bytes.quick_push (piece >> lsb);
	}
      return true;
    }

  /* No other kinds of constant are supported.  */
  return false;
}
   7033 
   7034 /* Read a vector of mode MODE from the target memory image given by BYTES,
   7035    starting at byte FIRST_BYTE.  The vector is known to be encodable using
   7036    NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
   7037    and BYTES is known to have enough bytes to supply NPATTERNS *
   7038    NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
   7039    BITS_PER_UNIT bits and the bytes are in target memory order.
   7040 
   7041    Return the vector on success, otherwise return NULL_RTX.  */
   7042 
   7043 rtx
   7044 native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
   7045 			  unsigned int first_byte, unsigned int npatterns,
   7046 			  unsigned int nelts_per_pattern)
   7047 {
   7048   rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
   7049 
   7050   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
   7051 					       GET_MODE_NUNITS (mode));
   7052   if (elt_bits < BITS_PER_UNIT)
   7053     {
   7054       /* This is the only case in which elements can be smaller than a byte.
   7055 	 Element 0 is always in the lsb of the containing byte.  */
   7056       gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
   7057       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
   7058 	{
   7059 	  unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
   7060 	  unsigned int byte_index = bit_index / BITS_PER_UNIT;
   7061 	  unsigned int lsb = bit_index % BITS_PER_UNIT;
   7062 	  unsigned int value = bytes[byte_index] >> lsb;
   7063 	  builder.quick_push (gen_int_mode (value, GET_MODE_INNER (mode)));
   7064 	}
   7065     }
   7066   else
   7067     {
   7068       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
   7069 	{
   7070 	  rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
   7071 	  if (!x)
   7072 	    return NULL_RTX;
   7073 	  builder.quick_push (x);
   7074 	  first_byte += elt_bits / BITS_PER_UNIT;
   7075 	}
   7076     }
   7077   return builder.build ();
   7078 }
   7079 
   7080 /* Read an rtx of mode MODE from the target memory image given by BYTES,
   7081    starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
   7082    bits and the bytes are in target memory order.  The image has enough
   7083    values to specify all bytes of MODE.
   7084 
   7085    Return the rtx on success, otherwise return NULL_RTX.  */
   7086 
rtx
native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
		   unsigned int first_byte)
{
  if (VECTOR_MODE_P (mode))
    {
      /* If we know at compile time how many elements there are,
	 pull each element directly from BYTES.  */
      unsigned int nelts;
      if (GET_MODE_NUNITS (mode).is_constant (&nelts))
	return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
      /* Variable-length vectors are not handled here.  */
      return NULL_RTX;
    }

  scalar_int_mode imode;
  if (is_a <scalar_int_mode> (mode, &imode)
      && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
    {
      /* Pull the bytes msb first, so that we can use simple
	 shift-and-insert wide_int operations.  */
      unsigned int size = GET_MODE_SIZE (imode);
      wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
      for (unsigned int i = 0; i < size; ++i)
	{
	  /* LSB position (in value terms) of the byte we want next,
	     counting down from the most significant byte.  */
	  unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
	  /* Always constant because the inputs are.  */
	  unsigned int subbyte
	    = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
	  result <<= BITS_PER_UNIT;
	  result |= bytes[first_byte + subbyte];
	}
      return immed_wide_int_const (result, imode);
    }

  scalar_float_mode fmode;
  if (is_a <scalar_float_mode> (mode, &fmode))
    {
      /* We need to build an array of integers in target memory order.
	 All integers before the last one have 32 bits; the last one may
	 have 32 bits or fewer, depending on whether the mode bitsize
	 is divisible by 32.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
      memset (el32, 0, num_el32 * sizeof (long));

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      /* This mirrors the CONST_DOUBLE_P encoding loop in
	 native_encode_rtx, but inserts bytes instead of extracting
	 them.  */
      unsigned int mode_bytes = GET_MODE_SIZE (fmode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
	{
	  unsigned int index = byte / bytes_per_el32;
	  unsigned int subbyte = byte % bytes_per_el32;
	  /* The final el32 element may cover fewer than 32 bits.  */
	  unsigned int int_bytes = MIN (bytes_per_el32,
					mode_bytes - index * bytes_per_el32);
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
	  el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
	}
      REAL_VALUE_TYPE r;
      real_from_target (&r, el32, fmode);
      return const_double_from_real_value (r, fmode);
    }

  if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
    {
      scalar_mode smode = as_a <scalar_mode> (mode);
      /* Accumulate the bytes into the (low, high) pair of
	 HOST_WIDE_INTs that represents a fixed-point value.  */
      FIXED_VALUE_TYPE f;
      f.data.low = 0;
      f.data.high = 0;
      f.mode = smode;

      unsigned int mode_bytes = GET_MODE_SIZE (smode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
	  if (lsb >= HOST_BITS_PER_WIDE_INT)
	    f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
	  else
	    f.data.low |= unit << lsb;
	}
      return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
    }

  /* No other kinds of mode are supported.  */
  return NULL_RTX;
}
   7178 
   7179 /* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
   7180    is to convert a runtime BYTE value into a constant one.  */
   7181 
   7182 static poly_uint64
   7183 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
   7184 {
   7185   /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
   7186   machine_mode mode = GET_MODE (x);
   7187   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
   7188 					       GET_MODE_NUNITS (mode));
   7189   /* The number of bits needed to encode one element from each pattern.  */
   7190   unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
   7191 
   7192   /* Identify the start point in terms of a sequence number and a byte offset
   7193      within that sequence.  */
   7194   poly_uint64 first_sequence;
   7195   unsigned HOST_WIDE_INT subbit;
   7196   if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
   7197 		       &first_sequence, &subbit))
   7198     {
   7199       unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
   7200       if (nelts_per_pattern == 1)
   7201 	/* This is a duplicated vector, so the value of FIRST_SEQUENCE
   7202 	   doesn't matter.  */
   7203 	byte = subbit / BITS_PER_UNIT;
   7204       else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
   7205 	{
   7206 	  /* The subreg drops the first element from each pattern and
   7207 	     only uses the second element.  Find the first sequence
   7208 	     that starts on a byte boundary.  */
   7209 	  subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
   7210 	  byte = subbit / BITS_PER_UNIT;
   7211 	}
   7212     }
   7213   return byte;
   7214 }
   7215 
   7216 /* Subroutine of simplify_subreg in which:
   7217 
   7218    - X is known to be a CONST_VECTOR
   7219    - OUTERMODE is known to be a vector mode
   7220 
   7221    Try to handle the subreg by operating on the CONST_VECTOR encoding
   7222    rather than on each individual element of the CONST_VECTOR.
   7223 
   7224    Return the simplified subreg on success, otherwise return NULL_RTX.  */
   7225 
static rtx
simplify_const_vector_subreg (machine_mode outermode, rtx x,
			      machine_mode innermode, unsigned int first_byte)
{
  /* Paradoxical subregs of vectors have dubious semantics.  */
  if (paradoxical_subreg_p (outermode, innermode))
    return NULL_RTX;

  /* We can only preserve the semantics of a stepped pattern if the new
     vector element is the same as the original one.  */
  if (CONST_VECTOR_STEPPED_P (x)
      && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
    return NULL_RTX;

  /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
  unsigned int x_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (innermode),
			   GET_MODE_NUNITS (innermode));
  unsigned int out_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (outermode),
			   GET_MODE_NUNITS (outermode));

  /* The number of bits needed to encode one element from every pattern
     of the original vector.  */
  unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;

  /* The number of bits needed to encode one element from every pattern
     of the result.  */
  unsigned int out_sequence_bits
    = least_common_multiple (x_sequence_bits, out_elt_bits);

  /* Work out the number of interleaved patterns in the output vector
     and the number of encoded elements per pattern.  */
  unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
  unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);

  /* The encoding scheme requires the number of elements to be a multiple
     of the number of patterns, so that each pattern appears at least once
     and so that the same number of elements appear from each pattern.  */
  bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
  unsigned int const_nunits;
  if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
      && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
    {
      /* Either the encoding is invalid, or applying it would give us
	 more elements than we need.  Just encode each element directly.  */
      out_npatterns = const_nunits;
      nelts_per_pattern = 1;
    }
  else if (!ok_p)
    return NULL_RTX;

  /* Get enough bytes of X to form the new encoding.  */
  unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
  unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
  auto_vec<target_unit, 128> buffer (buffer_bytes);
  if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
    return NULL_RTX;

  /* Reencode the bytes as OUTERMODE.  */
  return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
				   nelts_per_pattern);
}
   7289 
   7290 /* Try to simplify a subreg of a constant by encoding the subreg region
   7291    as a sequence of target bytes and reading them back in the new mode.
   7292    Return the new value on success, otherwise return null.
   7293 
   7294    The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
   7295    and byte offset FIRST_BYTE.  */
   7296 
static rtx
simplify_immed_subreg (fixed_size_mode outermode, rtx x,
		       machine_mode innermode, unsigned int first_byte)
{
  unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
  auto_vec<target_unit, 128> buffer (buffer_bytes);

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
    return x;

  /* Paradoxical subregs read undefined values for bytes outside of the
     inner value.  However, we have traditionally always sign-extended
     integer constants and zero-extended others.  */
  unsigned int inner_bytes = buffer_bytes;
  if (paradoxical_subreg_p (outermode, innermode))
    {
      if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
	return NULL_RTX;

      /* Choose the filler byte: all-ones for negative integer
	 constants (sign extension), zero otherwise.  */
      target_unit filler = 0;
      if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
	filler = -1;

      /* Add any leading bytes due to big-endian layout.  The number of
	 bytes must be constant because both modes have constant size.  */
      unsigned int leading_bytes
	= -byte_lowpart_offset (outermode, innermode).to_constant ();
      for (unsigned int i = 0; i < leading_bytes; ++i)
	buffer.quick_push (filler);

      if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
	return NULL_RTX;

      /* Add any trailing bytes due to little-endian layout.  */
      while (buffer.length () < buffer_bytes)
	buffer.quick_push (filler);
    }
  else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
    return NULL_RTX;
  rtx ret = native_decode_rtx (outermode, buffer, 0);
  if (ret && FLOAT_MODE_P (outermode))
    {
      /* Check that the float value round-trips: re-encode the decoded
	 result and reject the simplification if the bytes differ
	 (i.e. if the bit pattern has no exact float representation).  */
      auto_vec<target_unit, 128> buffer2 (buffer_bytes);
      if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
	return NULL_RTX;
      for (unsigned int i = 0; i < buffer_bytes; ++i)
	if (buffer[i] != buffer2[i])
	  return NULL_RTX;
    }
  return ret;
}
   7349 
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).

   OP is the value being accessed, INNERMODE its mode, and BYTE the
   byte offset of the subreg within OP.  Return the simplified rtx,
   or 0 if no simplifications are possible.  */
rtx
simplify_context::simplify_subreg (machine_mode outermode, rtx op,
				   machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  /* The byte offset must address a whole OUTERMODE-sized chunk ...  */
  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  /* ... and that chunk must start within the inner value.  */
  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  /* A subreg that changes neither the mode nor the offset is OP itself.  */
  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (GET_CODE (op) == CONST_VECTOR)
    byte = simplify_const_vector_byte_offset (op, byte);

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      /* A vector-to-vector subreg of a duplicate that keeps the element
	 mode is just a (possibly resized) duplicate of the same scalar.  */
      if (VECTOR_MODE_P (outermode)
	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return gen_vec_duplicate (outermode, elt);

      /* Extracting a single element of a duplicate gives the duplicated
	 scalar itself.  */
      if (outermode == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return elt;
    }

  /* Constant operands with a constant byte offset can be folded
     outright via byte-level encode/decode.  */
  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      unsigned HOST_WIDE_INT cbyte;
      if (byte.is_constant (&cbyte))
	{
	  if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
	    {
	      rtx tmp = simplify_const_vector_subreg (outermode, op,
						      innermode, cbyte);
	      if (tmp)
		return tmp;
	    }

	  fixed_size_mode fs_outermode;
	  if (is_a <fixed_size_mode> (outermode, &fs_outermode))
	    return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
	}
    }

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to OP's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
	  && known_eq (byte, 0U)
	  && known_eq (SUBREG_BYTE (op), 0))
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
						    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (maybe_lt (final_offset, 0)
	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
	      || !multiple_p (final_offset, outersize))
	    return NULL_RTX;
	}
      else
	{
	  poly_int64 required_offset = subreg_memory_offset (outermode,
							     innermostmode, 0);
	  if (maybe_ne (final_offset, required_offset))
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  /* Carry over promotion information when the new subreg still
	     covers the promoted (lowpart) bits of the innermost value.  */
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && known_ge (outersize, innersize)
	      && known_le (outersize, innermostsize)
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial register anyway.  */

	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && !(STRICT_ALIGNMENT && MEM_ALIGN (op) < GET_MODE_ALIGNMENT (outermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else if (known_ge (byte, part_size))
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      else
	/* With variable-sized modes we may be unable to tell which
	   half BYTE addresses.  */
	return NULL_RTX;

      /* The requested piece must fit entirely within the chosen part.  */
      if (maybe_gt (final_offset + outersize, part_size))
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* Simplify
	(subreg (vec_merge (X)
			   (vector)
			   (const_int ((1 << N) | M)))
		(N * sizeof (outermode)))
     to
	(subreg (X) (N * sizeof (outermode)))
   */
  unsigned int idx;
  if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
      && idx < HOST_BITS_PER_WIDE_INT
      && GET_CODE (op) == VEC_MERGE
      && GET_MODE_INNER (innermode) == outermode
      && CONST_INT_P (XEXP (op, 2))
      && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
    return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
	return CONST0_RTX (outermode);
    }

  /* Scalar-integer lowpart subregs can sometimes be folded directly.  */
  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
	 subreg are undefined, so this is safe regardless of whether
	 we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
	{
	  poly_wide_int val
	    = poly_wide_int::from (const_poly_int_value (op),
				   GET_MODE_PRECISION (int_outermode),
				   SIGNED);
	  return immed_wide_int_const (val, int_outermode);
	}

      if (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
	{
	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
	  if (tem)
	    return tem;
	}
    }

  /* If OP is a vector comparison and the subreg is not changing the
     number of elements or the size of the elements, change the result
     of the comparison to the new mode.  */
  if (COMPARISON_P (op)
      && VECTOR_MODE_P (outermode)
      && VECTOR_MODE_P (innermode)
      && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
      && known_eq (GET_MODE_UNIT_SIZE (outermode),
		    GET_MODE_UNIT_SIZE (innermode)))
    return simplify_gen_relational (GET_CODE (op), outermode, innermode,
				    XEXP (op, 0), XEXP (op, 1));
  return NULL_RTX;
}
   7625 
   7626 /* Make a SUBREG operation or equivalent if it folds.  */
   7627 
   7628 rtx
   7629 simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
   7630 				       machine_mode innermode,
   7631 				       poly_uint64 byte)
   7632 {
   7633   rtx newx;
   7634 
   7635   newx = simplify_subreg (outermode, op, innermode, byte);
   7636   if (newx)
   7637     return newx;
   7638 
   7639   if (GET_CODE (op) == SUBREG
   7640       || GET_CODE (op) == CONCAT
   7641       || GET_MODE (op) == VOIDmode)
   7642     return NULL_RTX;
   7643 
   7644   if (MODE_COMPOSITE_P (outermode)
   7645       && (CONST_SCALAR_INT_P (op)
   7646 	  || CONST_DOUBLE_AS_FLOAT_P (op)
   7647 	  || CONST_FIXED_P (op)
   7648 	  || GET_CODE (op) == CONST_VECTOR))
   7649     return NULL_RTX;
   7650 
   7651   if (validate_subreg (outermode, innermode, op, byte))
   7652     return gen_rtx_SUBREG (outermode, op, byte);
   7653 
   7654   return NULL_RTX;
   7655 }
   7656 
   7657 /* Generates a subreg to get the least significant part of EXPR (in mode
   7658    INNER_MODE) to OUTER_MODE.  */
   7659 
   7660 rtx
   7661 simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
   7662 				  machine_mode inner_mode)
   7663 {
   7664   return simplify_gen_subreg (outer_mode, expr, inner_mode,
   7665 			      subreg_lowpart_offset (outer_mode, inner_mode));
   7666 }
   7667 
   7668 /* Generate RTX to select element at INDEX out of vector OP.  */
   7669 
   7670 rtx
   7671 simplify_context::simplify_gen_vec_select (rtx op, unsigned int index)
   7672 {
   7673   gcc_assert (VECTOR_MODE_P (GET_MODE (op)));
   7674 
   7675   scalar_mode imode = GET_MODE_INNER (GET_MODE (op));
   7676 
   7677   if (known_eq (index * GET_MODE_SIZE (imode),
   7678 		subreg_lowpart_offset (imode, GET_MODE (op))))
   7679     {
   7680       rtx res = lowpart_subreg (imode, op, GET_MODE (op));
   7681       if (res)
   7682 	return res;
   7683     }
   7684 
   7685   rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (index)));
   7686   return gen_rtx_VEC_SELECT (imode, op, tmp);
   7687 }
   7688 
   7689 
   7690 /* Simplify X, an rtx expression.
   7691 
   7692    Return the simplified expression or NULL if no simplifications
   7693    were possible.
   7694 
   7695    This is the preferred entry point into the simplification routines;
   7696    however, we still allow passes to call the more specific routines.
   7697 
   7698    Right now GCC has three (yes, three) major bodies of RTL simplification
   7699    code that need to be unified.
   7700 
   7701 	1. fold_rtx in cse.cc.  This code uses various CSE specific
   7702 	   information to aid in RTL simplification.
   7703 
   7704 	2. simplify_rtx in combine.cc.  Similar to fold_rtx, except that
   7705 	   it uses combine specific information to aid in RTL
   7706 	   simplification.
   7707 
   7708 	3. The routines in this file.
   7709 
   7710 
   7711    Long term we want to only have one body of simplification code; to
   7712    get to that state I recommend the following steps:
   7713 
	1. Pore over fold_rtx & simplify_rtx and move any simplifications
   7715 	   which are not pass dependent state into these routines.
   7716 
   7717 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
   7718 	   use this routine whenever possible.
   7719 
   7720 	3. Allow for pass dependent state to be provided to these
   7721 	   routines and add simplifications based on the pass dependent
   7722 	   state.  Remove code from cse.cc & combine.cc that becomes
   7723 	   redundant/dead.
   7724 
   7725     It will take time, but ultimately the compiler will be easier to
   7726     maintain and improve.  It's totally silly that when we add a
   7727     simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
   7729 
   7730 rtx
   7731 simplify_rtx (const_rtx x)
   7732 {
   7733   const enum rtx_code code = GET_CODE (x);
   7734   const machine_mode mode = GET_MODE (x);
   7735 
   7736   switch (GET_RTX_CLASS (code))
   7737     {
   7738     case RTX_UNARY:
   7739       return simplify_unary_operation (code, mode,
   7740 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
   7741     case RTX_COMM_ARITH:
   7742       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
   7743 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
   7744 
   7745       /* Fall through.  */
   7746 
   7747     case RTX_BIN_ARITH:
   7748       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
   7749 
   7750     case RTX_TERNARY:
   7751     case RTX_BITFIELD_OPS:
   7752       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
   7753 					 XEXP (x, 0), XEXP (x, 1),
   7754 					 XEXP (x, 2));
   7755 
   7756     case RTX_COMPARE:
   7757     case RTX_COMM_COMPARE:
   7758       return simplify_relational_operation (code, mode,
   7759                                             ((GET_MODE (XEXP (x, 0))
   7760                                              != VOIDmode)
   7761                                             ? GET_MODE (XEXP (x, 0))
   7762                                             : GET_MODE (XEXP (x, 1))),
   7763                                             XEXP (x, 0),
   7764                                             XEXP (x, 1));
   7765 
   7766     case RTX_EXTRA:
   7767       if (code == SUBREG)
   7768 	return simplify_subreg (mode, SUBREG_REG (x),
   7769 				GET_MODE (SUBREG_REG (x)),
   7770 				SUBREG_BYTE (x));
   7771       break;
   7772 
   7773     case RTX_OBJ:
   7774       if (code == LO_SUM)
   7775 	{
   7776 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
   7777 	  if (GET_CODE (XEXP (x, 0)) == HIGH
   7778 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
   7779 	  return XEXP (x, 1);
   7780 	}
   7781       break;
   7782 
   7783     default:
   7784       break;
   7785     }
   7786   return NULL;
   7787 }
   7788 
   7789 #if CHECKING_P
   7790 
   7791 namespace selftest {
   7792 
   7793 /* Make a unique pseudo REG of mode MODE for use by selftests.  */
   7794 
   7795 static rtx
   7796 make_test_reg (machine_mode mode)
   7797 {
   7798   static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
   7799 
   7800   return gen_rtx_REG (mode, test_reg_num++);
   7801 }
   7802 
/* Verify scalar integer simplifications in MODE: binary identities,
   self-inverse and reflexive operations, distribution of AND over
   XOR/IOR/AND, and elimination of useless extensions.  */

static void
test_scalar_int_ops (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx six = GEN_INT (6);

  rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
  rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
  rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);

  rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
  rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
  rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);

  rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
  rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);

  /* Test some binary identities.  */
  ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));

  /* Test some self-inverse operations.  */
  ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));

  /* Test some reflexive operations.  */
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));

  /* x - x and x ^ x both fold to zero.  */
  ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
  ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));

  /* Test simplify_distributive_operation.  */
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
		 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
		 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
		 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));

  /* Test useless extensions are eliminated.  */
  ASSERT_RTX_EQ (op0, simplify_gen_unary (TRUNCATE, mode, op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (ZERO_EXTEND, mode, op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (SIGN_EXTEND, mode, op0, mode));
  ASSERT_RTX_EQ (op0, lowpart_subreg (mode, op0, mode));
}
   7870 
/* Verify some simplifications of integer extension/truncation.
   Machine mode BMODE is guaranteed to be wider than SMODE.  */

static void
test_scalar_int_ext_ops (machine_mode bmode, machine_mode smode)
{
  rtx sreg = make_test_reg (smode);

  /* Check truncation of extension: truncating a zero/sign extension
     or a paradoxical lowpart subreg back to SMODE yields SREG.  */
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
				     simplify_gen_unary (ZERO_EXTEND, bmode,
							 sreg, smode),
				     bmode),
		 sreg);
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
				     simplify_gen_unary (SIGN_EXTEND, bmode,
							 sreg, smode),
				     bmode),
		 sreg);
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
				     lowpart_subreg (bmode, sreg, smode),
				     bmode),
		 sreg);
}
   7895 
/* Verify more simplifications of integer extension/truncation.
   BMODE is wider than MMODE which is wider than SMODE.  */

static void
test_scalar_int_ext_ops2 (machine_mode bmode, machine_mode mmode,
			  machine_mode smode)
{
  rtx breg = make_test_reg (bmode);
  rtx mreg = make_test_reg (mmode);
  rtx sreg = make_test_reg (smode);

  /* Check truncate of truncate: two steps collapse to one.  */
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
				     simplify_gen_unary (TRUNCATE, mmode,
							 breg, bmode),
				     mmode),
		 simplify_gen_unary (TRUNCATE, smode, breg, bmode));

  /* Check extension of extension: two steps collapse to one, and a
     sign extension of a zero extension becomes a zero extension.  */
  ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND, bmode,
				     simplify_gen_unary (ZERO_EXTEND, mmode,
							 sreg, smode),
				     mmode),
		 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
  ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
				     simplify_gen_unary (SIGN_EXTEND, mmode,
							 sreg, smode),
				     mmode),
		 simplify_gen_unary (SIGN_EXTEND, bmode, sreg, smode));
  ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
				     simplify_gen_unary (ZERO_EXTEND, mmode,
							 sreg, smode),
				     mmode),
		 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));

  /* Check truncation of extension: the intermediate extension to BMODE
     drops out, leaving a single truncation MMODE -> SMODE.  */
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
				     simplify_gen_unary (ZERO_EXTEND, bmode,
							 mreg, mmode),
				     bmode),
		 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
				     simplify_gen_unary (SIGN_EXTEND, bmode,
							 mreg, mmode),
				     bmode),
		 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
				     lowpart_subreg (bmode, mreg, mmode),
				     bmode),
		 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
}
   7947 
   7948 
   7949 /* Verify some simplifications involving scalar expressions.  */
   7950 
   7951 static void
   7952 test_scalar_ops ()
   7953 {
   7954   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
   7955     {
   7956       machine_mode mode = (machine_mode) i;
   7957       if (SCALAR_INT_MODE_P (mode) && mode != BImode)
   7958 	test_scalar_int_ops (mode);
   7959     }
   7960 
   7961   test_scalar_int_ext_ops (HImode, QImode);
   7962   test_scalar_int_ext_ops (SImode, QImode);
   7963   test_scalar_int_ext_ops (SImode, HImode);
   7964   test_scalar_int_ext_ops (DImode, QImode);
   7965   test_scalar_int_ext_ops (DImode, HImode);
   7966   test_scalar_int_ext_ops (DImode, SImode);
   7967 
   7968   test_scalar_int_ext_ops2 (SImode, HImode, QImode);
   7969   test_scalar_int_ext_ops2 (DImode, HImode, QImode);
   7970   test_scalar_int_ext_ops2 (DImode, SImode, QImode);
   7971   test_scalar_int_ext_ops2 (DImode, SImode, HImode);
   7972 }
   7973 
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  Covers unary/binary
   operations on duplicates, VEC_SELECT/subreg element extraction, and
   duplicates of narrower vectors.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      /* And again with the final element.  */
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
			 simplify_binary_operation (VEC_SELECT, inner_mode,
						    duplicate, last_par));

      /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
      /* Skip this test for vectors of booleans, because offset is in bytes,
	 while vec_merge indices are in elements (usually bits).  */
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_BOOL)
	{
	  rtx vector_reg = make_test_reg (mode);
	  for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
	    {
	      /* The mask below cannot represent elements past the
		 HOST_WIDE_INT bit width.  */
	      if (i >= HOST_BITS_PER_WIDE_INT)
		break;
	      rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
	      rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
	      poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);

	      ASSERT_RTX_EQ (scalar_reg,
			     simplify_gen_subreg (inner_mode, vm,
						  mode, offset));
	    }
	}
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_DUPLICATE of a vector.  */
      rtx_vector_builder nbuilder (narrower_mode, 2, 1);
      nbuilder.quick_push (const0_rtx);
      nbuilder.quick_push (const1_rtx);
      rtx_vector_builder builder (mode, 2, 1);
      builder.quick_push (const0_rtx);
      builder.quick_push (const1_rtx);
      ASSERT_RTX_EQ (builder.build (),
		     simplify_unary_operation (VEC_DUPLICATE, mode,
					       nbuilder.build (),
					       narrower_mode));

      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
   8090 
   8091 /* Test vector simplifications involving VEC_SERIES in which the
   8092    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   8093    register that holds one element of MODE.  */
   8094 
   8095 static void
   8096 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
   8097 {
   8098   /* Test unary cases with VEC_SERIES arguments.  */
   8099   scalar_mode inner_mode = GET_MODE_INNER (mode);
   8100   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
   8101   rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
   8102   rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
   8103   rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
   8104   rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
   8105   rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
   8106   rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
   8107   rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
   8108 					 neg_scalar_reg);
   8109   ASSERT_RTX_EQ (series_0_r,
   8110 		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
   8111   ASSERT_RTX_EQ (series_r_m1,
   8112 		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
   8113   ASSERT_RTX_EQ (series_r_r,
   8114 		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
   8115 
   8116   /* Test that a VEC_SERIES with a zero step is simplified away.  */
   8117   ASSERT_RTX_EQ (duplicate,
   8118 		 simplify_binary_operation (VEC_SERIES, mode,
   8119 					    scalar_reg, const0_rtx));
   8120 
   8121   /* Test PLUS and MINUS with VEC_SERIES.  */
   8122   rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
   8123   rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
   8124   rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
   8125   ASSERT_RTX_EQ (series_r_r,
   8126 		 simplify_binary_operation (PLUS, mode, series_0_r,
   8127 					    duplicate));
   8128   ASSERT_RTX_EQ (series_r_1,
   8129 		 simplify_binary_operation (PLUS, mode, duplicate,
   8130 					    series_0_1));
   8131   ASSERT_RTX_EQ (series_r_m1,
   8132 		 simplify_binary_operation (PLUS, mode, duplicate,
   8133 					    series_0_m1));
   8134   ASSERT_RTX_EQ (series_0_r,
   8135 		 simplify_binary_operation (MINUS, mode, series_r_r,
   8136 					    duplicate));
   8137   ASSERT_RTX_EQ (series_r_m1,
   8138 		 simplify_binary_operation (MINUS, mode, duplicate,
   8139 					    series_0_1));
   8140   ASSERT_RTX_EQ (series_r_1,
   8141 		 simplify_binary_operation (MINUS, mode, duplicate,
   8142 					    series_0_m1));
   8143   ASSERT_RTX_EQ (series_0_m1,
   8144 		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
   8145 					    constm1_rtx));
   8146 
   8147   /* Test NEG on constant vector series.  */
   8148   ASSERT_RTX_EQ (series_0_m1,
   8149 		 simplify_unary_operation (NEG, mode, series_0_1, mode));
   8150   ASSERT_RTX_EQ (series_0_1,
   8151 		 simplify_unary_operation (NEG, mode, series_0_m1, mode));
   8152 
   8153   /* Test PLUS and MINUS on constant vector series.  */
   8154   rtx scalar2 = gen_int_mode (2, inner_mode);
   8155   rtx scalar3 = gen_int_mode (3, inner_mode);
   8156   rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
   8157   rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
   8158   rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
   8159   ASSERT_RTX_EQ (series_1_1,
   8160 		 simplify_binary_operation (PLUS, mode, series_0_1,
   8161 					    CONST1_RTX (mode)));
   8162   ASSERT_RTX_EQ (series_0_m1,
   8163 		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
   8164 					    series_0_m1));
   8165   ASSERT_RTX_EQ (series_1_3,
   8166 		 simplify_binary_operation (PLUS, mode, series_1_1,
   8167 					    series_0_2));
   8168   ASSERT_RTX_EQ (series_0_1,
   8169 		 simplify_binary_operation (MINUS, mode, series_1_1,
   8170 					    CONST1_RTX (mode)));
   8171   ASSERT_RTX_EQ (series_1_1,
   8172 		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
   8173 					    series_0_m1));
   8174   ASSERT_RTX_EQ (series_1_1,
   8175 		 simplify_binary_operation (MINUS, mode, series_1_3,
   8176 					    series_0_2));
   8177 
   8178   /* Test MULT between constant vectors.  */
   8179   rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
   8180   rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
   8181   rtx scalar9 = gen_int_mode (9, inner_mode);
   8182   rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
   8183   ASSERT_RTX_EQ (series_0_2,
   8184 		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
   8185   ASSERT_RTX_EQ (series_3_9,
   8186 		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
   8187   if (!GET_MODE_NUNITS (mode).is_constant ())
   8188     ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
   8189 					     series_0_1));
   8190 
   8191   /* Test ASHIFT between constant vectors.  */
   8192   ASSERT_RTX_EQ (series_0_2,
   8193 		 simplify_binary_operation (ASHIFT, mode, series_0_1,
   8194 					    CONST1_RTX (mode)));
   8195   if (!GET_MODE_NUNITS (mode).is_constant ())
   8196     ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
   8197 					     series_0_1));
   8198 }
   8199 
   8200 static rtx
   8201 simplify_merge_mask (rtx x, rtx mask, int op)
   8202 {
   8203   return simplify_context ().simplify_merge_mask (x, mask, op);
   8204 }
   8205 
   8206 /* Verify simplify_merge_mask works correctly.  */
   8207 
   8208 static void
   8209 test_vec_merge (machine_mode mode)
   8210 {
   8211   rtx op0 = make_test_reg (mode);
   8212   rtx op1 = make_test_reg (mode);
   8213   rtx op2 = make_test_reg (mode);
   8214   rtx op3 = make_test_reg (mode);
   8215   rtx op4 = make_test_reg (mode);
   8216   rtx op5 = make_test_reg (mode);
   8217   rtx mask1 = make_test_reg (SImode);
   8218   rtx mask2 = make_test_reg (SImode);
   8219   rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
   8220   rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
   8221   rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
   8222 
   8223   /* Simple vec_merge.  */
   8224   ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
   8225   ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
   8226   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
   8227   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
   8228 
   8229   /* Nested vec_merge.
   8230      It's tempting to make this simplify right down to opN, but we don't
   8231      because all the simplify_* functions assume that the operands have
   8232      already been simplified.  */
   8233   rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
   8234   ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
   8235   ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
   8236 
   8237   /* Intermediate unary op. */
   8238   rtx unop = gen_rtx_NOT (mode, vm1);
   8239   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
   8240 		 simplify_merge_mask (unop, mask1, 0));
   8241   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
   8242 		 simplify_merge_mask (unop, mask1, 1));
   8243 
   8244   /* Intermediate binary op. */
   8245   rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
   8246   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
   8247 		 simplify_merge_mask (binop, mask1, 0));
   8248   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
   8249 		 simplify_merge_mask (binop, mask1, 1));
   8250 
   8251   /* Intermediate ternary op. */
   8252   rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
   8253   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
   8254 		 simplify_merge_mask (tenop, mask1, 0));
   8255   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
   8256 		 simplify_merge_mask (tenop, mask1, 1));
   8257 
   8258   /* Side effects.  */
   8259   rtx badop0 = gen_rtx_PRE_INC (mode, op0);
   8260   rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
   8261   ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
   8262   ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
   8263 
   8264   /* Called indirectly.  */
   8265   ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
   8266 		 simplify_rtx (nvm));
   8267 }
   8268 
   8269 /* Test subregs of integer vector constant X, trying elements in
   8270    the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
   8271    where NELTS is the number of elements in X.  Subregs involving
   8272    elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */
   8273 
static void
test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
			   unsigned int first_valid = 0)
{
  /* X is a CONST_VECTOR; INT_MODE is its element mode.  */
  machine_mode inner_mode = GET_MODE (x);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);

  /* Try every vector machine mode as the outer mode of the subreg.  */
  for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
    {
      machine_mode outer_mode = (machine_mode) modei;
      if (!VECTOR_MODE_P (outer_mode))
	continue;

      unsigned int outer_nunits;
      if (GET_MODE_INNER (outer_mode) == int_mode
	  && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
	  && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
	{
	  /* Test subregs in which the outer mode is a smaller,
	     constant-sized vector of the same element type.  */
	  unsigned int limit
	    = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
	  for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
	    {
	      /* The expected result holds elements
		 [ELT, ELT + OUTER_NUNITS) of X; subregs starting below
		 FIRST_VALID are expected to fail (NULL_RTX).  */
	      rtx expected = NULL_RTX;
	      if (elt >= first_valid)
		{
		  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
		  for (unsigned int i = 0; i < outer_nunits; ++i)
		    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
		  expected = builder.build ();
		}
	      /* ELT_BIAS shifts the byte offset by a (possibly
		 variable) number of leading elements.  */
	      poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
	      ASSERT_RTX_EQ (expected,
			     simplify_subreg (outer_mode, x,
					      inner_mode, byte));
	    }
	}
      else if (known_eq (GET_MODE_SIZE (outer_mode),
			 GET_MODE_SIZE (inner_mode))
	       && known_eq (elt_bias, 0U)
	       /* Only use vector bool modes whose bit size equals their
		  element count.  */
	       && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
		   || known_eq (GET_MODE_BITSIZE (outer_mode),
				GET_MODE_NUNITS (outer_mode)))
	       /* For float modes, only use formats whose ieee_bits match
		  the unit precision.  NOTE(review): presumably this
		  excludes formats with padding bits that would not
		  round-trip — confirm.  */
	       && (!FLOAT_MODE_P (outer_mode)
		   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
		       == GET_MODE_UNIT_PRECISION (outer_mode)))
	       && (GET_MODE_SIZE (inner_mode).is_constant ()
		   || !CONST_VECTOR_STEPPED_P (x)))
	{
	  /* Try converting to OUTER_MODE and back; the round trip must
	     reproduce X exactly.  */
	  rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
	  ASSERT_TRUE (outer_x != NULL_RTX);
	  ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
					     outer_mode, 0));
	}
    }

  if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
    {
      /* Test each byte in the element range via QImode subregs.  */
      unsigned int limit
	= constant_lower_bound (GET_MODE_SIZE (inner_mode));
      for (unsigned int i = 0; i < limit; ++i)
	{
	  unsigned int elt = i / GET_MODE_SIZE (int_mode);
	  rtx expected = NULL_RTX;
	  if (elt >= first_valid)
	    {
	      /* Compute which byte of element ELT a subreg at offset I
		 reads, accounting for byte endianness.  */
	      unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
	      if (BYTES_BIG_ENDIAN)
		byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
	      rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
	      wide_int shifted_elt
		= wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
	      expected = immed_wide_int_const (shifted_elt, QImode);
	    }
	  poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
	  ASSERT_RTX_EQ (expected,
			 simplify_subreg (QImode, x, inner_mode, byte));
	}
    }
}
   8357 
   8358 /* Test constant subregs of integer vector mode INNER_MODE, using 1
   8359    element per pattern.  */
   8360 
   8361 static void
   8362 test_vector_subregs_repeating (machine_mode inner_mode)
   8363 {
   8364   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
   8365   unsigned int min_nunits = constant_lower_bound (nunits);
   8366   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
   8367   unsigned int count = gcd (min_nunits, 8);
   8368 
   8369   rtx_vector_builder builder (inner_mode, count, 1);
   8370   for (unsigned int i = 0; i < count; ++i)
   8371     builder.quick_push (gen_int_mode (8 - i, int_mode));
   8372   rtx x = builder.build ();
   8373 
   8374   test_vector_subregs_modes (x);
   8375   if (!nunits.is_constant ())
   8376     test_vector_subregs_modes (x, nunits - min_nunits);
   8377 }
   8378 
   8379 /* Test constant subregs of integer vector mode INNER_MODE, using 2
   8380    elements per pattern.  */
   8381 
   8382 static void
   8383 test_vector_subregs_fore_back (machine_mode inner_mode)
   8384 {
   8385   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
   8386   unsigned int min_nunits = constant_lower_bound (nunits);
   8387   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
   8388   unsigned int count = gcd (min_nunits, 4);
   8389 
   8390   rtx_vector_builder builder (inner_mode, count, 2);
   8391   for (unsigned int i = 0; i < count; ++i)
   8392     builder.quick_push (gen_int_mode (i, int_mode));
   8393   for (unsigned int i = 0; i < count; ++i)
   8394     builder.quick_push (gen_int_mode (-(int) i, int_mode));
   8395   rtx x = builder.build ();
   8396 
   8397   test_vector_subregs_modes (x);
   8398   if (!nunits.is_constant ())
   8399     test_vector_subregs_modes (x, nunits - min_nunits, count);
   8400 }
   8401 
   8402 /* Test constant subregs of integer vector mode INNER_MODE, using 3
   8403    elements per pattern.  */
   8404 
   8405 static void
   8406 test_vector_subregs_stepped (machine_mode inner_mode)
   8407 {
   8408   /* Build { 0, 1, 2, 3, ... }.  */
   8409   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
   8410   rtx_vector_builder builder (inner_mode, 1, 3);
   8411   for (unsigned int i = 0; i < 3; ++i)
   8412     builder.quick_push (gen_int_mode (i, int_mode));
   8413   rtx x = builder.build ();
   8414 
   8415   test_vector_subregs_modes (x);
   8416 }
   8417 
   8418 /* Test constant subregs of integer vector mode INNER_MODE.  */
   8419 
static void
test_vector_subregs (machine_mode inner_mode)
{
  /* Cover the three constant encodings: repeating (1 element per
     pattern), fore/back (2 per pattern) and stepped (3 per pattern).  */
  test_vector_subregs_repeating (inner_mode);
  test_vector_subregs_fore_back (inner_mode);
  test_vector_subregs_stepped (inner_mode);
}
   8427 
   8428 /* Verify some simplifications involving vectors.  */
   8429 
   8430 static void
   8431 test_vector_ops ()
   8432 {
   8433   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
   8434     {
   8435       machine_mode mode = (machine_mode) i;
   8436       if (VECTOR_MODE_P (mode))
   8437 	{
   8438 	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
   8439 	  test_vector_ops_duplicate (mode, scalar_reg);
   8440 	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
   8441 	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
   8442 	    {
   8443 	      test_vector_ops_series (mode, scalar_reg);
   8444 	      test_vector_subregs (mode);
   8445 	    }
   8446 	  test_vec_merge (mode);
   8447 	}
   8448     }
   8449 }
   8450 
/* Dispatcher for the CONST_POLY_INT tests below, parameterized on the
   number of poly_int coefficients N.  */
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};
   8456 
/* With a single coefficient there are no CONST_POLY_INTs, so there is
   nothing to test.  */
template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};
   8462 
   8463 /* Test various CONST_POLY_INT properties.  */
   8464 
template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  /* poly_int64 (A, B) is the value A + B * X for runtime invariant X.  */
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  /* x3 == x1 + x2 with each term wrapped to QImode's range.  */
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  /* x5 == x4 * 6; x6 == x4 << 2; x7 == x4 | 2.  */
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  /* x9 == -x8; x10 == ~x8.  */
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  /* Widening and narrowing subregs between QImode and HImode.  */
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}
   8497 
   8498 /* Run all of the selftests within this file.  */
   8499 
void
simplify_rtx_cc_tests ()
{
  test_scalar_ops ();
  test_vector_ops ();
  /* The <1> specialization is a no-op, so this only does real work when
     poly_ints have more than one coefficient.  */
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}
   8507 
   8508 } // namespace selftest
   8509 
   8510 #endif /* CHECKING_P */
   8511