/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging; illustrated below)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
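
/* For illustration, cross jumping merges identical tails of two blocks:

	A: insn1		B: insn1
	   insn2		   insn2
	   jump L1		   jump L1

   One copy of the common sequence is removed and the control flow that
   reached it is redirected into the surviving copy (see
   try_crossjump_to_edge below).  */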

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "cselib.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "dce.h"
#include "dbgcnt.h"
#include "rtl-iter.h"
#include "regs.h"
#include "function-abi.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
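
/* A forwarder block does no work of its own: apart from a possible label
   it contains only an unconditional jump to its single successor, e.g.

	L1: goto L2;

   forwarder_block_p makes the actual test; the BB_FORWARDER_BLOCK flag
   caches its result.  */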

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occurred;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);

/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute the forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
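
/* For illustration, the transformation performed here is

	if (cond) goto L2;		if (!cond) goto L1;
	goto L1;		=>
     L2: ...			     L2: ...

   i.e. the conditional branch is inverted to target L1 and the forwarder
   block containing "goto L1" is deleted.  */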

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
		    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}

/* Attempt to prove that the operation is a NOOP using CSElib, or mark its
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      dest = XEXP (exp, 0);
      if (REG_P (dest))
	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    case SET:
      if (cselib_redundant_set_p (exp))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    default:
      return false;
    }
}

/* Return true if X contains a register in NONEQUAL.  */
static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
	{
	  unsigned int end_regno = END_REGNO (x);
	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
	    if (REGNO_REG_SET_P (nonequal, regno))
	      return true;
	}
    }
  return false;
}

/* Attempt to prove that the basic block B will have no side effects and
   will always continue along the same edge if reached via E.  Return that
   edge if it exists, NULL otherwise.  */
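
/* For example, if block B merely repeats the test already performed in
   E->src and contains no side effects,

	E->src:  ... if (x == 0) goto B;
	B:       if (x == 0) goto L1;

   then reaching B via the taken edge E determines the outcome of B's
   branch, so E can be threaded directly to L1.  */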

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;

  /* Jump threading may cause fixup_partitions to introduce new crossing edges,
     which is not allowed after reload.  */
  gcc_checking_assert (!reload_completed || !crtl->has_bb_partition);

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* The second branch must end with an onlyjump, as we will eliminate
     the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operands are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example interval comparisons that
     dominate even when the operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Punt if BB_END (e->src) is a doloop-like conditional jump that modifies
     the registers used in cond1.  */
  if (modified_in_p (cond1, BB_END (e->src)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued via the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      /* cond2 must not mention any register that is not equal in the
	 former block.  Check this before processing the instruction,
	 as BB_END (b) could also contain clobbers.  */
      if (insn == BB_END (b)
	  && mentions_nonequal_regs (cond2, nonequal))
	goto failed_exit;

      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have liveness information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  if (!REG_SET_EMPTY_P (nonequal))
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
      bool new_target_threaded = false;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we may succeed in forwarding a
	 fallthru edge to the same place as the branch edge of a conditional
	 branch, turning the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      while (counter < n_basic_blocks_for_fn (cfun))
	{
	  basic_block new_target = NULL;
	  may_thread |= (target->flags & BB_MODIFIED) != 0;

	  if (FORWARDER_BLOCK_P (target)
	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks_for_fn (cfun);
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  location_t new_locus = single_succ_edge (target)->goto_locus;
		  location_t locus = goto_locus;

		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
		      && new_locus != locus)
		    new_target = NULL;
		  else
		    {
		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			locus = new_locus;

		      rtx_insn *last = BB_END (target);
		      if (DEBUG_INSN_P (last))
			last = prev_nondebug_insn (last);
		      if (last && INSN_P (last))
			new_locus = INSN_LOCATION (last);
		      else
			new_locus = UNKNOWN_LOCATION;

		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
			  && new_locus != locus)
			new_target = NULL;
		      else
			{
			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			    locus = new_locus;

			  goto_locus = locus;
			}
		    }
		}
	    }

	  /* Thread over only one edge at a time, to simplify updating
	     of the probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge,
					      n_basic_blocks_for_fn (cfun));
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks_for_fn (cfun);
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges
			      < (n_basic_blocks_for_fn (cfun)
				 - NUM_FIXED_BLOCKS));
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  /* Do not turn a non-crossing jump into a crossing one.  Depending
	     on the target it may require a different instruction pattern.  */
	  if ((e->flags & EDGE_CROSSING)
	      || BB_PARTITION (first) == BB_PARTITION (new_target))
	    {
	      target = new_target;
	      threaded |= new_target_threaded;
	    }
	}

      if (counter >= n_basic_blocks_for_fn (cfun))
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  profile_count edge_count = e->count ();
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if the target is the exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  /* It is possible that as a result of threading we've
		     removed the edge, as it is threaded to the fallthru
		     edge.  Avoid getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx_insn *label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);


  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
	return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might be
	 if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure not to munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}

/* Remove the memory attributes of MEM expressions X and Y
   if they are not equal.  */
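
/* For example, if X and Y differ only in their alias sets, both alias
   sets are reset to 0, the conservative "can conflict with anything"
   value, so that after tail merging either insn can safely stand in for
   the other.  */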

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }
	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
		   || (MEM_OFFSET_KNOWN_P (x)
		       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
	    {
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }

	  if (!MEM_SIZE_KNOWN_P (x))
	    clear_mem_size (y);
	  else if (!MEM_SIZE_KNOWN_P (y))
	    clear_mem_size (x);
	  else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
	    set_mem_size (x, MEM_SIZE (y));
	  else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
	    set_mem_size (y, MEM_SIZE (x));
	  else
	    {
	      /* The sizes aren't ordered, so we can't merge them.  */
	      clear_mem_size (x);
	      clear_mem_size (y);
	    }

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }
  if (code == MEM)
    {
      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
	{
	  MEM_READONLY_P (x) = 0;
	  MEM_READONLY_P (y) = 0;
	}
      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
	{
	  MEM_NOTRAP_P (x) = 0;
	  MEM_NOTRAP_P (y) = 0;
	}
      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
	{
	  MEM_VOLATILE_P (x) = 1;
	  MEM_VOLATILE_P (y) = 1;
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}

/* Check whether patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
        continue;
      if (reload_completed
          ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
        continue;

      return false;
    }

  return true;
}

/* NOTE1 is the REG_EQUAL note, if any, attached to an insn
   that is a single_set with a SET_SRC of SRC1.  Similarly
   for NOTE2/SRC2.

   So effectively NOTE1/NOTE2 are an alternate form of
   SRC1/SRC2 respectively.

   Return nonzero if SRC1 or NOTE1 has the same constant
   integer value as SRC2 or NOTE2.  Otherwise return zero.  */
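
/* For example,

	i1: (set (reg 0) (const_int 42))
	i2: (set (reg 0) (reg 1))   with REG_EQUAL note (const_int 42)

   compare equal here: SRC1 is a CONST_INT that matches NOTE2.  */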
static int
values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
{
  if (note1
      && note2
      && CONST_INT_P (XEXP (note1, 0))
      && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
    return 1;

  if (!note1
      && !note2
      && CONST_INT_P (src1)
      && CONST_INT_P (src2)
      && rtx_equal_p (src1, src2))
    return 1;

  if (note1
      && CONST_INT_P (src2)
      && rtx_equal_p (XEXP (note1, 0), src2))
    return 1;

  if (note2
      && CONST_INT_P (src1)
      && rtx_equal_p (XEXP (note2, 0), src1))
    return 1;

  return 0;
}

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
can_replace_by (rtx_insn *i1, rtx_insn *i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
        ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find an identical reg_equiv or reg_equal note, which implies that the
     2 sets set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);

  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);

  if (!values_equal_p (note1, note2, src1, src2))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:
        |bo fw bw no
     ---+-----------
     bo |bo fw bw no
     fw |-- fw no no
     bw |-- -- bw no
     no |-- -- -- no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Array of flags indexed by reg note kind, true if the given
   reg note is CFA related.  */
static const bool reg_note_cfa_p[] = {
#undef REG_CFA_NOTE
#define DEF_REG_NOTE(NAME) false,
#define REG_CFA_NOTE(NAME) true,
#include "reg-notes.def"
#undef REG_CFA_NOTE
#undef DEF_REG_NOTE
  false
};

/* Return true if I1 and I2 have identical CFA notes (the same order
   and equivalent content).  */

static bool
insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
{
  rtx n1, n2;
  for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
       n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
    {
      /* Skip over reg notes not related to CFI information.  */
      while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
	n1 = XEXP (n1, 1);
      while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
	n2 = XEXP (n2, 1);
      if (n1 == NULL_RTX && n2 == NULL_RTX)
	return true;
      if (n1 == NULL_RTX || n2 == NULL_RTX)
	return false;
      if (XEXP (n1, 0) == XEXP (n2, 0))
	;
      else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
	return false;
      else if (!(reload_completed
		 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
		 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
	return false;
    }
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
        return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
         have differing incoming stack levels.  */
      if (!frame_pointer_needed
	  && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
	return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  /* Do not allow cross-jumping between frame related insns and other
     insns.  */
  if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.cc with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
	return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
	return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
	return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
	 otherwise errors might be reported on incorrect lines.  */
      if (flag_sanitize & SANITIZE_ADDRESS)
	{
	  rtx call = get_call_rtx_from (i1);
	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
	    {
	      rtx symbol = XEXP (XEXP (call, 0), 0);
	      if (SYMBOL_REF_DECL (symbol)
		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
		{
		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
		       == BUILT_IN_NORMAL)
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 >= BUILT_IN_ASAN_REPORT_LOAD1
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 <= BUILT_IN_ASAN_STOREN)
		    return dir_none;
		}
	    }
	}

      if (insn_callee_abi (i1) != insn_callee_abi (i2))
        return dir_none;
    }

  /* If both i1 and i2 are frame related, verify all the CFA notes
     in the same order and with the same content.  */
  if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
    return dir_none;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (i1_regset != i2_regset)
	return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}

/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

static void
merge_notes (rtx_insn *i1, rtx_insn *i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}

/* Walk from I1 in BB1 backward to the previous non-debug insn, and return
   the resulting insn in *I1 and the corresponding bb in *BB1.  At the head
   of a bb, if there is a predecessor bb that reaches this bb via fallthru,
   and FOLLOW_FALLTHRU is set, walk further into the predecessor bb and
   record this in *DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
                       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
        {
          *i1 = PREV_INSN (*i1);
          continue;
        }

      if (!follow_fallthru)
        return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
          || !single_succ_p (fallthru->src))
        return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are either equivalent, or allow forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only equivalent
   sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
		      rtx_insn **f2, enum replace_direction *dir_p)
{
  rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
  int ninsns = 0;
  enum replace_direction dir, last_dir, afterlast_dir;
  bool follow_fallthru, did_fallthru;

  if (dir_p)
    dir = *dir_p;
  else
    dir = dir_both;
  afterlast_dir = dir;
  last_dir = afterlast_dir;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except an unconditional jump as an insn.
	 Don't count any jumps if dir_p is NULL.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* In the following example, we can replace all jumps to C by jumps to A.

         This removes 4 duplicate insns.
         [bb A] insn1            [bb C] insn1
                insn2                   insn2
         [bb B] insn3                   insn3
                insn4                   insn4
                jump_insn               jump_insn

         We could also replace all jumps to A by jumps to C, but that leaves B
         alive, and removes only 2 duplicate insns.  In a subsequent crossjump
         step, all jumps to B would be replaced with jumps to the middle of C,
         achieving the same result with more effort.
         So we allow only the first possibility, which means that we don't allow
         fallthru in the block that's being replaced.  */

      follow_fallthru = dir_p && dir != dir_forward;
      walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_backward;

      follow_fallthru = dir_p && dir != dir_backward;
      walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_forward;

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      /* Do not turn a crossing edge into a non-crossing one, or vice
	 versa, after reload.  */
      if (BB_PARTITION (BLOCK_FOR_INSN (i1))
	  != BB_PARTITION (BLOCK_FOR_INSN (i2))
	  && reload_completed)
	break;

      dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
      if (dir == dir_none || (!dir_p && dir != dir_both))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  afterlast_dir = last_dir;
	  last_dir = dir;
	  if (active_insn_p (i1))
	    ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as possible.  */
  if (ninsns)
    {
      bb1 = BLOCK_FOR_INSN (last1);
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      bb2 = BLOCK_FOR_INSN (last2);
      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  if (dir_p)
    *dir_p = last_dir;
  return ninsns;
}

/* Like flow_find_cross_jump, except start looking for a matching sequence
   from the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions; in that case only active insns are counted, otherwise all
   INSN_P insns are.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
				  rtx_insn **f2, int stop_after)
{
  rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
	{
	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i1 = NEXT_INSN (i1);
	}

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
	{
	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i2 = NEXT_INSN (i2);
	}

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
	break;

      if (NOTE_P (i1) || NOTE_P (i2)
	  || JUMP_P (i1) || JUMP_P (i2))
	break;

      /* A sanity check to make sure we're not merging insns with different
	 effects on EH.  If only one of them ends a basic block, it shouldn't
	 have an EH edge; if both end a basic block, there should be the same
	 number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
	   && nehedges1 > 0)
	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
	      && nehedges2 > 0)
	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
	      && nehedges1 != nehedges2))
	break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  beforelast1 = last1, beforelast2 = last2;
	  last1 = i1, last2 = i2;
	  if (!stop_after || active_insn_p (i1))
	    ninsns++;
	}

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
	  || (stop_after > 0 && ninsns == stop_after))
	break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
   1583 /* Return true iff outgoing edges of BB1 and BB2 match, together with
   1584    the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.
   1586 
   1587    We may assume that there exists one edge with a common destination.  */
   1588 
   1589 static bool
   1590 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
   1591 {
   1592   int nehedges1 = 0, nehedges2 = 0;
   1593   edge fallthru1 = 0, fallthru2 = 0;
   1594   edge e1, e2;
   1595   edge_iterator ei;
   1596 
   1597   /* If we performed shrink-wrapping, edges to the exit block can
   1598      only be distinguished for JUMP_INSNs.  The two paths may differ in
   1599      whether they went through the prologue.  Sibcalls are fine, we know
   1600      that we either didn't need or inserted an epilogue before them.  */
   1601   if (crtl->shrink_wrapped
   1602       && single_succ_p (bb1)
   1603       && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
   1604       && (!JUMP_P (BB_END (bb1))
	  /* Punt if the only successor is a fake edge to exit; the jump
	     must be some weird one.  */
   1607 	  || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
   1608       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
   1609     return false;
   1610 
   1611   /* If BB1 has only one successor, we may be looking at either an
   1612      unconditional jump, or a fake edge to exit.  */
   1613   if (single_succ_p (bb1)
   1614       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
   1615       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
   1616     return (single_succ_p (bb2)
   1617 	    && (single_succ_edge (bb2)->flags
   1618 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
   1619 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
   1620 
   1621   /* Match conditional jumps - this may get tricky when fallthru and branch
   1622      edges are crossed.  */
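  /* As a purely hypothetical illustration of crossed edges: if BB1 ends in
     "if (x == 0) goto A;" and falls through to B, while BB2 ends in
     "if (x != 0) goto B;" and falls through to A, then BB1's branch edge
     pairs with BB2's fallthru edge and vice versa.  The code below detects
     this (REVERSE becomes true) and compares BB2's condition with its
     comparison code reversed.  */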
   1623   if (EDGE_COUNT (bb1->succs) == 2
   1624       && any_condjump_p (BB_END (bb1))
   1625       && onlyjump_p (BB_END (bb1)))
   1626     {
   1627       edge b1, f1, b2, f2;
   1628       bool reverse, match;
   1629       rtx set1, set2, cond1, cond2;
   1630       enum rtx_code code1, code2;
   1631 
   1632       if (EDGE_COUNT (bb2->succs) != 2
   1633 	  || !any_condjump_p (BB_END (bb2))
   1634 	  || !onlyjump_p (BB_END (bb2)))
   1635 	return false;
   1636 
   1637       b1 = BRANCH_EDGE (bb1);
   1638       b2 = BRANCH_EDGE (bb2);
   1639       f1 = FALLTHRU_EDGE (bb1);
   1640       f2 = FALLTHRU_EDGE (bb2);
   1641 
   1642       /* Get around possible forwarders on fallthru edges.  Other cases
   1643 	 should be optimized out already.  */
   1644       if (FORWARDER_BLOCK_P (f1->dest))
   1645 	f1 = single_succ_edge (f1->dest);
   1646 
   1647       if (FORWARDER_BLOCK_P (f2->dest))
   1648 	f2 = single_succ_edge (f2->dest);
   1649 
   1650       /* To simplify use of this function, return false if there are
   1651 	 unneeded forwarder blocks.  These will get eliminated later
   1652 	 during cleanup_cfg.  */
   1653       if (FORWARDER_BLOCK_P (f1->dest)
   1654 	  || FORWARDER_BLOCK_P (f2->dest)
   1655 	  || FORWARDER_BLOCK_P (b1->dest)
   1656 	  || FORWARDER_BLOCK_P (b2->dest))
   1657 	return false;
   1658 
   1659       if (f1->dest == f2->dest && b1->dest == b2->dest)
   1660 	reverse = false;
   1661       else if (f1->dest == b2->dest && b1->dest == f2->dest)
   1662 	reverse = true;
   1663       else
   1664 	return false;
   1665 
   1666       set1 = pc_set (BB_END (bb1));
   1667       set2 = pc_set (BB_END (bb2));
   1668       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
   1669 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
   1670 	reverse = !reverse;
   1671 
   1672       cond1 = XEXP (SET_SRC (set1), 0);
   1673       cond2 = XEXP (SET_SRC (set2), 0);
   1674       code1 = GET_CODE (cond1);
   1675       if (reverse)
   1676 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
   1677       else
   1678 	code2 = GET_CODE (cond2);
   1679 
   1680       if (code2 == UNKNOWN)
   1681 	return false;
   1682 
   1683       /* Verify codes and operands match.  */
   1684       match = ((code1 == code2
   1685 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
   1686 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
   1687 	       || (code1 == swap_condition (code2)
   1688 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
   1689 					      XEXP (cond2, 0))
   1690 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
   1691 					      XEXP (cond2, 1))));
   1692 
      /* If we return true, we will join the blocks, which means that
	 we will only have one branch prediction bit to work with.  Thus
   1695 	 we require the existing branches to have probabilities that are
   1696 	 roughly similar.  */
   1697       if (match
   1698 	  && optimize_bb_for_speed_p (bb1)
   1699 	  && optimize_bb_for_speed_p (bb2))
   1700 	{
   1701 	  profile_probability prob2;
   1702 
   1703 	  if (b1->dest == b2->dest)
   1704 	    prob2 = b2->probability;
   1705 	  else
   1706 	    /* Do not use f2 probability as f2 may be forwarded.  */
   1707 	    prob2 = b2->probability.invert ();
   1708 
	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes; e.g. a branch taken 90% of the time will not be
	     merged with one taken only 20% of the time.  */
   1712 	  if (b1->probability.differs_lot_from_p (prob2))
   1713 	    {
   1714 	      if (dump_file)
   1715 		{
   1716 		  fprintf (dump_file,
   1717 			   "Outcomes of branch in bb %i and %i differ too"
   1718 			   " much (", bb1->index, bb2->index);
   1719 		  b1->probability.dump (dump_file);
   1720 		  prob2.dump (dump_file);
   1721 		  fprintf (dump_file, ")\n");
   1722 		}
   1723 	      return false;
   1724 	    }
   1725 	}
   1726 
   1727       if (dump_file && match)
   1728 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
   1729 		 bb1->index, bb2->index);
   1730 
   1731       return match;
   1732     }
   1733 
   1734   /* Generic case - we are seeing a computed jump, table jump or trapping
   1735      instruction.  */
   1736 
   1737   /* Check whether there are tablejumps in the end of BB1 and BB2.
   1738      Return true if they are identical.  */
   1739     {
   1740       rtx_insn *label1, *label2;
   1741       rtx_jump_table_data *table1, *table2;
   1742 
   1743       if (tablejump_p (BB_END (bb1), &label1, &table1)
   1744 	  && tablejump_p (BB_END (bb2), &label2, &table2)
   1745 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
   1746 	{
	  /* The labels should never be the same rtx.  If they really were
	     the same, the jump tables would be the same too, so disable
	     crossjumping of blocks BB1 and BB2 in that case: when deleting
	     the common insns at the end of BB1 with delete_basic_block ()
	     the jump table would be deleted too.  */
	  /* If LABEL2 is referenced in BB1->END, do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
   1754 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
   1755 	    {
   1756 	      /* Set IDENTICAL to true when the tables are identical.  */
   1757 	      bool identical = false;
   1758 	      rtx p1, p2;
   1759 
   1760 	      p1 = PATTERN (table1);
   1761 	      p2 = PATTERN (table2);
   1762 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
   1763 		{
   1764 		  identical = true;
   1765 		}
   1766 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
   1767 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
   1768 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
   1769 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
   1770 		{
   1771 		  int i;
   1772 
   1773 		  identical = true;
   1774 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
   1775 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
   1776 		      identical = false;
   1777 		}
   1778 
   1779 	      if (identical)
   1780 		{
   1781 		  bool match;
   1782 
   1783 		  /* Temporarily replace references to LABEL1 with LABEL2
   1784 		     in BB1->END so that we could compare the instructions.  */
   1785 		  replace_label_in_insn (BB_END (bb1), label1, label2, false);
   1786 
   1787 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
   1788 			   == dir_both);
   1789 		  if (dump_file && match)
   1790 		    fprintf (dump_file,
   1791 			     "Tablejumps in bb %i and %i match.\n",
   1792 			     bb1->index, bb2->index);
   1793 
   1794 		  /* Set the original label in BB1->END because when deleting
   1795 		     a block whose end is a tablejump, the tablejump referenced
   1796 		     from the instruction is deleted too.  */
   1797 		  replace_label_in_insn (BB_END (bb1), label2, label1, false);
   1798 
   1799 		  return match;
   1800 		}
   1801 	    }
   1802 	  return false;
   1803 	}
   1804     }
   1805 
   1806   /* Find the last non-debug non-note instruction in each bb, except
   1807      stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
   1808      handles that case specially. old_insns_match_p does not handle
   1809      other types of instruction notes.  */
   1810   rtx_insn *last1 = BB_END (bb1);
   1811   rtx_insn *last2 = BB_END (bb2);
  while (!NOTE_INSN_BASIC_BLOCK_P (last1)
	 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
    last1 = PREV_INSN (last1);
  while (!NOTE_INSN_BASIC_BLOCK_P (last2)
	 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
    last2 = PREV_INSN (last2);
   1818   gcc_assert (last1 && last2);
   1819 
   1820   /* First ensure that the instructions match.  There may be many outgoing
   1821      edges so this test is generally cheaper.  */
   1822   if (old_insns_match_p (mode, last1, last2) != dir_both)
   1823     return false;
   1824 
   1825   /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
   1827      validation.  */
   1828   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
   1829     return false;
   1830 
   1831   bool nonfakeedges = false;
   1832   FOR_EACH_EDGE (e1, ei, bb1->succs)
   1833     {
   1834       e2 = EDGE_SUCC (bb2, ei.index);
   1835 
   1836       if ((e1->flags & EDGE_FAKE) == 0)
   1837 	nonfakeedges = true;
   1838 
   1839       if (e1->flags & EDGE_EH)
   1840 	nehedges1++;
   1841 
   1842       if (e2->flags & EDGE_EH)
   1843 	nehedges2++;
   1844 
   1845       if (e1->flags & EDGE_FALLTHRU)
   1846 	fallthru1 = e1;
   1847       if (e2->flags & EDGE_FALLTHRU)
   1848 	fallthru2 = e2;
   1849     }
   1850 
   1851   /* If number of edges of various types does not match, fail.  */
   1852   if (nehedges1 != nehedges2
   1853       || (fallthru1 != 0) != (fallthru2 != 0))
   1854     return false;
   1855 
   1856   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
   1857      and the last real insn doesn't have REG_ARGS_SIZE note, don't
   1858      attempt to optimize, as the two basic blocks might have different
   1859      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
     traps there should be REG_ARGS_SIZE notes, but they could be missing
     for __builtin_unreachable () uses.  */
   1862   if (!nonfakeedges
   1863       && !ACCUMULATE_OUTGOING_ARGS
   1864       && (!INSN_P (last1)
   1865           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
   1866     return false;
   1867 
  /* Fallthru edges must be forwarded to the same destination.  */
   1869   if (fallthru1)
   1870     {
   1871       basic_block d1 = (forwarder_block_p (fallthru1->dest)
   1872 			? single_succ (fallthru1->dest): fallthru1->dest);
   1873       basic_block d2 = (forwarder_block_p (fallthru2->dest)
   1874 			? single_succ (fallthru2->dest): fallthru2->dest);
   1875 
   1876       if (d1 != d2)
   1877 	return false;
   1878     }
   1879 
   1880   /* Ensure the same EH region.  */
   1881   {
   1882     rtx n1 = find_reg_note (last1, REG_EH_REGION, 0);
   1883     rtx n2 = find_reg_note (last2, REG_EH_REGION, 0);
   1884 
   1885     if (!n1 && n2)
   1886       return false;
   1887 
   1888     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
   1889       return false;
   1890   }
   1891 
  /* The same checks as in try_crossjump_to_edge.  They are required for
     the RTL version of sequence abstraction.  */
   1894   FOR_EACH_EDGE (e1, ei, bb2->succs)
   1895     {
   1896       edge e2;
   1897       edge_iterator ei;
   1898       basic_block d1 = e1->dest;
   1899 
   1900       if (FORWARDER_BLOCK_P (d1))
   1901         d1 = EDGE_SUCC (d1, 0)->dest;
   1902 
   1903       FOR_EACH_EDGE (e2, ei, bb1->succs)
   1904         {
   1905           basic_block d2 = e2->dest;
   1906           if (FORWARDER_BLOCK_P (d2))
   1907             d2 = EDGE_SUCC (d2, 0)->dest;
   1908           if (d1 == d2)
   1909             break;
   1910         }
   1911 
   1912       if (!e2)
   1913         return false;
   1914     }
   1915 
   1916   return true;
   1917 }
   1918 
/* Returns true if basic block BB has a label that must be preserved.  */
   1920 
   1921 static bool
   1922 block_has_preserve_label (basic_block bb)
   1923 {
   1924   return (bb
   1925           && block_label (bb)
   1926           && LABEL_PRESERVE_P (block_label (bb)));
   1927 }
   1928 
   1929 /* E1 and E2 are edges with the same destination block.  Search their
   1930    predecessors for common code.  If found, redirect control flow from
   1931    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
   1932    or the other way around (dir_backward).  DIR specifies the allowed
   1933    replacement direction.  */
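
/* A rough source-level sketch of what a successful crossjump achieves
   (hypothetical code, not taken from any particular test case):

	if (p)				   if (p)
	  { a (); x (); y (); }		     { a (); goto L; }
	else				=>   else
	  { b (); x (); y (); }		     { b (); }
					   L: x (); y ();

   The matching tail "x (); y ();" found by flow_find_cross_jump is kept
   in one copy (splitting its block if necessary), and the other
   predecessor is redirected to it; the duplicate tail is then deleted.  */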
   1934 
   1935 static bool
   1936 try_crossjump_to_edge (int mode, edge e1, edge e2,
   1937                        enum replace_direction dir)
   1938 {
   1939   int nmatch;
   1940   basic_block src1 = e1->src, src2 = e2->src;
   1941   basic_block redirect_to, redirect_from, to_remove;
   1942   basic_block osrc1, osrc2, redirect_edges_to, tmp;
   1943   rtx_insn *newpos1, *newpos2;
   1944   edge s;
   1945   edge_iterator ei;
   1946 
   1947   newpos1 = newpos2 = NULL;
   1948 
   1949   /* Search backward through forwarder blocks.  We don't need to worry
   1950      about multiple entry or chained forwarders, as they will be optimized
   1951      away.  We do this to look past the unconditional jump following a
   1952      conditional jump that is required due to the current CFG shape.  */
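  /* For instance (a hypothetical CFG shape): if E1->SRC contains only
     "goto T;" and is reached by a single conditional jump, we step back to
     the block ending in that conditional jump and try to match the code
     there instead.  */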
   1953   if (single_pred_p (src1)
   1954       && FORWARDER_BLOCK_P (src1))
   1955     e1 = single_pred_edge (src1), src1 = e1->src;
   1956 
   1957   if (single_pred_p (src2)
   1958       && FORWARDER_BLOCK_P (src2))
   1959     e2 = single_pred_edge (src2), src2 = e2->src;
   1960 
   1961   /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
   1964     return false;
   1965   if (src1 == src2)
   1966     return false;
   1967 
  /* Seeing more than one forwarder block would confuse us later...  */
   1969   if (FORWARDER_BLOCK_P (e1->dest)
   1970       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
   1971     return false;
   1972 
   1973   if (FORWARDER_BLOCK_P (e2->dest)
   1974       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
   1975     return false;
   1976 
   1977   /* Likewise with dead code (possibly newly created by the other optimizations
   1978      of cfg_cleanup).  */
   1979   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
   1980     return false;
   1981 
  /* Do not turn a crossing edge into a non-crossing one, or vice versa,
     after reload.  */
   1983   if (BB_PARTITION (src1) != BB_PARTITION (src2)
   1984       && reload_completed)
   1985     return false;
   1986 
   1987   /* Look for the common insn sequence, part the first ...  */
   1988   if (!outgoing_edges_match (mode, src1, src2))
   1989     return false;
   1990 
   1991   /* ... and part the second.  */
   1992   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
   1993 
   1994   osrc1 = src1;
   1995   osrc2 = src2;
   1996   if (newpos1 != NULL_RTX)
   1997     src1 = BLOCK_FOR_INSN (newpos1);
   1998   if (newpos2 != NULL_RTX)
   1999     src2 = BLOCK_FOR_INSN (newpos2);
   2000 
   2001   /* Check that SRC1 and SRC2 have preds again.  They may have changed
   2002      above due to the call to flow_find_cross_jump.  */
   2003   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
   2004     return false;
   2005 
   2006   if (dir == dir_backward)
   2007     {
   2008       std::swap (osrc1, osrc2);
   2009       std::swap (src1, src2);
   2010       std::swap (e1, e2);
   2011       std::swap (newpos1, newpos2);
   2012     }
   2013 
   2014   /* Don't proceed with the crossjump unless we found a sufficient number
   2015      of matching instructions or the 'from' block was totally matched
   2016      (such that its predecessors will hopefully be redirected and the
   2017      block removed).  */
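  /* param_min_crossjump_insns corresponds to the user-tunable
     --param min-crossjump-insns; the newpos1 == BB_HEAD (src1) test below
     is the full-match exception described above.  */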
   2018   if ((nmatch < param_min_crossjump_insns)
   2019       && (newpos1 != BB_HEAD (src1)))
   2020     return false;
   2021 
   2022   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
   2023   if (block_has_preserve_label (e1->dest)
   2024       && (e1->flags & EDGE_ABNORMAL))
   2025     return false;
   2026 
   2027   /* Here we know that the insns in the end of SRC1 which are common with SRC2
   2028      will be deleted.
     If we have tablejumps at the end of SRC1 and SRC2, they have already
     been compared for equivalence in outgoing_edges_match (), so replace
     the references to TABLE1 by references to TABLE2.  */
   2032   {
   2033       rtx_insn *label1, *label2;
   2034       rtx_jump_table_data *table1, *table2;
   2035 
   2036       if (tablejump_p (BB_END (osrc1), &label1, &table1)
   2037 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
   2038 	  && label1 != label2)
   2039 	{
   2040 	  rtx_insn *insn;
   2041 
   2042 	  /* Replace references to LABEL1 with LABEL2.  */
   2043 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
   2044 	    {
   2045 	      /* Do not replace the label in SRC1->END because when deleting
   2046 		 a block whose end is a tablejump, the tablejump referenced
   2047 		 from the instruction is deleted too.  */
   2048 	      if (insn != BB_END (osrc1))
   2049 		replace_label_in_insn (insn, label1, label2, true);
   2050 	    }
   2051 	}
   2052   }
   2053 
   2054   /* Avoid splitting if possible.  We must always split when SRC2 has
   2055      EH predecessor edges, or we may end up with basic blocks with both
   2056      normal and EH predecessor edges.  */
   2057   if (newpos2 == BB_HEAD (src2)
   2058       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
   2059     redirect_to = src2;
   2060   else
   2061     {
   2062       if (newpos2 == BB_HEAD (src2))
   2063 	{
   2064 	  /* Skip possible basic block header.  */
   2065 	  if (LABEL_P (newpos2))
   2066 	    newpos2 = NEXT_INSN (newpos2);
   2067 	  while (DEBUG_INSN_P (newpos2))
   2068 	    newpos2 = NEXT_INSN (newpos2);
   2069 	  if (NOTE_P (newpos2))
   2070 	    newpos2 = NEXT_INSN (newpos2);
   2071 	  while (DEBUG_INSN_P (newpos2))
   2072 	    newpos2 = NEXT_INSN (newpos2);
   2073 	}
   2074 
   2075       if (dump_file)
   2076 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
   2077 		 src2->index, nmatch);
   2078       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
   2079     }
   2080 
   2081   if (dump_file)
   2082     fprintf (dump_file,
   2083 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
   2084 	     src1->index, src2->index, nmatch);
   2085 
   2086   /* We may have some registers visible through the block.  */
   2087   df_set_bb_dirty (redirect_to);
   2088 
   2089   if (osrc2 == src2)
   2090     redirect_edges_to = redirect_to;
   2091   else
   2092     redirect_edges_to = osrc2;
   2093 
   2094   /* Recompute the counts of destinations of outgoing edges.  */
   2095   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
   2096     {
   2097       edge s2;
   2098       edge_iterator ei;
   2099       basic_block d = s->dest;
   2100 
   2101       if (FORWARDER_BLOCK_P (d))
   2102 	d = single_succ (d);
   2103 
   2104       FOR_EACH_EDGE (s2, ei, src1->succs)
   2105 	{
   2106 	  basic_block d2 = s2->dest;
   2107 	  if (FORWARDER_BLOCK_P (d2))
   2108 	    d2 = single_succ (d2);
   2109 	  if (d == d2)
   2110 	    break;
   2111 	}
   2112 
   2113       /* Take care to update possible forwarder blocks.  We verified
   2114 	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
   2116       if (FORWARDER_BLOCK_P (s->dest))
   2117 	s->dest->count += s->count ();
   2118 
   2119       if (FORWARDER_BLOCK_P (s2->dest))
   2120 	s2->dest->count -= s->count ();
   2121 
   2122       s->probability = s->probability.combine_with_count
   2123 			  (redirect_edges_to->count,
   2124 			   s2->probability, src1->count);
   2125     }
   2126 
   2127   /* Adjust count for the block.  An earlier jump
   2128      threading pass may have left the profile in an inconsistent
   2129      state (see update_bb_profile_for_threading) so we must be
   2130      prepared for overflows.  */
   2131   tmp = redirect_to;
   2132   do
   2133     {
   2134       tmp->count += src1->count;
   2135       if (tmp == redirect_edges_to)
   2136         break;
   2137       tmp = find_fallthru_edge (tmp->succs)->dest;
   2138     }
   2139   while (true);
   2140   update_br_prob_note (redirect_edges_to);
   2141 
   2142   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
   2143 
   2144   /* Skip possible basic block header.  */
   2145   if (LABEL_P (newpos1))
   2146     newpos1 = NEXT_INSN (newpos1);
   2147 
   2148   while (DEBUG_INSN_P (newpos1))
   2149     newpos1 = NEXT_INSN (newpos1);
   2150 
   2151   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
   2152     newpos1 = NEXT_INSN (newpos1);
   2153 
  /* Also skip prologue and function markers.  */
   2155   while (DEBUG_INSN_P (newpos1)
   2156 	 || (NOTE_P (newpos1)
   2157 	     && (NOTE_KIND (newpos1) == NOTE_INSN_PROLOGUE_END
   2158 		 || NOTE_KIND (newpos1) == NOTE_INSN_FUNCTION_BEG)))
   2159     newpos1 = NEXT_INSN (newpos1);
   2160 
   2161   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
   2162   to_remove = single_succ (redirect_from);
   2163 
  redirect_edge_and_branch_force (single_succ_edge (redirect_from),
				  redirect_to);
   2165   delete_basic_block (to_remove);
   2166 
   2167   update_forwarder_flag (redirect_from);
   2168   if (redirect_to != src2)
   2169     update_forwarder_flag (src2);
   2170 
   2171   return true;
   2172 }
   2173 
   2174 /* Search the predecessors of BB for common insn sequences.  When found,
   2175    share code between them by redirecting control flow.  Return true if
   any changes were made.  */
   2177 
   2178 static bool
   2179 try_crossjump_bb (int mode, basic_block bb)
   2180 {
   2181   edge e, e2, fallthru;
   2182   bool changed;
   2183   unsigned max, ix, ix2;
   2184 
  /* Nothing to do if there are not at least two incoming edges.  */
   2186   if (EDGE_COUNT (bb->preds) < 2)
   2187     return false;
   2188 
  /* Don't crossjump if this block ends in a computed jump and we are
     optimizing the block for size.  */
   2191   if (optimize_bb_for_size_p (bb)
   2192       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
   2193       && computed_jump_p (BB_END (bb)))
   2194     return false;
   2195 
   2196   /* If we are partitioning hot/cold basic blocks, we don't want to
   2197      mess up unconditional or indirect jumps that cross between hot
   2198      and cold sections.
   2199 
   2200      Basic block partitioning may result in some jumps that appear to
   2201      be optimizable (or blocks that appear to be mergeable), but which really
   2202      must be left untouched (they are required to make it safely across
   2203      partition boundaries).  See the comments at the top of
   2204      bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */
   2205 
  if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
      != BB_PARTITION (EDGE_PRED (bb, 1)->src)
   2208       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
   2209     return false;
   2210 
   2211   /* It is always cheapest to redirect a block that ends in a branch to
   2212      a block that falls through into BB, as that adds no branches to the
   2213      program.  We'll try that combination first.  */
   2215   max = param_max_crossjump_edges;
   2216 
   2217   if (EDGE_COUNT (bb->preds) > max)
   2218     return false;
   2219 
   2220   fallthru = find_fallthru_edge (bb->preds);
   2221 
   2222   changed = false;
   2223   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
   2224     {
   2225       e = EDGE_PRED (bb, ix);
   2226       ix++;
   2227 
   2228       /* As noted above, first try with the fallthru predecessor (or, a
   2229 	 fallthru predecessor if we are in cfglayout mode).  */
   2230       if (fallthru)
   2231 	{
   2232 	  /* Don't combine the fallthru edge into anything else.
   2233 	     If there is a match, we'll do it the other way around.  */
   2234 	  if (e == fallthru)
   2235 	    continue;
   2236 	  /* If nothing changed since the last attempt, there is nothing
   2237 	     we can do.  */
   2238 	  if (!first_pass
   2239 	      && !((e->src->flags & BB_MODIFIED)
   2240 		   || (fallthru->src->flags & BB_MODIFIED)))
   2241 	    continue;
   2242 
   2243 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
   2244 	    {
   2245 	      changed = true;
   2246 	      ix = 0;
   2247 	      continue;
   2248 	    }
   2249 	}
   2250 
   2251       /* Non-obvious work limiting check: Recognize that we're going
   2252 	 to call try_crossjump_bb on every basic block.  So if we have
   2253 	 two blocks with lots of outgoing edges (a switch) and they
   2254 	 share lots of common destinations, then we would do the
   2255 	 cross-jump check once for each common destination.
   2256 
   2257 	 Now, if the blocks actually are cross-jump candidates, then
   2258 	 all of their destinations will be shared.  Which means that
   2259 	 we only need check them for cross-jump candidacy once.  We
   2260 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
   2261 	 choosing to do the check from the block for which the edge
   2262 	 in question is the first successor of A.  */
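      /* For example (hypothetically), if blocks A and B both end in a
	 switch whose successors are {C, D, E}, the pair (A, B) would
	 otherwise be examined once from each of C, D and E; this check
	 lets only one of those attempts do the work.  */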
   2263       if (EDGE_SUCC (e->src, 0) != e)
   2264 	continue;
   2265 
   2266       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
   2267 	{
   2268 	  e2 = EDGE_PRED (bb, ix2);
   2269 
   2270 	  if (e2 == e)
   2271 	    continue;
   2272 
   2273 	  /* We've already checked the fallthru edge above.  */
   2274 	  if (e2 == fallthru)
   2275 	    continue;
   2276 
   2277 	  /* The "first successor" check above only prevents multiple
   2278 	     checks of crossjump(A,B).  In order to prevent redundant
   2279 	     checks of crossjump(B,A), require that A be the block
   2280 	     with the lowest index.  */
   2281 	  if (e->src->index > e2->src->index)
   2282 	    continue;
   2283 
   2284 	  /* If nothing changed since the last attempt, there is nothing
   2285 	     we can do.  */
   2286 	  if (!first_pass
   2287 	      && !((e->src->flags & BB_MODIFIED)
   2288 		   || (e2->src->flags & BB_MODIFIED)))
   2289 	    continue;
   2290 
	  /* Neither e nor e2 is a fallthru edge, so we can crossjump in
	     either direction.  */
   2293 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
   2294 	    {
   2295 	      changed = true;
   2296 	      ix = 0;
   2297 	      break;
   2298 	    }
   2299 	}
   2300     }
   2301 
   2302   if (changed)
   2303     crossjumps_occurred = true;
   2304 
   2305   return changed;
   2306 }
   2307 
   2308 /* Search the successors of BB for common insn sequences.  When found,
   2309    share code between them by moving it across the basic block
   boundary.  Return true if any changes were made.  */
   2311 
   2312 static bool
   2313 try_head_merge_bb (basic_block bb)
   2314 {
   2315   basic_block final_dest_bb = NULL;
   2316   int max_match = INT_MAX;
   2317   edge e0;
   2318   rtx_insn **headptr, **currptr, **nextptr;
   2319   bool changed, moveall;
   2320   unsigned ix;
   2321   rtx_insn *e0_last_head;
   2322   rtx cond;
   2323   rtx_insn *move_before;
   2324   unsigned nedges = EDGE_COUNT (bb->succs);
   2325   rtx_insn *jump = BB_END (bb);
   2326   regset live, live_union;
   2327 
  /* Nothing to do if there are not at least two outgoing edges.  */
   2329   if (nedges < 2)
   2330     return false;
   2331 
  /* Don't crossjump if this block ends in a computed jump and we are
     optimizing the block for size.  */
   2334   if (optimize_bb_for_size_p (bb)
   2335       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
   2336       && computed_jump_p (BB_END (bb)))
   2337     return false;
   2338 
   2339   cond = get_condition (jump, &move_before, true, false);
   2340   if (cond == NULL_RTX)
   2341     move_before = jump;
   2342 
   2343   for (ix = 0; ix < nedges; ix++)
   2344     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
   2345       return false;
   2346 
   2347   for (ix = 0; ix < nedges; ix++)
   2348     {
   2349       edge e = EDGE_SUCC (bb, ix);
   2350       basic_block other_bb = e->dest;
   2351 
   2352       if (df_get_bb_dirty (other_bb))
   2353 	{
   2354 	  block_was_dirty = true;
   2355 	  return false;
   2356 	}
   2357 
   2358       if (e->flags & EDGE_ABNORMAL)
   2359 	return false;
   2360 
   2361       /* Normally, all destination blocks must only be reachable from this
   2362 	 block, i.e. they must have one incoming edge.
   2363 
   2364 	 There is one special case we can handle, that of multiple consecutive
   2365 	 jumps where the first jumps to one of the targets of the second jump.
   2366 	 This happens frequently in switch statements for default labels.
   2367 	 The structure is as follows:
   2368 	 FINAL_DEST_BB
   2369 	 ....
   2370 	 if (cond) jump A;
   2371 	 fall through
   2372 	 BB
   2373 	 jump with targets A, B, C, D...
   2374 	 A
   2375 	 has two incoming edges, from FINAL_DEST_BB and BB
   2376 
   2377 	 In this case, we can try to move the insns through BB and into
   2378 	 FINAL_DEST_BB.  */
   2379       if (EDGE_COUNT (other_bb->preds) != 1)
   2380 	{
   2381 	  edge incoming_edge, incoming_bb_other_edge;
   2382 	  edge_iterator ei;
   2383 
   2384 	  if (final_dest_bb != NULL
   2385 	      || EDGE_COUNT (other_bb->preds) != 2)
   2386 	    return false;
   2387 
   2388 	  /* We must be able to move the insns across the whole block.  */
   2389 	  move_before = BB_HEAD (bb);
   2390 	  while (!NONDEBUG_INSN_P (move_before))
   2391 	    move_before = NEXT_INSN (move_before);
   2392 
   2393 	  if (EDGE_COUNT (bb->preds) != 1)
   2394 	    return false;
   2395 	  incoming_edge = EDGE_PRED (bb, 0);
   2396 	  final_dest_bb = incoming_edge->src;
   2397 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
   2398 	    return false;
   2399 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
   2400 	    if (incoming_bb_other_edge != incoming_edge)
   2401 	      break;
   2402 	  if (incoming_bb_other_edge->dest != other_bb)
   2403 	    return false;
   2404 	}
   2405     }
   2406 
   2407   e0 = EDGE_SUCC (bb, 0);
   2408   e0_last_head = NULL;
   2409   changed = false;
   2410 
   2411   for (ix = 1; ix < nedges; ix++)
   2412     {
   2413       edge e = EDGE_SUCC (bb, ix);
   2414       rtx_insn *e0_last, *e_last;
   2415       int nmatch;
   2416 
   2417       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
   2418 						 &e0_last, &e_last, 0);
   2419       if (nmatch == 0)
   2420 	return false;
   2421 
   2422       if (nmatch < max_match)
   2423 	{
   2424 	  max_match = nmatch;
   2425 	  e0_last_head = e0_last;
   2426 	}
   2427     }
   2428 
   2429   /* If we matched an entire block, we probably have to avoid moving the
   2430      last insn.  */
   2431   if (max_match > 0
   2432       && e0_last_head == BB_END (e0->dest)
   2433       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
   2434 	  || control_flow_insn_p (e0_last_head)))
   2435     {
   2436       max_match--;
   2437       if (max_match == 0)
   2438 	return false;
   2439       e0_last_head = prev_real_nondebug_insn (e0_last_head);
   2440     }
   2441 
   2442   if (max_match == 0)
   2443     return false;
   2444 
   2445   /* We must find a union of the live registers at each of the end points.  */
   2446   live = BITMAP_ALLOC (NULL);
   2447   live_union = BITMAP_ALLOC (NULL);
   2448 
   2449   currptr = XNEWVEC (rtx_insn *, nedges);
   2450   headptr = XNEWVEC (rtx_insn *, nedges);
   2451   nextptr = XNEWVEC (rtx_insn *, nedges);
   2452 
   2453   for (ix = 0; ix < nedges; ix++)
   2454     {
   2455       int j;
   2456       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
   2457       rtx_insn *head = BB_HEAD (merge_bb);
   2458 
   2459       while (!NONDEBUG_INSN_P (head))
   2460 	head = NEXT_INSN (head);
   2461       headptr[ix] = head;
   2462       currptr[ix] = head;
   2463 
      /* Compute the end point and live information.  */
   2465       for (j = 1; j < max_match; j++)
   2466 	do
   2467 	  head = NEXT_INSN (head);
   2468 	while (!NONDEBUG_INSN_P (head));
   2469       simulate_backwards_to_point (merge_bb, live, head);
   2470       IOR_REG_SET (live_union, live);
   2471     }
   2472 
   2473   /* If we're moving across two blocks, verify the validity of the
   2474      first move, then adjust the target and let the loop below deal
   2475      with the final move.  */
   2476   if (final_dest_bb != NULL)
   2477     {
   2478       rtx_insn *move_upto;
   2479 
   2480       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
   2481 				       jump, e0->dest, live_union,
   2482 				       NULL, &move_upto);
   2483       if (!moveall)
   2484 	{
   2485 	  if (move_upto == NULL_RTX)
   2486 	    goto out;
   2487 
   2488 	  while (e0_last_head != move_upto)
   2489 	    {
   2490 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
   2491 					      live_union);
   2492 	      e0_last_head = PREV_INSN (e0_last_head);
   2493 	    }
   2494 	}
   2495       if (e0_last_head == NULL_RTX)
   2496 	goto out;
   2497 
   2498       jump = BB_END (final_dest_bb);
   2499       cond = get_condition (jump, &move_before, true, false);
   2500       if (cond == NULL_RTX)
   2501 	move_before = jump;
   2502     }
   2503 
   2504   do
   2505     {
   2506       rtx_insn *move_upto;
   2507       moveall = can_move_insns_across (currptr[0], e0_last_head,
   2508 				       move_before, jump, e0->dest, live_union,
   2509 				       NULL, &move_upto);
   2510       if (!moveall && move_upto == NULL_RTX)
   2511 	{
   2512 	  if (jump == move_before)
   2513 	    break;
   2514 
   2515 	  /* Try again, using a different insertion point.  */
   2516 	  move_before = jump;
   2517 
   2518 	  continue;
   2519 	}
   2520 
   2521       if (final_dest_bb && !moveall)
   2522 	/* We haven't checked whether a partial move would be OK for the first
   2523 	   move, so we have to fail this case.  */
   2524 	break;
   2525 
   2526       changed = true;
   2527       for (;;)
   2528 	{
   2529 	  if (currptr[0] == move_upto)
   2530 	    break;
   2531 	  for (ix = 0; ix < nedges; ix++)
   2532 	    {
   2533 	      rtx_insn *curr = currptr[ix];
   2534 	      do
   2535 		curr = NEXT_INSN (curr);
   2536 	      while (!NONDEBUG_INSN_P (curr));
   2537 	      currptr[ix] = curr;
   2538 	    }
   2539 	}
   2540 
   2541       /* If we can't currently move all of the identical insns, remember
   2542 	 each insn after the range that we'll merge.  */
   2543       if (!moveall)
   2544 	for (ix = 0; ix < nedges; ix++)
   2545 	  {
   2546 	    rtx_insn *curr = currptr[ix];
   2547 	    do
   2548 	      curr = NEXT_INSN (curr);
   2549 	    while (!NONDEBUG_INSN_P (curr));
   2550 	    nextptr[ix] = curr;
   2551 	  }
   2552 
   2553       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
   2554       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
   2555       if (final_dest_bb != NULL)
   2556 	df_set_bb_dirty (final_dest_bb);
   2557       df_set_bb_dirty (bb);
   2558       for (ix = 1; ix < nedges; ix++)
   2559 	{
   2560 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
   2561 	  delete_insn_chain (headptr[ix], currptr[ix], false);
   2562 	}
   2563       if (!moveall)
   2564 	{
   2565 	  if (jump == move_before)
   2566 	    break;
   2567 
   2568 	  /* For the unmerged insns, try a different insertion point.  */
   2569 	  move_before = jump;
   2570 
   2571 	  for (ix = 0; ix < nedges; ix++)
   2572 	    currptr[ix] = headptr[ix] = nextptr[ix];
   2573 	}
   2574     }
   2575   while (!moveall);
   2576 
   2577  out:
   2578   free (currptr);
   2579   free (headptr);
   2580   free (nextptr);
   2581 
   2582   crossjumps_occurred |= changed;
   2583 
   2584   return changed;
   2585 }
   2586 
/* Return true if BB contains just a bb note, or a bb note followed
   only by DEBUG_INSNs.  */
   2589 
   2590 static bool
   2591 trivially_empty_bb_p (basic_block bb)
   2592 {
   2593   rtx_insn *insn = BB_END (bb);
   2594 
   2595   while (1)
   2596     {
   2597       if (insn == BB_HEAD (bb))
   2598 	return true;
   2599       if (!DEBUG_INSN_P (insn))
   2600 	return false;
   2601       insn = PREV_INSN (insn);
   2602     }
   2603 }
   2604 
   2605 /* Return true if BB contains just a return and possibly a USE of the
   2606    return value.  Fill in *RET and *USE with the return and use insns
   if any are found, otherwise NULL.  All CLOBBERs are ignored.  */
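
/* For instance, such a block might contain just this hypothetical RTL
   sketch (details vary by target):

	(insn (use (reg:SI ax)))
	(jump_insn (simple_return))

   possibly with CLOBBERs mixed in, which are ignored.  */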
   2608 
   2609 static bool
   2610 bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
   2611 {
   2612   *ret = *use = NULL;
   2613   rtx_insn *insn;
   2614 
   2615   if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
   2616     return false;
   2617 
   2618   FOR_BB_INSNS (bb, insn)
   2619     if (NONDEBUG_INSN_P (insn))
   2620       {
   2621 	rtx pat = PATTERN (insn);
   2622 
   2623 	if (!*ret && ANY_RETURN_P (pat))
   2624 	  *ret = insn;
	else if (!*ret && !*use && GET_CODE (pat) == USE
		 && REG_P (XEXP (pat, 0))
		 && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
   2628 	  *use = insn;
   2629 	else if (GET_CODE (pat) != CLOBBER)
   2630 	  return false;
   2631       }
   2632 
   2633   return !!*ret;
   2634 }
   2635 
/* Do simple CFG optimizations - basic block merging, simplification of jump
   instructions, etc.  Return nonzero if changes were made.  */
   2638 
   2639 static bool
   2640 try_optimize_cfg (int mode)
   2641 {
   2642   bool changed_overall = false;
   2643   bool changed;
   2644   int iterations = 0;
   2645   basic_block bb, b, next;
   2646 
   2647   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
   2648     clear_bb_flags ();
   2649 
   2650   crossjumps_occurred = false;
   2651 
   2652   FOR_EACH_BB_FN (bb, cfun)
   2653     update_forwarder_flag (bb);
   2654 
   2655   if (! targetm.cannot_modify_jumps_p ())
   2656     {
   2657       first_pass = true;
   2658       /* Attempt to merge blocks as made possible by edge removal.  If
   2659 	 a block has only one successor, and the successor has only
   2660 	 one predecessor, they may be combined.  */
   2661       do
   2662 	{
   2663 	  block_was_dirty = false;
   2664 	  changed = false;
   2665 	  iterations++;
   2666 
   2667 	  if (dump_file)
   2668 	    fprintf (dump_file,
   2669 		     "\n\ntry_optimize_cfg iteration %i\n\n",
   2670 		     iterations);
   2671 
   2672 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
   2673 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
   2674 	    {
   2675 	      basic_block c;
   2676 	      edge s;
   2677 	      bool changed_here = false;
   2678 
   2679 	      /* Delete trivially dead basic blocks.  This is either
   2680 		 blocks with no predecessors, or empty blocks with no
		 successors.  However, if the empty block with no
   2682 		 successors is the successor of the ENTRY_BLOCK, it is
   2683 		 kept.  This ensures that the ENTRY_BLOCK will have a
   2684 		 successor which is a precondition for many RTL
   2685 		 passes.  Empty blocks may result from expanding
   2686 		 __builtin_unreachable ().  */
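	      /* E.g. (hypothetically) expanding
		 "if (c) __builtin_unreachable ();" can leave an empty block
		 with no successors for the taken arm; such a block is
		 deleted here.  */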
   2687 	      if (EDGE_COUNT (b->preds) == 0
   2688 		  || (EDGE_COUNT (b->succs) == 0
   2689 		      && trivially_empty_bb_p (b)
   2690 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
   2691 		      != b))
   2692 		{
   2693 		  c = b->prev_bb;
   2694 		  if (EDGE_COUNT (b->preds) > 0)
   2695 		    {
   2696 		      edge e;
   2697 		      edge_iterator ei;
   2698 
   2699 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
   2700 			{
   2701 			  rtx_insn *insn;
   2702 			  for (insn = BB_FOOTER (b);
   2703 			       insn; insn = NEXT_INSN (insn))
   2704 			    if (BARRIER_P (insn))
   2705 			      break;
   2706 			  if (insn)
   2707 			    FOR_EACH_EDGE (e, ei, b->preds)
   2708 			      if ((e->flags & EDGE_FALLTHRU))
   2709 				{
   2710 				  if (BB_FOOTER (b)
   2711 				      && BB_FOOTER (e->src) == NULL)
   2712 				    {
   2713 				      BB_FOOTER (e->src) = BB_FOOTER (b);
   2714 				      BB_FOOTER (b) = NULL;
   2715 				    }
   2716 				  else
   2717 				    emit_barrier_after_bb (e->src);
   2718 				}
   2719 			}
   2720 		      else
   2721 			{
   2722 			  rtx_insn *last = get_last_bb_insn (b);
   2723 			  if (last && BARRIER_P (last))
   2724 			    FOR_EACH_EDGE (e, ei, b->preds)
   2725 			      if ((e->flags & EDGE_FALLTHRU))
   2726 				emit_barrier_after (BB_END (e->src));
   2727 			}
   2728 		    }
   2729 		  delete_basic_block (b);
   2730 		  changed = true;
		  /* Avoid trying to remove the entry block.  */
   2732 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
   2733 		  continue;
   2734 		}
   2735 
   2736 	      /* Remove code labels no longer used.  */
   2737 	      if (single_pred_p (b)
   2738 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
   2739 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
   2740 		  && LABEL_P (BB_HEAD (b))
   2741 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
   2742 		  /* If the previous block ends with a branch to this
   2743 		     block, we can't delete the label.  Normally this
   2744 		     is a condjump that is yet to be simplified, but
   2745 		     if CASE_DROPS_THRU, this can be a tablejump with
   2746 		     some element going to the same place as the
   2747 		     default (fallthru).  */
   2748 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
   2749 		      || !JUMP_P (BB_END (single_pred (b)))
   2750 		      || ! label_is_jump_target_p (BB_HEAD (b),
   2751 						   BB_END (single_pred (b)))))
   2752 		{
   2753 		  delete_insn (BB_HEAD (b));
   2754 		  if (dump_file)
   2755 		    fprintf (dump_file, "Deleted label in block %i.\n",
   2756 			     b->index);
   2757 		}
   2758 
   2759 	      /* If we fall through an empty block, we can remove it.  */
   2760 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
   2761 		  && single_pred_p (b)
   2762 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
   2763 		  && !LABEL_P (BB_HEAD (b))
   2764 		  && FORWARDER_BLOCK_P (b)
   2765 		  /* Note that forwarder_block_p true ensures that
   2766 		     there is a successor for this block.  */
   2767 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
   2768 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
   2769 		{
   2770 		  if (dump_file)
   2771 		    fprintf (dump_file,
   2772 			     "Deleting fallthru block %i.\n",
   2773 			     b->index);
   2774 
   2775 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
   2776 		       ? b->next_bb : b->prev_bb);
   2777 		  redirect_edge_succ_nodup (single_pred_edge (b),
   2778 					    single_succ (b));
   2779 		  delete_basic_block (b);
   2780 		  changed = true;
   2781 		  b = c;
   2782 		  continue;
   2783 		}
   2784 
   2785 	      /* Merge B with its single successor, if any.  */
   2786 	      if (single_succ_p (b)
   2787 		  && (s = single_succ_edge (b))
   2788 		  && !(s->flags & EDGE_COMPLEX)
   2789 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
   2790 		  && single_pred_p (c)
   2791 		  && b != c)
   2792 		{
		  /* When not in cfg_layout mode, use code aware of reordering
		     INSNs.  This code possibly creates new basic blocks, so
		     it does not fit the merge_blocks interface and is kept
		     here in the hope that it will become useless once more
		     of the compiler is transformed to use cfg_layout mode.  */
   2798 
   2799 		  if ((mode & CLEANUP_CFGLAYOUT)
   2800 		      && can_merge_blocks_p (b, c))
   2801 		    {
   2802 		      merge_blocks (b, c);
   2803 		      update_forwarder_flag (b);
   2804 		      changed_here = true;
   2805 		    }
   2806 		  else if (!(mode & CLEANUP_CFGLAYOUT)
   2807 			   /* If the jump insn has side effects,
   2808 			      we can't kill the edge.  */
   2809 			   && (!JUMP_P (BB_END (b))
   2810 			       || (reload_completed
   2811 				   ? simplejump_p (BB_END (b))
   2812 				   : (onlyjump_p (BB_END (b))
   2813 				      && !tablejump_p (BB_END (b),
   2814 						       NULL, NULL))))
   2815 			   && (next = merge_blocks_move (s, b, c, mode)))
   2816 		      {
   2817 			b = next;
   2818 			changed_here = true;
   2819 		      }
   2820 		}
   2821 
   2822 	      /* Try to change a branch to a return to just that return.  */
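	      /* E.g. (a hypothetical sketch) a block ending in "goto L",
		 where L holds only "return;", is rewritten to end in that
		 return itself, with any USE of the return value copied in
		 front of it.  */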
   2823 	      rtx_insn *ret, *use;
   2824 	      if (single_succ_p (b)
   2825 		  && onlyjump_p (BB_END (b))
   2826 		  && bb_is_just_return (single_succ (b), &ret, &use))
   2827 		{
   2828 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
   2829 				     PATTERN (ret), 0))
   2830 		    {
   2831 		      if (use)
   2832 			emit_insn_before (copy_insn (PATTERN (use)),
   2833 					  BB_END (b));
   2834 		      if (dump_file)
   2835 			fprintf (dump_file, "Changed jump %d->%d to return.\n",
   2836 				 b->index, single_succ (b)->index);
   2837 		      redirect_edge_succ (single_succ_edge (b),
   2838 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
   2839 		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
   2840 		      changed_here = true;
   2841 		    }
   2842 		}
   2843 
   2844 	      /* Try to change a conditional branch to a return to the
   2845 		 respective conditional return.  */
   2846 	      if (EDGE_COUNT (b->succs) == 2
   2847 		  && any_condjump_p (BB_END (b))
   2848 		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
   2849 		{
   2850 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
   2851 				     PATTERN (ret), 0))
   2852 		    {
   2853 		      if (use)
   2854 			emit_insn_before (copy_insn (PATTERN (use)),
   2855 					  BB_END (b));
   2856 		      if (dump_file)
   2857 			fprintf (dump_file, "Changed conditional jump %d->%d "
   2858 				 "to conditional return.\n",
   2859 				 b->index, BRANCH_EDGE (b)->dest->index);
   2860 		      redirect_edge_succ (BRANCH_EDGE (b),
   2861 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
   2862 		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
   2863 		      changed_here = true;
   2864 		    }
   2865 		}
   2866 
   2867 	      /* Try to flip a conditional branch that falls through to
   2868 		 a return so that it becomes a conditional return and a
   2869 		 new jump to the original branch target.  */
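	      /* Schematically (a hypothetical sketch):

		   if (cond) goto L;	      if (!cond) return;
		   return;		 =>   goto L;

		 where the new "goto L" lands in a fresh block created by
		 force_nonfallthru.  */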
   2870 	      if (EDGE_COUNT (b->succs) == 2
   2871 		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
   2872 		  && any_condjump_p (BB_END (b))
   2873 		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
   2874 		{
   2875 		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
   2876 				   JUMP_LABEL (BB_END (b)), 0))
   2877 		    {
   2878 		      basic_block new_ft = BRANCH_EDGE (b)->dest;
   2879 		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
   2880 					 PATTERN (ret), 0))
   2881 			{
   2882 			  if (use)
   2883 			    emit_insn_before (copy_insn (PATTERN (use)),
   2884 					      BB_END (b));
   2885 			  if (dump_file)
   2886 			    fprintf (dump_file, "Changed conditional jump "
   2887 				     "%d->%d to conditional return, adding "
   2888 				     "fall-through jump.\n",
   2889 				     b->index, BRANCH_EDGE (b)->dest->index);
   2890 			  redirect_edge_succ (BRANCH_EDGE (b),
   2891 					      EXIT_BLOCK_PTR_FOR_FN (cfun));
   2892 			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
   2893 			  std::swap (BRANCH_EDGE (b)->probability,
   2894 				     FALLTHRU_EDGE (b)->probability);
   2895 			  update_br_prob_note (b);
   2896 			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
   2897 			  notice_new_block (jb);
   2898 			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
   2899 					      block_label (new_ft), 0))
   2900 			    gcc_unreachable ();
   2901 			  redirect_edge_succ (single_succ_edge (jb), new_ft);
   2902 			  changed_here = true;
   2903 			}
   2904 		      else
   2905 			{
   2906 			  /* Invert the jump back to what it was.  This should
   2907 			     never fail.  */
   2908 			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
   2909 					    JUMP_LABEL (BB_END (b)), 0))
   2910 			    gcc_unreachable ();
   2911 			}
   2912 		    }
   2913 		}
   2914 
   2915 	      /* Simplify branch over branch.  */
   2916 	      if ((mode & CLEANUP_EXPENSIVE)
   2917 		   && !(mode & CLEANUP_CFGLAYOUT)
   2918 		   && try_simplify_condjump (b))
   2919 		changed_here = true;
   2920 
   2921 	      /* If B has a single outgoing edge, but uses a
   2922 		 non-trivial jump instruction without side-effects, we
   2923 		 can either delete the jump entirely, or replace it
   2924 		 with a simple unconditional jump.  */
   2925 	      if (single_succ_p (b)
   2926 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
   2927 		  && onlyjump_p (BB_END (b))
   2928 		  && !CROSSING_JUMP_P (BB_END (b))
   2929 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
   2930 						     single_succ (b),
   2931 						     (mode & CLEANUP_CFGLAYOUT) != 0))
   2932 		{
   2933 		  update_forwarder_flag (b);
   2934 		  changed_here = true;
   2935 		}
   2936 
   2937 	      /* Simplify branch to branch.  */
   2938 	      if (try_forward_edges (mode, b))
   2939 		{
   2940 		  update_forwarder_flag (b);
   2941 		  changed_here = true;
   2942 		}
   2943 
   2944 	      /* Look for shared code between blocks.  */
   2945 	      if ((mode & CLEANUP_CROSSJUMP)
   2946 		  && try_crossjump_bb (mode, b))
   2947 		changed_here = true;
   2948 
   2949 	      if ((mode & CLEANUP_CROSSJUMP)
   2950 		  /* This can lengthen register lifetimes.  Do it only after
   2951 		     reload.  */
   2952 		  && reload_completed
   2953 		  && try_head_merge_bb (b))
   2954 		changed_here = true;
   2955 
   2956 	      /* Don't get confused by the index shift caused by
   2957 		 deleting blocks.  */
   2958 	      if (!changed_here)
   2959 		b = b->next_bb;
   2960 	      else
   2961 		changed = true;
   2962 	    }
   2963 
   2964 	  if ((mode & CLEANUP_CROSSJUMP)
   2965 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
   2966 	    changed = true;
   2967 
   2968 	  if (block_was_dirty)
   2969 	    {
   2970 	      /* This should only be set by head-merging.  */
   2971 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
   2972 	      df_analyze ();
   2973 	    }
   2974 
   2975 	  if (changed)
   2976             {
   2977               /* Edge forwarding in particular can cause hot blocks previously
   2978                  reached by both hot and cold blocks to become dominated only
   2979                  by cold blocks. This will cause the verification below to fail,
   2980                  and lead to now cold code in the hot section. This is not easy
   2981                  to detect and fix during edge forwarding, and in some cases
   2982                  is only visible after newly unreachable blocks are deleted,
   2983                  which will be done in fixup_partitions.  */
   2984 	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
   2985 		{
   2986 		  fixup_partitions ();
   2987 	          checking_verify_flow_info ();
   2988 		}
   2989             }
   2990 
   2991 	  changed_overall |= changed;
   2992 	  first_pass = false;
   2993 	}
   2994       while (changed);
   2995     }
   2996 
   2997   FOR_ALL_BB_FN (b, cfun)
   2998     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
   2999 
   3000   return changed_overall;
   3001 }
   3002 
   3003 /* Delete all unreachable basic blocks.  */
   3005 
   3006 bool
   3007 delete_unreachable_blocks (void)
   3008 {
   3009   bool changed = false;
   3010   basic_block b, prev_bb;
   3011 
   3012   find_unreachable_blocks ();
   3013 
   3014   /* When we're in GIMPLE mode and there may be debug bind insns, we
   3015      should delete blocks in reverse dominator order, so as to get a
   3016      chance to substitute all released DEFs into debug bind stmts.  If
   3017      we don't have dominators information, walking blocks backward
   3018      gets us a better chance of retaining most debug information than
   3019      otherwise.  */
  if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  auto_vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }
		}

	      changed = true;
	    }
	}
    }
  else
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
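
/* Editorial usage sketch (hypothetical caller, not upstream code):
   a pass that only needs dead blocks removed, without the rest of
   the cleanup_cfg machinery, could use the function directly.  */

#if 0
static bool
prune_unreachable_blocks_sketch (void)
{
  bool changed = delete_unreachable_blocks ();
  if (changed)
    /* Renumber basic blocks so the index space is dense again,
       as cleanup_cfg below does.  */
    compact_blocks ();
  return changed;
}
#endif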

/* Delete any jump tables that are never referenced.  We can't delete
   them at the time the tablejump insn is removed, because they are
   still referenced by the preceding insns that compute the destination,
   so we delay the deletion and garbage-collect them once life
   information has been computed.  */
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx_insn *label = insn, *jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
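
/* For orientation, a hand-written sketch (not actual RTL dump output)
   of the dead pair the loop above recognizes, sitting between two
   basic blocks:

     (code_label 57 ...)			;; LABEL_NUSES == 0
     (jump_table_data 58 ...
	(addr_vec [ (label_ref ...) ... ]))

   Once the tablejump computing a destination from this table is gone,
   nothing outside the pair references the label any longer, and both
   insns can be deleted.  */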

/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & CLEANUP_NO_INSN_DEL)
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();
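
  /* Illustrative example (editorial, hypothetical source):

       if (!p)
	 abort ();
       ...
       if (!q)
	 abort ();

     Each abort call ends a block with no real successors, so without
     help the two identical tails share no common destination.  The
     fake edges added above give both blocks EXIT as a successor, which
     lets try_crossjump_bb consider them for tail merging.  */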

  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks ();
      changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
	{
	  /* Try to remove some trivially dead insns when doing an expensive
	     cleanup.  But delete_trivially_dead_insns doesn't work after
	     reload (it only handles pseudos) and run_fast_dce is too costly
	     to run in every iteration.

	     For effective cross jumping, we really want to run a fast DCE to
	     clean up any dead conditions, or they get in the way of performing
	     useful tail merges.

	     Other transformations in cleanup_cfg are not so sensitive to dead
	     code, so delete_trivially_dead_insns or even doing nothing at all
	     is good enough.  */
	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
	    {
	      run_fast_dce ();
	      mode &= ~CLEANUP_FORCE_FAST_DCE;
	    }
	}
      else
	break;
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  if (mode & CLEANUP_FORCE_FAST_DCE)
    run_fast_dce ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  /* ???  We probably do this way too often.  */
  if (current_loops
      && (changed
	  || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The cleanups above do not keep dominance info up to date, so
	 it must not be available at this point.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
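
/* Editorial usage sketch (hypothetical caller, not upstream code):
   CLEANUP_* flags are simply ORed together per call site.  */

#if 0
static void
cleanup_after_my_pass_sketch (void)
{
  /* Request tail merging plus the expensive cleanups; CLEANUP_CFGLAYOUT
     need not be passed, since cleanup_cfg sets it itself whenever the
     IR is already in cfglayout mode.  */
  cleanup_cfg (CLEANUP_CROSSJUMP | CLEANUP_EXPENSIVE);
}
#endif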

namespace {

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump

unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps && flag_expensive_optimizations
		  ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}
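
/* For orientation (editorial note): factory functions such as
   make_pass_jump are what the pass manager calls to instantiate the
   pass; passes.def wires the pass into the RTL pipeline by name
   (roughly, a NEXT_PASS (pass_jump) entry).  */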

namespace {

const pass_data pass_data_jump_after_combine =
{
  RTL_PASS, /* type */
  "jump_after_combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump_after_combine : public rtl_opt_pass
{
public:
  pass_jump_after_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump_after_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_thread_jumps && flag_expensive_optimizations;
  }
  virtual unsigned int execute (function *);

}; // class pass_jump_after_combine

unsigned int
pass_jump_after_combine::execute (function *)
{
  /* Jump threading does not keep dominators up-to-date.  */
  free_dominance_info (CDI_DOMINATORS);
  cleanup_cfg (CLEANUP_THREADING);
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump_after_combine (gcc::context *ctxt)
{
  return new pass_jump_after_combine (ctxt);
}

namespace {

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}
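
/* Editorial summary of the three jump-cleanup passes defined above,
   derived from their execute and gate methods:

     jump		  CLEANUP_EXPENSIVE when optimizing, plus
			  CLEANUP_THREADING when -fthread-jumps and
			  -fexpensive-optimizations are enabled
     jump_after_combine   CLEANUP_THREADING, gated on the same two flags
     jump2		  CLEANUP_CROSSJUMP when -fcrossjumping, else 0  */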