cfgcleanup.cc revision 1.1 1 1.1 mrg /* Control flow optimization code for GNU compiler.
2 1.1 mrg Copyright (C) 1987-2022 Free Software Foundation, Inc.
3 1.1 mrg
4 1.1 mrg This file is part of GCC.
5 1.1 mrg
6 1.1 mrg GCC is free software; you can redistribute it and/or modify it under
7 1.1 mrg the terms of the GNU General Public License as published by the Free
8 1.1 mrg Software Foundation; either version 3, or (at your option) any later
9 1.1 mrg version.
10 1.1 mrg
11 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 1.1 mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 1.1 mrg for more details.
15 1.1 mrg
16 1.1 mrg You should have received a copy of the GNU General Public License
17 1.1 mrg along with GCC; see the file COPYING3. If not see
18 1.1 mrg <http://www.gnu.org/licenses/>. */
19 1.1 mrg
20 1.1 mrg /* This file contains optimizer of the control flow. The main entry point is
21 1.1 mrg cleanup_cfg. Following optimizations are performed:
22 1.1 mrg
23 1.1 mrg - Unreachable blocks removal
24 1.1 mrg - Edge forwarding (edge to the forwarder block is forwarded to its
25 1.1 mrg successor. Simplification of the branch instruction is performed by
26 1.1 mrg underlying infrastructure so branch can be converted to simplejump or
27 1.1 mrg eliminated).
28 1.1 mrg - Cross jumping (tail merging)
29 1.1 mrg - Conditional jump-around-simplejump simplification
30 1.1 mrg - Basic block merging. */
31 1.1 mrg
32 1.1 mrg #include "config.h"
33 1.1 mrg #include "system.h"
34 1.1 mrg #include "coretypes.h"
35 1.1 mrg #include "backend.h"
36 1.1 mrg #include "target.h"
37 1.1 mrg #include "rtl.h"
38 1.1 mrg #include "tree.h"
39 1.1 mrg #include "cfghooks.h"
40 1.1 mrg #include "df.h"
41 1.1 mrg #include "memmodel.h"
42 1.1 mrg #include "tm_p.h"
43 1.1 mrg #include "insn-config.h"
44 1.1 mrg #include "emit-rtl.h"
45 1.1 mrg #include "cselib.h"
46 1.1 mrg #include "tree-pass.h"
47 1.1 mrg #include "cfgloop.h"
48 1.1 mrg #include "cfgrtl.h"
49 1.1 mrg #include "cfganal.h"
50 1.1 mrg #include "cfgbuild.h"
51 1.1 mrg #include "cfgcleanup.h"
52 1.1 mrg #include "dce.h"
53 1.1 mrg #include "dbgcnt.h"
54 1.1 mrg #include "rtl-iter.h"
55 1.1 mrg #include "regs.h"
56 1.1 mrg #include "function-abi.h"
57 1.1 mrg
/* Nonzero if BB has been marked as doing nothing but jumping to its
   single successor (see update_forwarder_flag).  */
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running first pass of try_optimize_cfg loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occurred;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

/* Forward declarations for the mutually recursive local helpers below.  */
static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);
85 1.1 mrg
86 1.1 mrg /* Set flags for newly created block. */
88 1.1 mrg
89 1.1 mrg static void
90 1.1 mrg notice_new_block (basic_block bb)
91 1.1 mrg {
92 1.1 mrg if (!bb)
93 1.1 mrg return;
94 1.1 mrg
95 1.1 mrg if (forwarder_block_p (bb))
96 1.1 mrg bb->flags |= BB_FORWARDER_BLOCK;
97 1.1 mrg }
98 1.1 mrg
99 1.1 mrg /* Recompute forwarder flag after block has been modified. */
100 1.1 mrg
101 1.1 mrg static void
102 1.1 mrg update_forwarder_flag (basic_block bb)
103 1.1 mrg {
104 1.1 mrg if (forwarder_block_p (bb))
105 1.1 mrg bb->flags |= BB_FORWARDER_BLOCK;
106 1.1 mrg else
107 1.1 mrg bb->flags &= ~BB_FORWARDER_BLOCK;
108 1.1 mrg }
109 1.1 mrg
/* Simplify a conditional jump around an unconditional jump.
   I.e. turn

       if (cond) goto L1;   -->   if (!cond) goto L2;
       goto L2;
     L1:

   by inverting the condition and deleting the forwarder block.
   CBRANCH_BLOCK is the block ending in the conditional branch.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  invert_jump may fail when the
     condition is not machine-reversible; in that case do nothing.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
		    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
194 1.1 mrg
195 1.1 mrg /* Attempt to prove that operation is NOOP using CSElib or mark the effect
197 1.1 mrg on register. Used by jump threading. */
198 1.1 mrg
199 1.1 mrg static bool
200 1.1 mrg mark_effect (rtx exp, regset nonequal)
201 1.1 mrg {
202 1.1 mrg rtx dest;
203 1.1 mrg switch (GET_CODE (exp))
204 1.1 mrg {
205 1.1 mrg /* In case we do clobber the register, mark it as equal, as we know the
206 1.1 mrg value is dead so it don't have to match. */
207 1.1 mrg case CLOBBER:
208 1.1 mrg dest = XEXP (exp, 0);
209 1.1 mrg if (REG_P (dest))
210 1.1 mrg bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
211 1.1 mrg return false;
212 1.1 mrg
213 1.1 mrg case SET:
214 1.1 mrg if (cselib_redundant_set_p (exp))
215 1.1 mrg return false;
216 1.1 mrg dest = SET_DEST (exp);
217 1.1 mrg if (dest == pc_rtx)
218 1.1 mrg return false;
219 1.1 mrg if (!REG_P (dest))
220 1.1 mrg return true;
221 1.1 mrg bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
222 1.1 mrg return false;
223 1.1 mrg
224 1.1 mrg default:
225 1.1 mrg return false;
226 1.1 mrg }
227 1.1 mrg }
228 1.1 mrg
229 1.1 mrg /* Return true if X contains a register in NONEQUAL. */
230 1.1 mrg static bool
231 1.1 mrg mentions_nonequal_regs (const_rtx x, regset nonequal)
232 1.1 mrg {
233 1.1 mrg subrtx_iterator::array_type array;
234 1.1 mrg FOR_EACH_SUBRTX (iter, array, x, NONCONST)
235 1.1 mrg {
236 1.1 mrg const_rtx x = *iter;
237 1.1 mrg if (REG_P (x))
238 1.1 mrg {
239 1.1 mrg unsigned int end_regno = END_REGNO (x);
240 1.1 mrg for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
241 1.1 mrg if (REGNO_REG_SET_P (nonequal, regno))
242 1.1 mrg return true;
243 1.1 mrg }
244 1.1 mrg }
245 1.1 mrg return false;
246 1.1 mrg }
247 1.1 mrg
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if exist, NULL otherwise.

   On failure the block may be flagged BB_NONTHREADABLE_BLOCK so later
   calls can bail out early.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;

  /* Jump threading may cause fixup_partitions to introduce new crossing edges,
     which is not allowed after reload.  */
  gcc_checking_assert (!reload_completed || !crtl->has_bb_partition);

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we do handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  /* Read the condition in E->src reversed when E's fallthru-ness does not
     match the shape of the if_then_else (else-arm == pc).  */
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  /* The first condition must decide the second one, directly or reversed.  */
  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Punt if BB_END (e->src) is doloop-like conditional jump that modifies
     the registers used in cond1.  */
  if (modified_in_p (cond1, BB_END (e->src)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were same basic block.
     Our goal is to prove that whole block is an NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      /* cond2 must not mention any register that is not equal to the
	 former block.  Check this before processing that instruction,
	 as BB_END (b) could contain also clobbers.  */
      if (insn == BB_END (b)
	  && mentions_nonequal_regs (cond2, nonequal))
	goto failed_exit;

      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* Any register left marked may make B's branch differ; give up.  */
  if (!REG_SET_EMPTY_P (nonequal))
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  /* Pick the successor edge of B that the dominated comparison selects.  */
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
400 1.1 mrg
/* Attempt to forward edges leaving basic block B, following chains of
   forwarder blocks and (when CLEANUP_THREADING is in MODE) provably
   constant conditional jumps.  Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
      bool new_target_threaded = false;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward fallthru
	 edge to the same place as the branch edge of conditional branch
	 and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* Walk the forwarding chain; COUNTER bounds the walk so a CFG cycle
	 cannot loop forever.  */
      while (counter < n_basic_blocks_for_fn (cfun))
	{
	  basic_block new_target = NULL;
	  may_thread |= (target->flags & BB_MODIFIED) != 0;

	  if (FORWARDER_BLOCK_P (target)
	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks_for_fn (cfun);
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  location_t new_locus = single_succ_edge (target)->goto_locus;
		  location_t locus = goto_locus;

		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
		      && new_locus != locus)
		    new_target = NULL;
		  else
		    {
		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			locus = new_locus;

		      rtx_insn *last = BB_END (target);
		      if (DEBUG_INSN_P (last))
			last = prev_nondebug_insn (last);
		      if (last && INSN_P (last))
			new_locus = INSN_LOCATION (last);
		      else
			new_locus = UNKNOWN_LOCATION;

		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
			  && new_locus != locus)
			new_target = NULL;
		      else
			{
			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			    locus = new_locus;

			  goto_locus = locus;
			}
		    }
		}
	    }

	  /* Allow to thread only over one edge at time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge,
					      n_basic_blocks_for_fn (cfun));
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks_for_fn (cfun);
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges
			      < (n_basic_blocks_for_fn (cfun)
				 - NUM_FIXED_BLOCKS));
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  /* Do not turn non-crossing jump to crossing.  Depending on target
	     it may require different instruction pattern.  */
	  if ((e->flags & EDGE_CROSSING)
	      || BB_PARTITION (first) == BB_PARTITION (new_target))
	    {
	      target = new_target;
	      threaded |= new_target_threaded;
	    }
	}

      if (counter >= n_basic_blocks_for_fn (cfun))
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  profile_count edge_count = e->count ();
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}
616 1.1 mrg
617 1.1 mrg
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  /* A's jump to B must be followed by a barrier; remove it before the
     insns are rearranged.  */
  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
662 1.1 mrg
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx_insn *label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);


  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
716 1.1 mrg
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves basic block away or introduces another optimization
   possibility, return basic block just before B so cleanup_cfg don't
   need to iterate.

   It may be good idea to return basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
	return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      /* Continue from the block before B, unless B is the first real one.  */
      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    return NULL;
	  /* Turn B's incoming fallthru into an explicit jump so B may
	     be moved; the call may create a new forwarder block.  */
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}
826 1.1 mrg
827 1.1 mrg
829 1.1 mrg /* Removes the memory attributes of MEM expression
830 1.1 mrg if they are not equal. */
831 1.1 mrg
832 1.1 mrg static void
833 1.1 mrg merge_memattrs (rtx x, rtx y)
834 1.1 mrg {
835 1.1 mrg int i;
836 1.1 mrg int j;
837 1.1 mrg enum rtx_code code;
838 1.1 mrg const char *fmt;
839 1.1 mrg
840 1.1 mrg if (x == y)
841 1.1 mrg return;
842 1.1 mrg if (x == 0 || y == 0)
843 1.1 mrg return;
844 1.1 mrg
845 1.1 mrg code = GET_CODE (x);
846 1.1 mrg
847 1.1 mrg if (code != GET_CODE (y))
848 1.1 mrg return;
849 1.1 mrg
850 1.1 mrg if (GET_MODE (x) != GET_MODE (y))
851 1.1 mrg return;
852 1.1 mrg
853 1.1 mrg if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
854 1.1 mrg {
855 1.1 mrg if (! MEM_ATTRS (x))
856 1.1 mrg MEM_ATTRS (y) = 0;
857 1.1 mrg else if (! MEM_ATTRS (y))
858 1.1 mrg MEM_ATTRS (x) = 0;
859 1.1 mrg else
860 1.1 mrg {
861 1.1 mrg if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
862 1.1 mrg {
863 1.1 mrg set_mem_alias_set (x, 0);
864 1.1 mrg set_mem_alias_set (y, 0);
865 1.1 mrg }
866 1.1 mrg
867 1.1 mrg if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
868 1.1 mrg {
869 1.1 mrg set_mem_expr (x, 0);
870 1.1 mrg set_mem_expr (y, 0);
871 1.1 mrg clear_mem_offset (x);
872 1.1 mrg clear_mem_offset (y);
873 1.1 mrg }
874 1.1 mrg else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
875 1.1 mrg || (MEM_OFFSET_KNOWN_P (x)
876 1.1 mrg && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
877 1.1 mrg {
878 1.1 mrg clear_mem_offset (x);
879 1.1 mrg clear_mem_offset (y);
880 1.1 mrg }
881 1.1 mrg
882 1.1 mrg if (!MEM_SIZE_KNOWN_P (x))
883 1.1 mrg clear_mem_size (y);
884 1.1 mrg else if (!MEM_SIZE_KNOWN_P (y))
885 1.1 mrg clear_mem_size (x);
886 1.1 mrg else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
887 1.1 mrg set_mem_size (x, MEM_SIZE (y));
888 1.1 mrg else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
889 1.1 mrg set_mem_size (y, MEM_SIZE (x));
890 1.1 mrg else
891 1.1 mrg {
892 1.1 mrg /* The sizes aren't ordered, so we can't merge them. */
893 1.1 mrg clear_mem_size (x);
894 1.1 mrg clear_mem_size (y);
895 1.1 mrg }
896 1.1 mrg
897 1.1 mrg set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
898 1.1 mrg set_mem_align (y, MEM_ALIGN (x));
899 1.1 mrg }
900 1.1 mrg }
901 1.1 mrg if (code == MEM)
902 1.1 mrg {
903 1.1 mrg if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
904 1.1 mrg {
905 1.1 mrg MEM_READONLY_P (x) = 0;
906 1.1 mrg MEM_READONLY_P (y) = 0;
907 1.1 mrg }
908 1.1 mrg if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
909 1.1 mrg {
910 1.1 mrg MEM_NOTRAP_P (x) = 0;
911 1.1 mrg MEM_NOTRAP_P (y) = 0;
912 1.1 mrg }
913 1.1 mrg if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
914 1.1 mrg {
915 1.1 mrg MEM_VOLATILE_P (x) = 1;
916 1.1 mrg MEM_VOLATILE_P (y) = 1;
917 1.1 mrg }
918 1.1 mrg }
919 1.1 mrg
920 1.1 mrg fmt = GET_RTX_FORMAT (code);
921 1.1 mrg for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
922 1.1 mrg {
923 1.1 mrg switch (fmt[i])
924 1.1 mrg {
925 1.1 mrg case 'E':
926 1.1 mrg /* Two vectors must have the same length. */
927 1.1 mrg if (XVECLEN (x, i) != XVECLEN (y, i))
928 1.1 mrg return;
929 1.1 mrg
930 1.1 mrg for (j = 0; j < XVECLEN (x, i); j++)
931 1.1 mrg merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
932 1.1 mrg
933 1.1 mrg break;
934 1.1 mrg
935 1.1 mrg case 'e':
936 1.1 mrg merge_memattrs (XEXP (x, i), XEXP (y, i));
937 1.1 mrg }
938 1.1 mrg }
939 1.1 mrg return;
940 1.1 mrg }
941 1.1 mrg
942 1.1 mrg
943 1.1 mrg /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
944 1.1 mrg different single sets S1 and S2. */
945 1.1 mrg
946 1.1 mrg static bool
947 1.1 mrg equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
948 1.1 mrg {
949 1.1 mrg int i;
950 1.1 mrg rtx e1, e2;
951 1.1 mrg
952 1.1 mrg if (p1 == s1 && p2 == s2)
953 1.1 mrg return true;
954 1.1 mrg
955 1.1 mrg if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
956 1.1 mrg return false;
957 1.1 mrg
958 1.1 mrg if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
959 1.1 mrg return false;
960 1.1 mrg
961 1.1 mrg for (i = 0; i < XVECLEN (p1, 0); i++)
962 1.1 mrg {
963 1.1 mrg e1 = XVECEXP (p1, 0, i);
964 1.1 mrg e2 = XVECEXP (p2, 0, i);
965 1.1 mrg if (e1 == s1 && e2 == s2)
966 1.1 mrg continue;
967 1.1 mrg if (reload_completed
968 1.1 mrg ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
969 1.1 mrg continue;
970 1.1 mrg
971 1.1 mrg return false;
972 1.1 mrg }
973 1.1 mrg
974 1.1 mrg return true;
975 1.1 mrg }
976 1.1 mrg
977 1.1 mrg
978 1.1 mrg /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
979 1.1 mrg that is a single_set with a SET_SRC of SRC1. Similarly
980 1.1 mrg for NOTE2/SRC2.
981 1.1 mrg
982 1.1 mrg So effectively NOTE1/NOTE2 are an alternate form of
983 1.1 mrg SRC1/SRC2 respectively.
984 1.1 mrg
985 1.1 mrg Return nonzero if SRC1 or NOTE1 has the same constant
986 1.1 mrg integer value as SRC2 or NOTE2. Else return zero. */
987 1.1 mrg static int
988 1.1 mrg values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
989 1.1 mrg {
990 1.1 mrg if (note1
991 1.1 mrg && note2
992 1.1 mrg && CONST_INT_P (XEXP (note1, 0))
993 1.1 mrg && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
994 1.1 mrg return 1;
995 1.1 mrg
996 1.1 mrg if (!note1
997 1.1 mrg && !note2
998 1.1 mrg && CONST_INT_P (src1)
999 1.1 mrg && CONST_INT_P (src2)
1000 1.1 mrg && rtx_equal_p (src1, src2))
1001 1.1 mrg return 1;
1002 1.1 mrg
1003 1.1 mrg if (note1
1004 1.1 mrg && CONST_INT_P (src2)
1005 1.1 mrg && rtx_equal_p (XEXP (note1, 0), src2))
1006 1.1 mrg return 1;
1007 1.1 mrg
1008 1.1 mrg if (note2
1009 1.1 mrg && CONST_INT_P (src1)
1010 1.1 mrg && rtx_equal_p (XEXP (note2, 0), src1))
1011 1.1 mrg return 1;
1012 1.1 mrg
1013 1.1 mrg return 0;
1014 1.1 mrg }
1015 1.1 mrg
1016 1.1 mrg /* Examine register notes on I1 and I2 and return:
1017 1.1 mrg - dir_forward if I1 can be replaced by I2, or
1018 1.1 mrg - dir_backward if I2 can be replaced by I1, or
1019 1.1 mrg - dir_both if both are the case. */
1020 1.1 mrg
1021 1.1 mrg static enum replace_direction
1022 1.1 mrg can_replace_by (rtx_insn *i1, rtx_insn *i2)
1023 1.1 mrg {
1024 1.1 mrg rtx s1, s2, d1, d2, src1, src2, note1, note2;
1025 1.1 mrg bool c1, c2;
1026 1.1 mrg
1027 1.1 mrg /* Check for 2 sets. */
1028 1.1 mrg s1 = single_set (i1);
1029 1.1 mrg s2 = single_set (i2);
1030 1.1 mrg if (s1 == NULL_RTX || s2 == NULL_RTX)
1031 1.1 mrg return dir_none;
1032 1.1 mrg
1033 1.1 mrg /* Check that the 2 sets set the same dest. */
1034 1.1 mrg d1 = SET_DEST (s1);
1035 1.1 mrg d2 = SET_DEST (s2);
1036 1.1 mrg if (!(reload_completed
1037 1.1 mrg ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1038 1.1 mrg return dir_none;
1039 1.1 mrg
1040 1.1 mrg /* Find identical req_equiv or reg_equal note, which implies that the 2 sets
1041 1.1 mrg set dest to the same value. */
1042 1.1 mrg note1 = find_reg_equal_equiv_note (i1);
1043 1.1 mrg note2 = find_reg_equal_equiv_note (i2);
1044 1.1 mrg
1045 1.1 mrg src1 = SET_SRC (s1);
1046 1.1 mrg src2 = SET_SRC (s2);
1047 1.1 mrg
1048 1.1 mrg if (!values_equal_p (note1, note2, src1, src2))
1049 1.1 mrg return dir_none;
1050 1.1 mrg
1051 1.1 mrg if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1052 1.1 mrg return dir_none;
1053 1.1 mrg
1054 1.1 mrg /* Although the 2 sets set dest to the same value, we cannot replace
1055 1.1 mrg (set (dest) (const_int))
1056 1.1 mrg by
1057 1.1 mrg (set (dest) (reg))
1058 1.1 mrg because we don't know if the reg is live and has the same value at the
1059 1.1 mrg location of replacement. */
1060 1.1 mrg c1 = CONST_INT_P (src1);
1061 1.1 mrg c2 = CONST_INT_P (src2);
1062 1.1 mrg if (c1 && c2)
1063 1.1 mrg return dir_both;
1064 1.1 mrg else if (c2)
1065 1.1 mrg return dir_forward;
1066 1.1 mrg else if (c1)
1067 1.1 mrg return dir_backward;
1068 1.1 mrg
1069 1.1 mrg return dir_none;
1070 1.1 mrg }
1071 1.1 mrg
1072 1.1 mrg /* Merges directions A and B. */
1073 1.1 mrg
1074 1.1 mrg static enum replace_direction
1075 1.1 mrg merge_dir (enum replace_direction a, enum replace_direction b)
1076 1.1 mrg {
1077 1.1 mrg /* Implements the following table:
1078 1.1 mrg |bo fw bw no
1079 1.1 mrg ---+-----------
1080 1.1 mrg bo |bo fw bw no
1081 1.1 mrg fw |-- fw no no
1082 1.1 mrg bw |-- -- bw no
1083 1.1 mrg no |-- -- -- no. */
1084 1.1 mrg
1085 1.1 mrg if (a == b)
1086 1.1 mrg return a;
1087 1.1 mrg
1088 1.1 mrg if (a == dir_both)
1089 1.1 mrg return b;
1090 1.1 mrg if (b == dir_both)
1091 1.1 mrg return a;
1092 1.1 mrg
1093 1.1 mrg return dir_none;
1094 1.1 mrg }
1095 1.1 mrg
1096 1.1 mrg /* Array of flags indexed by reg note kind, true if the given
1097 1.1 mrg reg note is CFA related. */
1098 1.1 mrg static const bool reg_note_cfa_p[] = {
1099 1.1 mrg #undef REG_CFA_NOTE
1100 1.1 mrg #define DEF_REG_NOTE(NAME) false,
1101 1.1 mrg #define REG_CFA_NOTE(NAME) true,
1102 1.1 mrg #include "reg-notes.def"
1103 1.1 mrg #undef REG_CFA_NOTE
1104 1.1 mrg #undef DEF_REG_NOTE
1105 1.1 mrg false
1106 1.1 mrg };
1107 1.1 mrg
1108 1.1 mrg /* Return true if I1 and I2 have identical CFA notes (the same order
1109 1.1 mrg and equivalent content). */
1110 1.1 mrg
1111 1.1 mrg static bool
1112 1.1 mrg insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
1113 1.1 mrg {
1114 1.1 mrg rtx n1, n2;
1115 1.1 mrg for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
1116 1.1 mrg n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
1117 1.1 mrg {
1118 1.1 mrg /* Skip over reg notes not related to CFI information. */
1119 1.1 mrg while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
1120 1.1 mrg n1 = XEXP (n1, 1);
1121 1.1 mrg while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
1122 1.1 mrg n2 = XEXP (n2, 1);
1123 1.1 mrg if (n1 == NULL_RTX && n2 == NULL_RTX)
1124 1.1 mrg return true;
1125 1.1 mrg if (n1 == NULL_RTX || n2 == NULL_RTX)
1126 1.1 mrg return false;
1127 1.1 mrg if (XEXP (n1, 0) == XEXP (n2, 0))
1128 1.1 mrg ;
1129 1.1 mrg else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
1130 1.1 mrg return false;
1131 1.1 mrg else if (!(reload_completed
1132 1.1 mrg ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
1133 1.1 mrg : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
1134 1.1 mrg return false;
1135 1.1 mrg }
1136 1.1 mrg }
1137 1.1 mrg
1138 1.1 mrg /* Examine I1 and I2 and return:
1139 1.1 mrg - dir_forward if I1 can be replaced by I2, or
1140 1.1 mrg - dir_backward if I2 can be replaced by I1, or
1141 1.1 mrg - dir_both if both are the case. */
1142 1.1 mrg
1143 1.1 mrg static enum replace_direction
1144 1.1 mrg old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1145 1.1 mrg {
1146 1.1 mrg rtx p1, p2;
1147 1.1 mrg
1148 1.1 mrg /* Verify that I1 and I2 are equivalent. */
1149 1.1 mrg if (GET_CODE (i1) != GET_CODE (i2))
1150 1.1 mrg return dir_none;
1151 1.1 mrg
1152 1.1 mrg /* __builtin_unreachable() may lead to empty blocks (ending with
1153 1.1 mrg NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
1154 1.1 mrg if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1155 1.1 mrg return dir_both;
1156 1.1 mrg
1157 1.1 mrg /* ??? Do not allow cross-jumping between different stack levels. */
1158 1.1 mrg p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1159 1.1 mrg p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1160 1.1 mrg if (p1 && p2)
1161 1.1 mrg {
1162 1.1 mrg p1 = XEXP (p1, 0);
1163 1.1 mrg p2 = XEXP (p2, 0);
1164 1.1 mrg if (!rtx_equal_p (p1, p2))
1165 1.1 mrg return dir_none;
1166 1.1 mrg
1167 1.1 mrg /* ??? Worse, this adjustment had better be constant lest we
1168 1.1 mrg have differing incoming stack levels. */
1169 1.1 mrg if (!frame_pointer_needed
1170 1.1 mrg && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
1171 1.1 mrg return dir_none;
1172 1.1 mrg }
1173 1.1 mrg else if (p1 || p2)
1174 1.1 mrg return dir_none;
1175 1.1 mrg
1176 1.1 mrg /* Do not allow cross-jumping between frame related insns and other
1177 1.1 mrg insns. */
1178 1.1 mrg if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
1179 1.1 mrg return dir_none;
1180 1.1 mrg
1181 1.1 mrg p1 = PATTERN (i1);
1182 1.1 mrg p2 = PATTERN (i2);
1183 1.1 mrg
1184 1.1 mrg if (GET_CODE (p1) != GET_CODE (p2))
1185 1.1 mrg return dir_none;
1186 1.1 mrg
1187 1.1 mrg /* If this is a CALL_INSN, compare register usage information.
1188 1.1 mrg If we don't check this on stack register machines, the two
1189 1.1 mrg CALL_INSNs might be merged leaving reg-stack.cc with mismatching
1190 1.1 mrg numbers of stack registers in the same basic block.
1191 1.1 mrg If we don't check this on machines with delay slots, a delay slot may
1192 1.1 mrg be filled that clobbers a parameter expected by the subroutine.
1193 1.1 mrg
1194 1.1 mrg ??? We take the simple route for now and assume that if they're
1195 1.1 mrg equal, they were constructed identically.
1196 1.1 mrg
1197 1.1 mrg Also check for identical exception regions. */
1198 1.1 mrg
1199 1.1 mrg if (CALL_P (i1))
1200 1.1 mrg {
1201 1.1 mrg /* Ensure the same EH region. */
1202 1.1 mrg rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1203 1.1 mrg rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1204 1.1 mrg
1205 1.1 mrg if (!n1 && n2)
1206 1.1 mrg return dir_none;
1207 1.1 mrg
1208 1.1 mrg if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1209 1.1 mrg return dir_none;
1210 1.1 mrg
1211 1.1 mrg if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1212 1.1 mrg CALL_INSN_FUNCTION_USAGE (i2))
1213 1.1 mrg || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1214 1.1 mrg return dir_none;
1215 1.1 mrg
1216 1.1 mrg /* For address sanitizer, never crossjump __asan_report_* builtins,
1217 1.1 mrg otherwise errors might be reported on incorrect lines. */
1218 1.1 mrg if (flag_sanitize & SANITIZE_ADDRESS)
1219 1.1 mrg {
1220 1.1 mrg rtx call = get_call_rtx_from (i1);
1221 1.1 mrg if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1222 1.1 mrg {
1223 1.1 mrg rtx symbol = XEXP (XEXP (call, 0), 0);
1224 1.1 mrg if (SYMBOL_REF_DECL (symbol)
1225 1.1 mrg && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1226 1.1 mrg {
1227 1.1 mrg if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1228 1.1 mrg == BUILT_IN_NORMAL)
1229 1.1 mrg && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1230 1.1 mrg >= BUILT_IN_ASAN_REPORT_LOAD1
1231 1.1 mrg && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1232 1.1 mrg <= BUILT_IN_ASAN_STOREN)
1233 1.1 mrg return dir_none;
1234 1.1 mrg }
1235 1.1 mrg }
1236 1.1 mrg }
1237 1.1 mrg
1238 1.1 mrg if (insn_callee_abi (i1) != insn_callee_abi (i2))
1239 1.1 mrg return dir_none;
1240 1.1 mrg }
1241 1.1 mrg
1242 1.1 mrg /* If both i1 and i2 are frame related, verify all the CFA notes
1243 1.1 mrg in the same order and with the same content. */
1244 1.1 mrg if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
1245 1.1 mrg return dir_none;
1246 1.1 mrg
1247 1.1 mrg #ifdef STACK_REGS
1248 1.1 mrg /* If cross_jump_death_matters is not 0, the insn's mode
1249 1.1 mrg indicates whether or not the insn contains any stack-like
1250 1.1 mrg regs. */
1251 1.1 mrg
1252 1.1 mrg if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1253 1.1 mrg {
1254 1.1 mrg /* If register stack conversion has already been done, then
1255 1.1 mrg death notes must also be compared before it is certain that
1256 1.1 mrg the two instruction streams match. */
1257 1.1 mrg
1258 1.1 mrg rtx note;
1259 1.1 mrg HARD_REG_SET i1_regset, i2_regset;
1260 1.1 mrg
1261 1.1 mrg CLEAR_HARD_REG_SET (i1_regset);
1262 1.1 mrg CLEAR_HARD_REG_SET (i2_regset);
1263 1.1 mrg
1264 1.1 mrg for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1265 1.1 mrg if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1266 1.1 mrg SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1267 1.1 mrg
1268 1.1 mrg for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1269 1.1 mrg if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1270 1.1 mrg SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1271 1.1 mrg
1272 1.1 mrg if (i1_regset != i2_regset)
1273 1.1 mrg return dir_none;
1274 1.1 mrg }
1275 1.1 mrg #endif
1276 1.1 mrg
1277 1.1 mrg if (reload_completed
1278 1.1 mrg ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1279 1.1 mrg return dir_both;
1280 1.1 mrg
1281 1.1 mrg return can_replace_by (i1, i2);
1282 1.1 mrg }
1283 1.1 mrg
1284 1.1 mrg /* When comparing insns I1 and I2 in flow_find_cross_jump or
1286 1.1 mrg flow_find_head_matching_sequence, ensure the notes match. */
1287 1.1 mrg
1288 1.1 mrg static void
1289 1.1 mrg merge_notes (rtx_insn *i1, rtx_insn *i2)
1290 1.1 mrg {
1291 1.1 mrg /* If the merged insns have different REG_EQUAL notes, then
1292 1.1 mrg remove them. */
1293 1.1 mrg rtx equiv1 = find_reg_equal_equiv_note (i1);
1294 1.1 mrg rtx equiv2 = find_reg_equal_equiv_note (i2);
1295 1.1 mrg
1296 1.1 mrg if (equiv1 && !equiv2)
1297 1.1 mrg remove_note (i1, equiv1);
1298 1.1 mrg else if (!equiv1 && equiv2)
1299 1.1 mrg remove_note (i2, equiv2);
1300 1.1 mrg else if (equiv1 && equiv2
1301 1.1 mrg && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1302 1.1 mrg {
1303 1.1 mrg remove_note (i1, equiv1);
1304 1.1 mrg remove_note (i2, equiv2);
1305 1.1 mrg }
1306 1.1 mrg }
1307 1.1 mrg
1308 1.1 mrg /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
1309 1.1 mrg resulting insn in I1, and the corresponding bb in BB1. At the head of a
1310 1.1 mrg bb, if there is a predecessor bb that reaches this bb via fallthru, and
1311 1.1 mrg FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
1312 1.1 mrg DID_FALLTHRU. Otherwise, stops at the head of the bb. */
1313 1.1 mrg
1314 1.1 mrg static void
1315 1.1 mrg walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1316 1.1 mrg bool *did_fallthru)
1317 1.1 mrg {
1318 1.1 mrg edge fallthru;
1319 1.1 mrg
1320 1.1 mrg *did_fallthru = false;
1321 1.1 mrg
1322 1.1 mrg /* Ignore notes. */
1323 1.1 mrg while (!NONDEBUG_INSN_P (*i1))
1324 1.1 mrg {
1325 1.1 mrg if (*i1 != BB_HEAD (*bb1))
1326 1.1 mrg {
1327 1.1 mrg *i1 = PREV_INSN (*i1);
1328 1.1 mrg continue;
1329 1.1 mrg }
1330 1.1 mrg
1331 1.1 mrg if (!follow_fallthru)
1332 1.1 mrg return;
1333 1.1 mrg
1334 1.1 mrg fallthru = find_fallthru_edge ((*bb1)->preds);
1335 1.1 mrg if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1336 1.1 mrg || !single_succ_p (fallthru->src))
1337 1.1 mrg return;
1338 1.1 mrg
1339 1.1 mrg *bb1 = fallthru->src;
1340 1.1 mrg *i1 = BB_END (*bb1);
1341 1.1 mrg *did_fallthru = true;
1342 1.1 mrg }
1343 1.1 mrg }
1344 1.1 mrg
1345 1.1 mrg /* Look through the insns at the end of BB1 and BB2 and find the longest
1346 1.1 mrg sequence that are either equivalent, or allow forward or backward
1347 1.1 mrg replacement. Store the first insns for that sequence in *F1 and *F2 and
1348 1.1 mrg return the sequence length.
1349 1.1 mrg
1350 1.1 mrg DIR_P indicates the allowed replacement direction on function entry, and
1351 1.1 mrg the actual replacement direction on function exit. If NULL, only equivalent
1352 1.1 mrg sequences are allowed.
1353 1.1 mrg
1354 1.1 mrg To simplify callers of this function, if the blocks match exactly,
1355 1.1 mrg store the head of the blocks in *F1 and *F2. */
1356 1.1 mrg
1357 1.1 mrg int
1358 1.1 mrg flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1359 1.1 mrg rtx_insn **f2, enum replace_direction *dir_p)
1360 1.1 mrg {
1361 1.1 mrg rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1362 1.1 mrg int ninsns = 0;
1363 1.1 mrg enum replace_direction dir, last_dir, afterlast_dir;
1364 1.1 mrg bool follow_fallthru, did_fallthru;
1365 1.1 mrg
1366 1.1 mrg if (dir_p)
1367 1.1 mrg dir = *dir_p;
1368 1.1 mrg else
1369 1.1 mrg dir = dir_both;
1370 1.1 mrg afterlast_dir = dir;
1371 1.1 mrg last_dir = afterlast_dir;
1372 1.1 mrg
1373 1.1 mrg /* Skip simple jumps at the end of the blocks. Complex jumps still
1374 1.1 mrg need to be compared for equivalence, which we'll do below. */
1375 1.1 mrg
1376 1.1 mrg i1 = BB_END (bb1);
1377 1.1 mrg last1 = afterlast1 = last2 = afterlast2 = NULL;
1378 1.1 mrg if (onlyjump_p (i1)
1379 1.1 mrg || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1380 1.1 mrg {
1381 1.1 mrg last1 = i1;
1382 1.1 mrg i1 = PREV_INSN (i1);
1383 1.1 mrg }
1384 1.1 mrg
1385 1.1 mrg i2 = BB_END (bb2);
1386 1.1 mrg if (onlyjump_p (i2)
1387 1.1 mrg || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1388 1.1 mrg {
1389 1.1 mrg last2 = i2;
1390 1.1 mrg /* Count everything except for unconditional jump as insn.
1391 1.1 mrg Don't count any jumps if dir_p is NULL. */
1392 1.1 mrg if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1393 1.1 mrg ninsns++;
1394 1.1 mrg i2 = PREV_INSN (i2);
1395 1.1 mrg }
1396 1.1 mrg
1397 1.1 mrg while (true)
1398 1.1 mrg {
1399 1.1 mrg /* In the following example, we can replace all jumps to C by jumps to A.
1400 1.1 mrg
1401 1.1 mrg This removes 4 duplicate insns.
1402 1.1 mrg [bb A] insn1 [bb C] insn1
1403 1.1 mrg insn2 insn2
1404 1.1 mrg [bb B] insn3 insn3
1405 1.1 mrg insn4 insn4
1406 1.1 mrg jump_insn jump_insn
1407 1.1 mrg
1408 1.1 mrg We could also replace all jumps to A by jumps to C, but that leaves B
1409 1.1 mrg alive, and removes only 2 duplicate insns. In a subsequent crossjump
1410 1.1 mrg step, all jumps to B would be replaced with jumps to the middle of C,
1411 1.1 mrg achieving the same result with more effort.
1412 1.1 mrg So we allow only the first possibility, which means that we don't allow
1413 1.1 mrg fallthru in the block that's being replaced. */
1414 1.1 mrg
1415 1.1 mrg follow_fallthru = dir_p && dir != dir_forward;
1416 1.1 mrg walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1417 1.1 mrg if (did_fallthru)
1418 1.1 mrg dir = dir_backward;
1419 1.1 mrg
1420 1.1 mrg follow_fallthru = dir_p && dir != dir_backward;
1421 1.1 mrg walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1422 1.1 mrg if (did_fallthru)
1423 1.1 mrg dir = dir_forward;
1424 1.1 mrg
1425 1.1 mrg if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1426 1.1 mrg break;
1427 1.1 mrg
1428 1.1 mrg /* Do not turn corssing edge to non-crossing or vice versa after
1429 1.1 mrg reload. */
1430 1.1 mrg if (BB_PARTITION (BLOCK_FOR_INSN (i1))
1431 1.1 mrg != BB_PARTITION (BLOCK_FOR_INSN (i2))
1432 1.1 mrg && reload_completed)
1433 1.1 mrg break;
1434 1.1 mrg
1435 1.1 mrg dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1436 1.1 mrg if (dir == dir_none || (!dir_p && dir != dir_both))
1437 1.1 mrg break;
1438 1.1 mrg
1439 1.1 mrg merge_memattrs (i1, i2);
1440 1.1 mrg
1441 1.1 mrg /* Don't begin a cross-jump with a NOTE insn. */
1442 1.1 mrg if (INSN_P (i1))
1443 1.1 mrg {
1444 1.1 mrg merge_notes (i1, i2);
1445 1.1 mrg
1446 1.1 mrg afterlast1 = last1, afterlast2 = last2;
1447 1.1 mrg last1 = i1, last2 = i2;
1448 1.1 mrg afterlast_dir = last_dir;
1449 1.1 mrg last_dir = dir;
1450 1.1 mrg if (active_insn_p (i1))
1451 1.1 mrg ninsns++;
1452 1.1 mrg }
1453 1.1 mrg
1454 1.1 mrg i1 = PREV_INSN (i1);
1455 1.1 mrg i2 = PREV_INSN (i2);
1456 1.1 mrg }
1457 1.1 mrg
1458 1.1 mrg /* Include preceding notes and labels in the cross-jump. One,
1459 1.1 mrg this may bring us to the head of the blocks as requested above.
1460 1.1 mrg Two, it keeps line number notes as matched as may be. */
1461 1.1 mrg if (ninsns)
1462 1.1 mrg {
1463 1.1 mrg bb1 = BLOCK_FOR_INSN (last1);
1464 1.1 mrg while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1465 1.1 mrg last1 = PREV_INSN (last1);
1466 1.1 mrg
1467 1.1 mrg if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1468 1.1 mrg last1 = PREV_INSN (last1);
1469 1.1 mrg
1470 1.1 mrg bb2 = BLOCK_FOR_INSN (last2);
1471 1.1 mrg while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1472 1.1 mrg last2 = PREV_INSN (last2);
1473 1.1 mrg
1474 1.1 mrg if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1475 1.1 mrg last2 = PREV_INSN (last2);
1476 1.1 mrg
1477 1.1 mrg *f1 = last1;
1478 1.1 mrg *f2 = last2;
1479 1.1 mrg }
1480 1.1 mrg
1481 1.1 mrg if (dir_p)
1482 1.1 mrg *dir_p = last_dir;
1483 1.1 mrg return ninsns;
1484 1.1 mrg }
1485 1.1 mrg
1486 1.1 mrg /* Like flow_find_cross_jump, except start looking for a matching sequence from
1487 1.1 mrg the head of the two blocks. Do not include jumps at the end.
1488 1.1 mrg If STOP_AFTER is nonzero, stop after finding that many matching
1489 1.1 mrg instructions. If STOP_AFTER is zero, count all INSN_P insns, if it is
1490 1.1 mrg non-zero, only count active insns. */
1491 1.1 mrg
1492 1.1 mrg int
1493 1.1 mrg flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1494 1.1 mrg rtx_insn **f2, int stop_after)
1495 1.1 mrg {
1496 1.1 mrg rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1497 1.1 mrg int ninsns = 0;
1498 1.1 mrg edge e;
1499 1.1 mrg edge_iterator ei;
1500 1.1 mrg int nehedges1 = 0, nehedges2 = 0;
1501 1.1 mrg
1502 1.1 mrg FOR_EACH_EDGE (e, ei, bb1->succs)
1503 1.1 mrg if (e->flags & EDGE_EH)
1504 1.1 mrg nehedges1++;
1505 1.1 mrg FOR_EACH_EDGE (e, ei, bb2->succs)
1506 1.1 mrg if (e->flags & EDGE_EH)
1507 1.1 mrg nehedges2++;
1508 1.1 mrg
1509 1.1 mrg i1 = BB_HEAD (bb1);
1510 1.1 mrg i2 = BB_HEAD (bb2);
1511 1.1 mrg last1 = beforelast1 = last2 = beforelast2 = NULL;
1512 1.1 mrg
1513 1.1 mrg while (true)
1514 1.1 mrg {
1515 1.1 mrg /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */
1516 1.1 mrg while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1517 1.1 mrg {
1518 1.1 mrg if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1519 1.1 mrg break;
1520 1.1 mrg i1 = NEXT_INSN (i1);
1521 1.1 mrg }
1522 1.1 mrg
1523 1.1 mrg while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1524 1.1 mrg {
1525 1.1 mrg if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1526 1.1 mrg break;
1527 1.1 mrg i2 = NEXT_INSN (i2);
1528 1.1 mrg }
1529 1.1 mrg
1530 1.1 mrg if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1531 1.1 mrg || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1532 1.1 mrg break;
1533 1.1 mrg
1534 1.1 mrg if (NOTE_P (i1) || NOTE_P (i2)
1535 1.1 mrg || JUMP_P (i1) || JUMP_P (i2))
1536 1.1 mrg break;
1537 1.1 mrg
1538 1.1 mrg /* A sanity check to make sure we're not merging insns with different
1539 1.1 mrg effects on EH. If only one of them ends a basic block, it shouldn't
1540 1.1 mrg have an EH edge; if both end a basic block, there should be the same
1541 1.1 mrg number of EH edges. */
1542 1.1 mrg if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1543 1.1 mrg && nehedges1 > 0)
1544 1.1 mrg || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1545 1.1 mrg && nehedges2 > 0)
1546 1.1 mrg || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1547 1.1 mrg && nehedges1 != nehedges2))
1548 1.1 mrg break;
1549 1.1 mrg
1550 1.1 mrg if (old_insns_match_p (0, i1, i2) != dir_both)
1551 1.1 mrg break;
1552 1.1 mrg
1553 1.1 mrg merge_memattrs (i1, i2);
1554 1.1 mrg
1555 1.1 mrg /* Don't begin a cross-jump with a NOTE insn. */
1556 1.1 mrg if (INSN_P (i1))
1557 1.1 mrg {
1558 1.1 mrg merge_notes (i1, i2);
1559 1.1 mrg
1560 1.1 mrg beforelast1 = last1, beforelast2 = last2;
1561 1.1 mrg last1 = i1, last2 = i2;
1562 1.1 mrg if (!stop_after || active_insn_p (i1))
1563 1.1 mrg ninsns++;
1564 1.1 mrg }
1565 1.1 mrg
1566 1.1 mrg if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1567 1.1 mrg || (stop_after > 0 && ninsns == stop_after))
1568 1.1 mrg break;
1569 1.1 mrg
1570 1.1 mrg i1 = NEXT_INSN (i1);
1571 1.1 mrg i2 = NEXT_INSN (i2);
1572 1.1 mrg }
1573 1.1 mrg
1574 1.1 mrg if (ninsns)
1575 1.1 mrg {
1576 1.1 mrg *f1 = last1;
1577 1.1 mrg *f2 = last2;
1578 1.1 mrg }
1579 1.1 mrg
1580 1.1 mrg return ninsns;
1581 1.1 mrg }
1582 1.1 mrg
1583 1.1 mrg /* Return true iff outgoing edges of BB1 and BB2 match, together with
1584 1.1 mrg the branch instruction. This means that if we commonize the control
1585 1.1 mrg flow before end of the basic block, the semantic remains unchanged.
1586 1.1 mrg
1587 1.1 mrg We may assume that there exists one edge with a common destination. */
1588 1.1 mrg
1589 1.1 mrg static bool
1590 1.1 mrg outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1591 1.1 mrg {
1592 1.1 mrg int nehedges1 = 0, nehedges2 = 0;
1593 1.1 mrg edge fallthru1 = 0, fallthru2 = 0;
1594 1.1 mrg edge e1, e2;
1595 1.1 mrg edge_iterator ei;
1596 1.1 mrg
1597 1.1 mrg /* If we performed shrink-wrapping, edges to the exit block can
1598 1.1 mrg only be distinguished for JUMP_INSNs. The two paths may differ in
1599 1.1 mrg whether they went through the prologue. Sibcalls are fine, we know
1600 1.1 mrg that we either didn't need or inserted an epilogue before them. */
1601 1.1 mrg if (crtl->shrink_wrapped
1602 1.1 mrg && single_succ_p (bb1)
1603 1.1 mrg && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1604 1.1 mrg && (!JUMP_P (BB_END (bb1))
1605 1.1 mrg /* Punt if the only successor is a fake edge to exit, the jump
1606 1.1 mrg must be some weird one. */
1607 1.1 mrg || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
1608 1.1 mrg && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1609 1.1 mrg return false;
1610 1.1 mrg
1611 1.1 mrg /* If BB1 has only one successor, we may be looking at either an
1612 1.1 mrg unconditional jump, or a fake edge to exit. */
1613 1.1 mrg if (single_succ_p (bb1)
1614 1.1 mrg && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1615 1.1 mrg && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1616 1.1 mrg return (single_succ_p (bb2)
1617 1.1 mrg && (single_succ_edge (bb2)->flags
1618 1.1 mrg & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1619 1.1 mrg && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1620 1.1 mrg
1621 1.1 mrg /* Match conditional jumps - this may get tricky when fallthru and branch
1622 1.1 mrg edges are crossed. */
1623 1.1 mrg if (EDGE_COUNT (bb1->succs) == 2
1624 1.1 mrg && any_condjump_p (BB_END (bb1))
1625 1.1 mrg && onlyjump_p (BB_END (bb1)))
1626 1.1 mrg {
1627 1.1 mrg edge b1, f1, b2, f2;
1628 1.1 mrg bool reverse, match;
1629 1.1 mrg rtx set1, set2, cond1, cond2;
1630 1.1 mrg enum rtx_code code1, code2;
1631 1.1 mrg
1632 1.1 mrg if (EDGE_COUNT (bb2->succs) != 2
1633 1.1 mrg || !any_condjump_p (BB_END (bb2))
1634 1.1 mrg || !onlyjump_p (BB_END (bb2)))
1635 1.1 mrg return false;
1636 1.1 mrg
1637 1.1 mrg b1 = BRANCH_EDGE (bb1);
1638 1.1 mrg b2 = BRANCH_EDGE (bb2);
1639 1.1 mrg f1 = FALLTHRU_EDGE (bb1);
1640 1.1 mrg f2 = FALLTHRU_EDGE (bb2);
1641 1.1 mrg
1642 1.1 mrg /* Get around possible forwarders on fallthru edges. Other cases
1643 1.1 mrg should be optimized out already. */
1644 1.1 mrg if (FORWARDER_BLOCK_P (f1->dest))
1645 1.1 mrg f1 = single_succ_edge (f1->dest);
1646 1.1 mrg
1647 1.1 mrg if (FORWARDER_BLOCK_P (f2->dest))
1648 1.1 mrg f2 = single_succ_edge (f2->dest);
1649 1.1 mrg
1650 1.1 mrg /* To simplify use of this function, return false if there are
1651 1.1 mrg unneeded forwarder blocks. These will get eliminated later
1652 1.1 mrg during cleanup_cfg. */
1653 1.1 mrg if (FORWARDER_BLOCK_P (f1->dest)
1654 1.1 mrg || FORWARDER_BLOCK_P (f2->dest)
1655 1.1 mrg || FORWARDER_BLOCK_P (b1->dest)
1656 1.1 mrg || FORWARDER_BLOCK_P (b2->dest))
1657 1.1 mrg return false;
1658 1.1 mrg
1659 1.1 mrg if (f1->dest == f2->dest && b1->dest == b2->dest)
1660 1.1 mrg reverse = false;
1661 1.1 mrg else if (f1->dest == b2->dest && b1->dest == f2->dest)
1662 1.1 mrg reverse = true;
1663 1.1 mrg else
1664 1.1 mrg return false;
1665 1.1 mrg
1666 1.1 mrg set1 = pc_set (BB_END (bb1));
1667 1.1 mrg set2 = pc_set (BB_END (bb2));
1668 1.1 mrg if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1669 1.1 mrg != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1670 1.1 mrg reverse = !reverse;
1671 1.1 mrg
1672 1.1 mrg cond1 = XEXP (SET_SRC (set1), 0);
1673 1.1 mrg cond2 = XEXP (SET_SRC (set2), 0);
1674 1.1 mrg code1 = GET_CODE (cond1);
1675 1.1 mrg if (reverse)
1676 1.1 mrg code2 = reversed_comparison_code (cond2, BB_END (bb2));
1677 1.1 mrg else
1678 1.1 mrg code2 = GET_CODE (cond2);
1679 1.1 mrg
1680 1.1 mrg if (code2 == UNKNOWN)
1681 1.1 mrg return false;
1682 1.1 mrg
1683 1.1 mrg /* Verify codes and operands match. */
1684 1.1 mrg match = ((code1 == code2
1685 1.1 mrg && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1686 1.1 mrg && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1687 1.1 mrg || (code1 == swap_condition (code2)
1688 1.1 mrg && rtx_renumbered_equal_p (XEXP (cond1, 1),
1689 1.1 mrg XEXP (cond2, 0))
1690 1.1 mrg && rtx_renumbered_equal_p (XEXP (cond1, 0),
1691 1.1 mrg XEXP (cond2, 1))));
1692 1.1 mrg
1693 1.1 mrg /* If we return true, we will join the blocks. Which means that
1694 1.1 mrg we will only have one branch prediction bit to work with. Thus
1695 1.1 mrg we require the existing branches to have probabilities that are
1696 1.1 mrg roughly similar. */
1697 1.1 mrg if (match
1698 1.1 mrg && optimize_bb_for_speed_p (bb1)
1699 1.1 mrg && optimize_bb_for_speed_p (bb2))
1700 1.1 mrg {
1701 1.1 mrg profile_probability prob2;
1702 1.1 mrg
1703 1.1 mrg if (b1->dest == b2->dest)
1704 1.1 mrg prob2 = b2->probability;
1705 1.1 mrg else
1706 1.1 mrg /* Do not use f2 probability as f2 may be forwarded. */
1707 1.1 mrg prob2 = b2->probability.invert ();
1708 1.1 mrg
1709 1.1 mrg /* Fail if the difference in probabilities is greater than 50%.
1710 1.1 mrg This rules out two well-predicted branches with opposite
1711 1.1 mrg outcomes. */
1712 1.1 mrg if (b1->probability.differs_lot_from_p (prob2))
1713 1.1 mrg {
1714 1.1 mrg if (dump_file)
1715 1.1 mrg {
1716 1.1 mrg fprintf (dump_file,
1717 1.1 mrg "Outcomes of branch in bb %i and %i differ too"
1718 1.1 mrg " much (", bb1->index, bb2->index);
1719 1.1 mrg b1->probability.dump (dump_file);
1720 1.1 mrg prob2.dump (dump_file);
1721 1.1 mrg fprintf (dump_file, ")\n");
1722 1.1 mrg }
1723 1.1 mrg return false;
1724 1.1 mrg }
1725 1.1 mrg }
1726 1.1 mrg
1727 1.1 mrg if (dump_file && match)
1728 1.1 mrg fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1729 1.1 mrg bb1->index, bb2->index);
1730 1.1 mrg
1731 1.1 mrg return match;
1732 1.1 mrg }
1733 1.1 mrg
1734 1.1 mrg /* Generic case - we are seeing a computed jump, table jump or trapping
1735 1.1 mrg instruction. */
1736 1.1 mrg
1737 1.1 mrg /* Check whether there are tablejumps in the end of BB1 and BB2.
1738 1.1 mrg Return true if they are identical. */
1739 1.1 mrg {
1740 1.1 mrg rtx_insn *label1, *label2;
1741 1.1 mrg rtx_jump_table_data *table1, *table2;
1742 1.1 mrg
1743 1.1 mrg if (tablejump_p (BB_END (bb1), &label1, &table1)
1744 1.1 mrg && tablejump_p (BB_END (bb2), &label2, &table2)
1745 1.1 mrg && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1746 1.1 mrg {
1747 1.1 mrg /* The labels should never be the same rtx. If they really are same
1748 1.1 mrg the jump tables are same too. So disable crossjumping of blocks BB1
1749 1.1 mrg and BB2 because when deleting the common insns in the end of BB1
1750 1.1 mrg by delete_basic_block () the jump table would be deleted too. */
1751 1.1 mrg /* If LABEL2 is referenced in BB1->END do not do anything
1752 1.1 mrg because we would loose information when replacing
1753 1.1 mrg LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1754 1.1 mrg if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1755 1.1 mrg {
1756 1.1 mrg /* Set IDENTICAL to true when the tables are identical. */
1757 1.1 mrg bool identical = false;
1758 1.1 mrg rtx p1, p2;
1759 1.1 mrg
1760 1.1 mrg p1 = PATTERN (table1);
1761 1.1 mrg p2 = PATTERN (table2);
1762 1.1 mrg if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1763 1.1 mrg {
1764 1.1 mrg identical = true;
1765 1.1 mrg }
1766 1.1 mrg else if (GET_CODE (p1) == ADDR_DIFF_VEC
1767 1.1 mrg && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1768 1.1 mrg && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1769 1.1 mrg && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1770 1.1 mrg {
1771 1.1 mrg int i;
1772 1.1 mrg
1773 1.1 mrg identical = true;
1774 1.1 mrg for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1775 1.1 mrg if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1776 1.1 mrg identical = false;
1777 1.1 mrg }
1778 1.1 mrg
1779 1.1 mrg if (identical)
1780 1.1 mrg {
1781 1.1 mrg bool match;
1782 1.1 mrg
1783 1.1 mrg /* Temporarily replace references to LABEL1 with LABEL2
1784 1.1 mrg in BB1->END so that we could compare the instructions. */
1785 1.1 mrg replace_label_in_insn (BB_END (bb1), label1, label2, false);
1786 1.1 mrg
1787 1.1 mrg match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1788 1.1 mrg == dir_both);
1789 1.1 mrg if (dump_file && match)
1790 1.1 mrg fprintf (dump_file,
1791 1.1 mrg "Tablejumps in bb %i and %i match.\n",
1792 1.1 mrg bb1->index, bb2->index);
1793 1.1 mrg
1794 1.1 mrg /* Set the original label in BB1->END because when deleting
1795 1.1 mrg a block whose end is a tablejump, the tablejump referenced
1796 1.1 mrg from the instruction is deleted too. */
1797 1.1 mrg replace_label_in_insn (BB_END (bb1), label2, label1, false);
1798 1.1 mrg
1799 1.1 mrg return match;
1800 1.1 mrg }
1801 1.1 mrg }
1802 1.1 mrg return false;
1803 1.1 mrg }
1804 1.1 mrg }
1805 1.1 mrg
1806 1.1 mrg /* Find the last non-debug non-note instruction in each bb, except
1807 1.1 mrg stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1808 1.1 mrg handles that case specially. old_insns_match_p does not handle
1809 1.1 mrg other types of instruction notes. */
1810 1.1 mrg rtx_insn *last1 = BB_END (bb1);
1811 1.1 mrg rtx_insn *last2 = BB_END (bb2);
1812 1.1 mrg while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
1813 1.1 mrg (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1814 1.1 mrg last1 = PREV_INSN (last1);
1815 1.1 mrg while (!NOTE_INSN_BASIC_BLOCK_P (last2) &&
1816 1.1 mrg (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1817 1.1 mrg last2 = PREV_INSN (last2);
1818 1.1 mrg gcc_assert (last1 && last2);
1819 1.1 mrg
1820 1.1 mrg /* First ensure that the instructions match. There may be many outgoing
1821 1.1 mrg edges so this test is generally cheaper. */
1822 1.1 mrg if (old_insns_match_p (mode, last1, last2) != dir_both)
1823 1.1 mrg return false;
1824 1.1 mrg
1825 1.1 mrg /* Search the outgoing edges, ensure that the counts do match, find possible
1826 1.1 mrg fallthru and exception handling edges since these needs more
1827 1.1 mrg validation. */
1828 1.1 mrg if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1829 1.1 mrg return false;
1830 1.1 mrg
1831 1.1 mrg bool nonfakeedges = false;
1832 1.1 mrg FOR_EACH_EDGE (e1, ei, bb1->succs)
1833 1.1 mrg {
1834 1.1 mrg e2 = EDGE_SUCC (bb2, ei.index);
1835 1.1 mrg
1836 1.1 mrg if ((e1->flags & EDGE_FAKE) == 0)
1837 1.1 mrg nonfakeedges = true;
1838 1.1 mrg
1839 1.1 mrg if (e1->flags & EDGE_EH)
1840 1.1 mrg nehedges1++;
1841 1.1 mrg
1842 1.1 mrg if (e2->flags & EDGE_EH)
1843 1.1 mrg nehedges2++;
1844 1.1 mrg
1845 1.1 mrg if (e1->flags & EDGE_FALLTHRU)
1846 1.1 mrg fallthru1 = e1;
1847 1.1 mrg if (e2->flags & EDGE_FALLTHRU)
1848 1.1 mrg fallthru2 = e2;
1849 1.1 mrg }
1850 1.1 mrg
1851 1.1 mrg /* If number of edges of various types does not match, fail. */
1852 1.1 mrg if (nehedges1 != nehedges2
1853 1.1 mrg || (fallthru1 != 0) != (fallthru2 != 0))
1854 1.1 mrg return false;
1855 1.1 mrg
1856 1.1 mrg /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1857 1.1 mrg and the last real insn doesn't have REG_ARGS_SIZE note, don't
1858 1.1 mrg attempt to optimize, as the two basic blocks might have different
1859 1.1 mrg REG_ARGS_SIZE depths. For noreturn calls and unconditional
1860 1.1 mrg traps there should be REG_ARG_SIZE notes, they could be missing
1861 1.1 mrg for __builtin_unreachable () uses though. */
1862 1.1 mrg if (!nonfakeedges
1863 1.1 mrg && !ACCUMULATE_OUTGOING_ARGS
1864 1.1 mrg && (!INSN_P (last1)
1865 1.1 mrg || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1866 1.1 mrg return false;
1867 1.1 mrg
1868 1.1 mrg /* fallthru edges must be forwarded to the same destination. */
1869 1.1 mrg if (fallthru1)
1870 1.1 mrg {
1871 1.1 mrg basic_block d1 = (forwarder_block_p (fallthru1->dest)
1872 1.1 mrg ? single_succ (fallthru1->dest): fallthru1->dest);
1873 1.1 mrg basic_block d2 = (forwarder_block_p (fallthru2->dest)
1874 1.1 mrg ? single_succ (fallthru2->dest): fallthru2->dest);
1875 1.1 mrg
1876 1.1 mrg if (d1 != d2)
1877 1.1 mrg return false;
1878 1.1 mrg }
1879 1.1 mrg
1880 1.1 mrg /* Ensure the same EH region. */
1881 1.1 mrg {
1882 1.1 mrg rtx n1 = find_reg_note (last1, REG_EH_REGION, 0);
1883 1.1 mrg rtx n2 = find_reg_note (last2, REG_EH_REGION, 0);
1884 1.1 mrg
1885 1.1 mrg if (!n1 && n2)
1886 1.1 mrg return false;
1887 1.1 mrg
1888 1.1 mrg if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1889 1.1 mrg return false;
1890 1.1 mrg }
1891 1.1 mrg
1892 1.1 mrg /* The same checks as in try_crossjump_to_edge. It is required for RTL
1893 1.1 mrg version of sequence abstraction. */
1894 1.1 mrg FOR_EACH_EDGE (e1, ei, bb2->succs)
1895 1.1 mrg {
1896 1.1 mrg edge e2;
1897 1.1 mrg edge_iterator ei;
1898 1.1 mrg basic_block d1 = e1->dest;
1899 1.1 mrg
1900 1.1 mrg if (FORWARDER_BLOCK_P (d1))
1901 1.1 mrg d1 = EDGE_SUCC (d1, 0)->dest;
1902 1.1 mrg
1903 1.1 mrg FOR_EACH_EDGE (e2, ei, bb1->succs)
1904 1.1 mrg {
1905 1.1 mrg basic_block d2 = e2->dest;
1906 1.1 mrg if (FORWARDER_BLOCK_P (d2))
1907 1.1 mrg d2 = EDGE_SUCC (d2, 0)->dest;
1908 1.1 mrg if (d1 == d2)
1909 1.1 mrg break;
1910 1.1 mrg }
1911 1.1 mrg
1912 1.1 mrg if (!e2)
1913 1.1 mrg return false;
1914 1.1 mrg }
1915 1.1 mrg
1916 1.1 mrg return true;
1917 1.1 mrg }
1918 1.1 mrg
1919 1.1 mrg /* Returns true if BB basic block has a preserve label. */
1920 1.1 mrg
1921 1.1 mrg static bool
1922 1.1 mrg block_has_preserve_label (basic_block bb)
1923 1.1 mrg {
1924 1.1 mrg return (bb
1925 1.1 mrg && block_label (bb)
1926 1.1 mrg && LABEL_PRESERVE_P (block_label (bb)));
1927 1.1 mrg }
1928 1.1 mrg
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
   or the other way around (dir_backward).  DIR specifies the allowed
   replacement direction.  MODE is a set of CLEANUP_* flags controlling
   what transformations are permitted.  Returns true if a cross jump
   was performed (the CFG is modified), false otherwise.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2,
                       enum replace_direction dir)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  basic_block osrc1, osrc2, redirect_edges_to, tmp;
  rtx_insn *newpos1, *newpos2;
  edge s;
  edge_iterator ei;

  /* NEWPOS1/NEWPOS2 will point at the first matched insn in each block,
     filled in by flow_find_cross_jump below.  */
  newpos1 = newpos2 = NULL;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
      == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Do not turn a crossing edge into a non-crossing one, or vice versa,
     after reload.  */
  if (BB_PARTITION (src1) != BB_PARTITION (src2)
      && reload_completed)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  NMATCH is the number of matched insns;
     NEWPOS1/NEWPOS2 mark where the matched tail begins in each block.
     DIR may be refined by the call (e.g. from dir_both to a single
     direction).  */
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);

  /* Remember the original blocks: the matched positions may lie in
     different (split) blocks after flow_find_cross_jump.  */
  osrc1 = src1;
  osrc2 = src2;
  if (newpos1 != NULL_RTX)
    src1 = BLOCK_FOR_INSN (newpos1);
  if (newpos2 != NULL_RTX)
    src2 = BLOCK_FOR_INSN (newpos2);

  /* Check that SRC1 and SRC2 have preds again.  They may have changed
     above due to the call to flow_find_cross_jump.  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* For a backward replacement, swap the roles of the two blocks so the
     code below can always delete from "1" and redirect into "2".  */
  if (dir == dir_backward)
    {
      std::swap (osrc1, osrc2);
      std::swap (src1, src2);
      std::swap (e1, e2);
      std::swap (newpos1, newpos2);
    }

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < param_min_crossjump_insns)
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
    rtx_insn *label1, *label2;
    rtx_jump_table_data *table1, *table2;

    if (tablejump_p (BB_END (osrc1), &label1, &table1)
	&& tablejump_p (BB_END (osrc2), &label2, &table2)
	&& label1 != label2)
      {
	rtx_insn *insn;

	/* Replace references to LABEL1 with LABEL2.  */
	for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	  {
	    /* Do not replace the label in SRC1->END because when deleting
	       a block whose end is a tablejump, the tablejump referenced
	       from the instruction is deleted too.  */
	    if (insn != BB_END (osrc1))
	      replace_label_in_insn (insn, label1, label2, true);
	  }
      }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      /* Split SRC2 so the matched tail becomes its own block that the
	 redirected edges can target.  */
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  /* When SRC2 was split away from OSRC2, OSRC2 still carries the
     outgoing edges whose counts must be updated.  */
  if (osrc2 == src2)
    redirect_edges_to = redirect_to;
  else
    redirect_edges_to = osrc2;

  /* Recompute the counts of destinations of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      /* Find the edge S2 out of SRC1 going to the same (possibly
	 forwarded) destination as S.  outgoing_edges_match guaranteed
	 a match exists, so S2 is expected to be non-null here.  */
      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	s->dest->count += s->count ();

      if (FORWARDER_BLOCK_P (s2->dest))
	s2->dest->count -= s->count ();

      /* Merge the probabilities of the two edges weighted by their
	 blocks' execution counts.  */
      s->probability = s->probability.combine_with_count
			  (redirect_edges_to->count,
			   s2->probability, src1->count);
    }

  /* Adjust count for the block.  An earlier jump
     threading pass may have left the profile in an inconsistent
     state (see update_bb_profile_for_threading) so we must be
     prepared for overflows.  */
  tmp = redirect_to;
  do
    {
      tmp->count += src1->count;
      if (tmp == redirect_edges_to)
	break;
      tmp = find_fallthru_edge (tmp->succs)->dest;
    }
  while (true);
  update_br_prob_note (redirect_edges_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  /* Skip also prologue and function markers.  */
  while (DEBUG_INSN_P (newpos1)
	 || (NOTE_P (newpos1)
	     && (NOTE_KIND (newpos1) == NOTE_INSN_PROLOGUE_END
		 || NOTE_KIND (newpos1) == NOTE_INSN_FUNCTION_BEG)))
    newpos1 = NEXT_INSN (newpos1);

  /* Split SRC1 before the matched tail; the tail block becomes dead
     once its sole predecessor edge is redirected, and is deleted.  */
  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
2173 1.1 mrg
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes made.  MODE is a set of CLEANUP_* flags passed through to
   try_crossjump_to_edge.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;

  /* Nothing to do if there is not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
      BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = param_max_crossjump_edges;

  /* Bail out on blocks with too many predecessors; the pairwise search
     below is quadratic in the number of incoming edges.  */
  if (EDGE_COUNT (bb->preds) > max)
    return false;

  fallthru = find_fallthru_edge (bb->preds);

  changed = false;
  /* Note: IX is reset to 0 whenever a crossjump succeeds, restarting the
     scan, because the predecessor edge vector may have been modified.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds);)
    {
      e = EDGE_PRED (bb, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
	 fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  BB_MODIFIED and first_pass let us skip pairs
	     already examined on a previous iteration of cleanup_cfg.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (fallthru->src->flags & BB_MODIFIED)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
	    {
	      changed = true;
	      ix = 0;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
	{
	  e2 = EDGE_PRED (bb, ix2);

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (e2->src->flags & BB_MODIFIED)))
	    continue;

	  /* Both e and e2 are not fallthru edges, so we can crossjump in either
	     direction.  */
	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
	    {
	      changed = true;
	      ix = 0;
	      break;
	    }
	}
    }

  /* Record globally (for this cleanup_cfg invocation) that a crossjump
     happened, so dependent phases can react.  */
  if (changed)
    crossjumps_occurred = true;

  return changed;
}
2307 1.1 mrg
2308 1.1 mrg /* Search the successors of BB for common insn sequences. When found,
2309 1.1 mrg share code between them by moving it across the basic block
2310 1.1 mrg boundary. Return true if any changes made. */
2311 1.1 mrg
2312 1.1 mrg static bool
2313 1.1 mrg try_head_merge_bb (basic_block bb)
2314 1.1 mrg {
2315 1.1 mrg basic_block final_dest_bb = NULL;
2316 1.1 mrg int max_match = INT_MAX;
2317 1.1 mrg edge e0;
2318 1.1 mrg rtx_insn **headptr, **currptr, **nextptr;
2319 1.1 mrg bool changed, moveall;
2320 1.1 mrg unsigned ix;
2321 1.1 mrg rtx_insn *e0_last_head;
2322 1.1 mrg rtx cond;
2323 1.1 mrg rtx_insn *move_before;
2324 1.1 mrg unsigned nedges = EDGE_COUNT (bb->succs);
2325 1.1 mrg rtx_insn *jump = BB_END (bb);
2326 1.1 mrg regset live, live_union;
2327 1.1 mrg
2328 1.1 mrg /* Nothing to do if there is not at least two outgoing edges. */
2329 1.1 mrg if (nedges < 2)
2330 1.1 mrg return false;
2331 1.1 mrg
2332 1.1 mrg /* Don't crossjump if this block ends in a computed jump,
2333 1.1 mrg unless we are optimizing for size. */
2334 1.1 mrg if (optimize_bb_for_size_p (bb)
2335 1.1 mrg && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2336 1.1 mrg && computed_jump_p (BB_END (bb)))
2337 1.1 mrg return false;
2338 1.1 mrg
2339 1.1 mrg cond = get_condition (jump, &move_before, true, false);
2340 1.1 mrg if (cond == NULL_RTX)
2341 1.1 mrg move_before = jump;
2342 1.1 mrg
2343 1.1 mrg for (ix = 0; ix < nedges; ix++)
2344 1.1 mrg if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2345 1.1 mrg return false;
2346 1.1 mrg
2347 1.1 mrg for (ix = 0; ix < nedges; ix++)
2348 1.1 mrg {
2349 1.1 mrg edge e = EDGE_SUCC (bb, ix);
2350 1.1 mrg basic_block other_bb = e->dest;
2351 1.1 mrg
2352 1.1 mrg if (df_get_bb_dirty (other_bb))
2353 1.1 mrg {
2354 1.1 mrg block_was_dirty = true;
2355 1.1 mrg return false;
2356 1.1 mrg }
2357 1.1 mrg
2358 1.1 mrg if (e->flags & EDGE_ABNORMAL)
2359 1.1 mrg return false;
2360 1.1 mrg
2361 1.1 mrg /* Normally, all destination blocks must only be reachable from this
2362 1.1 mrg block, i.e. they must have one incoming edge.
2363 1.1 mrg
2364 1.1 mrg There is one special case we can handle, that of multiple consecutive
2365 1.1 mrg jumps where the first jumps to one of the targets of the second jump.
2366 1.1 mrg This happens frequently in switch statements for default labels.
2367 1.1 mrg The structure is as follows:
2368 1.1 mrg FINAL_DEST_BB
2369 1.1 mrg ....
2370 1.1 mrg if (cond) jump A;
2371 1.1 mrg fall through
2372 1.1 mrg BB
2373 1.1 mrg jump with targets A, B, C, D...
2374 1.1 mrg A
2375 1.1 mrg has two incoming edges, from FINAL_DEST_BB and BB
2376 1.1 mrg
2377 1.1 mrg In this case, we can try to move the insns through BB and into
2378 1.1 mrg FINAL_DEST_BB. */
2379 1.1 mrg if (EDGE_COUNT (other_bb->preds) != 1)
2380 1.1 mrg {
2381 1.1 mrg edge incoming_edge, incoming_bb_other_edge;
2382 1.1 mrg edge_iterator ei;
2383 1.1 mrg
2384 1.1 mrg if (final_dest_bb != NULL
2385 1.1 mrg || EDGE_COUNT (other_bb->preds) != 2)
2386 1.1 mrg return false;
2387 1.1 mrg
2388 1.1 mrg /* We must be able to move the insns across the whole block. */
2389 1.1 mrg move_before = BB_HEAD (bb);
2390 1.1 mrg while (!NONDEBUG_INSN_P (move_before))
2391 1.1 mrg move_before = NEXT_INSN (move_before);
2392 1.1 mrg
2393 1.1 mrg if (EDGE_COUNT (bb->preds) != 1)
2394 1.1 mrg return false;
2395 1.1 mrg incoming_edge = EDGE_PRED (bb, 0);
2396 1.1 mrg final_dest_bb = incoming_edge->src;
2397 1.1 mrg if (EDGE_COUNT (final_dest_bb->succs) != 2)
2398 1.1 mrg return false;
2399 1.1 mrg FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2400 1.1 mrg if (incoming_bb_other_edge != incoming_edge)
2401 1.1 mrg break;
2402 1.1 mrg if (incoming_bb_other_edge->dest != other_bb)
2403 1.1 mrg return false;
2404 1.1 mrg }
2405 1.1 mrg }
2406 1.1 mrg
2407 1.1 mrg e0 = EDGE_SUCC (bb, 0);
2408 1.1 mrg e0_last_head = NULL;
2409 1.1 mrg changed = false;
2410 1.1 mrg
2411 1.1 mrg for (ix = 1; ix < nedges; ix++)
2412 1.1 mrg {
2413 1.1 mrg edge e = EDGE_SUCC (bb, ix);
2414 1.1 mrg rtx_insn *e0_last, *e_last;
2415 1.1 mrg int nmatch;
2416 1.1 mrg
2417 1.1 mrg nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2418 1.1 mrg &e0_last, &e_last, 0);
2419 1.1 mrg if (nmatch == 0)
2420 1.1 mrg return false;
2421 1.1 mrg
2422 1.1 mrg if (nmatch < max_match)
2423 1.1 mrg {
2424 1.1 mrg max_match = nmatch;
2425 1.1 mrg e0_last_head = e0_last;
2426 1.1 mrg }
2427 1.1 mrg }
2428 1.1 mrg
2429 1.1 mrg /* If we matched an entire block, we probably have to avoid moving the
2430 1.1 mrg last insn. */
2431 1.1 mrg if (max_match > 0
2432 1.1 mrg && e0_last_head == BB_END (e0->dest)
2433 1.1 mrg && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2434 1.1 mrg || control_flow_insn_p (e0_last_head)))
2435 1.1 mrg {
2436 1.1 mrg max_match--;
2437 1.1 mrg if (max_match == 0)
2438 1.1 mrg return false;
2439 1.1 mrg e0_last_head = prev_real_nondebug_insn (e0_last_head);
2440 1.1 mrg }
2441 1.1 mrg
2442 1.1 mrg if (max_match == 0)
2443 1.1 mrg return false;
2444 1.1 mrg
2445 1.1 mrg /* We must find a union of the live registers at each of the end points. */
2446 1.1 mrg live = BITMAP_ALLOC (NULL);
2447 1.1 mrg live_union = BITMAP_ALLOC (NULL);
2448 1.1 mrg
2449 1.1 mrg currptr = XNEWVEC (rtx_insn *, nedges);
2450 1.1 mrg headptr = XNEWVEC (rtx_insn *, nedges);
2451 1.1 mrg nextptr = XNEWVEC (rtx_insn *, nedges);
2452 1.1 mrg
2453 1.1 mrg for (ix = 0; ix < nedges; ix++)
2454 1.1 mrg {
2455 1.1 mrg int j;
2456 1.1 mrg basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2457 1.1 mrg rtx_insn *head = BB_HEAD (merge_bb);
2458 1.1 mrg
2459 1.1 mrg while (!NONDEBUG_INSN_P (head))
2460 1.1 mrg head = NEXT_INSN (head);
2461 1.1 mrg headptr[ix] = head;
2462 1.1 mrg currptr[ix] = head;
2463 1.1 mrg
2464 1.1 mrg /* Compute the end point and live information */
2465 1.1 mrg for (j = 1; j < max_match; j++)
2466 1.1 mrg do
2467 1.1 mrg head = NEXT_INSN (head);
2468 1.1 mrg while (!NONDEBUG_INSN_P (head));
2469 1.1 mrg simulate_backwards_to_point (merge_bb, live, head);
2470 1.1 mrg IOR_REG_SET (live_union, live);
2471 1.1 mrg }
2472 1.1 mrg
2473 1.1 mrg /* If we're moving across two blocks, verify the validity of the
2474 1.1 mrg first move, then adjust the target and let the loop below deal
2475 1.1 mrg with the final move. */
2476 1.1 mrg if (final_dest_bb != NULL)
2477 1.1 mrg {
2478 1.1 mrg rtx_insn *move_upto;
2479 1.1 mrg
2480 1.1 mrg moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2481 1.1 mrg jump, e0->dest, live_union,
2482 1.1 mrg NULL, &move_upto);
2483 1.1 mrg if (!moveall)
2484 1.1 mrg {
2485 1.1 mrg if (move_upto == NULL_RTX)
2486 1.1 mrg goto out;
2487 1.1 mrg
2488 1.1 mrg while (e0_last_head != move_upto)
2489 1.1 mrg {
2490 1.1 mrg df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2491 1.1 mrg live_union);
2492 1.1 mrg e0_last_head = PREV_INSN (e0_last_head);
2493 1.1 mrg }
2494 1.1 mrg }
2495 1.1 mrg if (e0_last_head == NULL_RTX)
2496 1.1 mrg goto out;
2497 1.1 mrg
2498 1.1 mrg jump = BB_END (final_dest_bb);
2499 1.1 mrg cond = get_condition (jump, &move_before, true, false);
2500 1.1 mrg if (cond == NULL_RTX)
2501 1.1 mrg move_before = jump;
2502 1.1 mrg }
2503 1.1 mrg
2504 1.1 mrg do
2505 1.1 mrg {
2506 1.1 mrg rtx_insn *move_upto;
2507 1.1 mrg moveall = can_move_insns_across (currptr[0], e0_last_head,
2508 1.1 mrg move_before, jump, e0->dest, live_union,
2509 1.1 mrg NULL, &move_upto);
2510 1.1 mrg if (!moveall && move_upto == NULL_RTX)
2511 1.1 mrg {
2512 1.1 mrg if (jump == move_before)
2513 1.1 mrg break;
2514 1.1 mrg
2515 1.1 mrg /* Try again, using a different insertion point. */
2516 1.1 mrg move_before = jump;
2517 1.1 mrg
2518 1.1 mrg continue;
2519 1.1 mrg }
2520 1.1 mrg
2521 1.1 mrg if (final_dest_bb && !moveall)
2522 1.1 mrg /* We haven't checked whether a partial move would be OK for the first
2523 1.1 mrg move, so we have to fail this case. */
2524 1.1 mrg break;
2525 1.1 mrg
2526 1.1 mrg changed = true;
2527 1.1 mrg for (;;)
2528 1.1 mrg {
2529 1.1 mrg if (currptr[0] == move_upto)
2530 1.1 mrg break;
2531 1.1 mrg for (ix = 0; ix < nedges; ix++)
2532 1.1 mrg {
2533 1.1 mrg rtx_insn *curr = currptr[ix];
2534 1.1 mrg do
2535 1.1 mrg curr = NEXT_INSN (curr);
2536 1.1 mrg while (!NONDEBUG_INSN_P (curr));
2537 1.1 mrg currptr[ix] = curr;
2538 1.1 mrg }
2539 1.1 mrg }
2540 1.1 mrg
2541 1.1 mrg /* If we can't currently move all of the identical insns, remember
2542 1.1 mrg each insn after the range that we'll merge. */
2543 1.1 mrg if (!moveall)
2544 1.1 mrg for (ix = 0; ix < nedges; ix++)
2545 1.1 mrg {
2546 1.1 mrg rtx_insn *curr = currptr[ix];
2547 1.1 mrg do
2548 1.1 mrg curr = NEXT_INSN (curr);
2549 1.1 mrg while (!NONDEBUG_INSN_P (curr));
2550 1.1 mrg nextptr[ix] = curr;
2551 1.1 mrg }
2552 1.1 mrg
2553 1.1 mrg reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2554 1.1 mrg df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2555 1.1 mrg if (final_dest_bb != NULL)
2556 1.1 mrg df_set_bb_dirty (final_dest_bb);
2557 1.1 mrg df_set_bb_dirty (bb);
2558 1.1 mrg for (ix = 1; ix < nedges; ix++)
2559 1.1 mrg {
2560 1.1 mrg df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2561 1.1 mrg delete_insn_chain (headptr[ix], currptr[ix], false);
2562 1.1 mrg }
2563 1.1 mrg if (!moveall)
2564 1.1 mrg {
2565 1.1 mrg if (jump == move_before)
2566 1.1 mrg break;
2567 1.1 mrg
2568 1.1 mrg /* For the unmerged insns, try a different insertion point. */
2569 1.1 mrg move_before = jump;
2570 1.1 mrg
2571 1.1 mrg for (ix = 0; ix < nedges; ix++)
2572 1.1 mrg currptr[ix] = headptr[ix] = nextptr[ix];
2573 1.1 mrg }
2574 1.1 mrg }
2575 1.1 mrg while (!moveall);
2576 1.1 mrg
2577 1.1 mrg out:
2578 1.1 mrg free (currptr);
2579 1.1 mrg free (headptr);
2580 1.1 mrg free (nextptr);
2581 1.1 mrg
2582 1.1 mrg crossjumps_occurred |= changed;
2583 1.1 mrg
2584 1.1 mrg return changed;
2585 1.1 mrg }
2586 1.1 mrg
2587 1.1 mrg /* Return true if BB contains just bb note, or bb note followed
2588 1.1 mrg by only DEBUG_INSNs. */
2589 1.1 mrg
2590 1.1 mrg static bool
2591 1.1 mrg trivially_empty_bb_p (basic_block bb)
2592 1.1 mrg {
2593 1.1 mrg rtx_insn *insn = BB_END (bb);
2594 1.1 mrg
2595 1.1 mrg while (1)
2596 1.1 mrg {
2597 1.1 mrg if (insn == BB_HEAD (bb))
2598 1.1 mrg return true;
2599 1.1 mrg if (!DEBUG_INSN_P (insn))
2600 1.1 mrg return false;
2601 1.1 mrg insn = PREV_INSN (insn);
2602 1.1 mrg }
2603 1.1 mrg }
2604 1.1 mrg
2605 1.1 mrg /* Return true if BB contains just a return and possibly a USE of the
2606 1.1 mrg return value. Fill in *RET and *USE with the return and use insns
2607 1.1 mrg if any found, otherwise NULL. All CLOBBERs are ignored. */
2608 1.1 mrg
2609 1.1 mrg static bool
2610 1.1 mrg bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
2611 1.1 mrg {
2612 1.1 mrg *ret = *use = NULL;
2613 1.1 mrg rtx_insn *insn;
2614 1.1 mrg
2615 1.1 mrg if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2616 1.1 mrg return false;
2617 1.1 mrg
2618 1.1 mrg FOR_BB_INSNS (bb, insn)
2619 1.1 mrg if (NONDEBUG_INSN_P (insn))
2620 1.1 mrg {
2621 1.1 mrg rtx pat = PATTERN (insn);
2622 1.1 mrg
2623 1.1 mrg if (!*ret && ANY_RETURN_P (pat))
2624 1.1 mrg *ret = insn;
2625 1.1 mrg else if (!*ret && !*use && GET_CODE (pat) == USE
2626 1.1 mrg && REG_P (XEXP (pat, 0))
2627 1.1 mrg && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
2628 1.1 mrg *use = insn;
2629 1.1 mrg else if (GET_CODE (pat) != CLOBBER)
2630 1.1 mrg return false;
2631 1.1 mrg }
2632 1.1 mrg
2633 1.1 mrg return !!*ret;
2634 1.1 mrg }
2635 1.1 mrg
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  MODE is a mask of CLEANUP_* flags selecting which
   transformations may run.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  crossjumps_occurred = false;

  FOR_EACH_BB_FN (bb, cfun)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      /* Set for the first iteration, cleared at the bottom of the
	 do-while loop; presumably consulted by the merge/crossjump
	 heuristics elsewhere in this file -- TODO confirm.  */
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  block_was_dirty = false;
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  /* Walk all real blocks.  B is advanced only when no
	     transformation fired (see the bottom of the loop), so a
	     block is re-examined immediately after it changed.  */
	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
		      != b))
		{
		  c = b->prev_bb;
		  if (EDGE_COUNT (b->preds) > 0)
		    {
		      edge e;
		      edge_iterator ei;

		      /* B still has fallthru predecessors; make sure
			 each of them keeps a barrier after its end,
			 since B (and any barrier in its footer) is
			 about to go away.  */
		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
			{
			  rtx_insn *insn;
			  for (insn = BB_FOOTER (b);
			       insn; insn = NEXT_INSN (insn))
			    if (BARRIER_P (insn))
			      break;
			  if (insn)
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				{
				  /* Transfer B's footer wholesale when
				     the predecessor has none.  */
				  if (BB_FOOTER (b)
				      && BB_FOOTER (e->src) == NULL)
				    {
				      BB_FOOTER (e->src) = BB_FOOTER (b);
				      BB_FOOTER (b) = NULL;
				    }
				  else
				    emit_barrier_after_bb (e->src);
				}
			}
		      else
			{
			  rtx_insn *last = get_last_bb_insn (b);
			  if (last && BARRIER_P (last))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				emit_barrier_after (BB_END (e->src));
			}
		    }
		  delete_basic_block (b);
		  changed = true;
		  /* Resume the walk at the previous block so merges
		     with it are retried; never restart at the entry
		     block itself.  */
		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
		  continue;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  && !LABEL_PRESERVE_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  delete_insn (BB_HEAD (b));
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  /* Pick the block to resume at, stepping forward if
		     the previous block is the entry block.  */
		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		       ? b->next_bb : b->prev_bb);
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		  continue;
		}

	      /* Merge B with its single successor, if any.  */
	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     hope that it will become useless once more of compiler
		     is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Try to change a branch to a return to just that return.  */
	      rtx_insn *ret, *use;
	      if (single_succ_p (b)
		  && onlyjump_p (BB_END (b))
		  && bb_is_just_return (single_succ (b), &ret, &use))
		{
		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				     PATTERN (ret), 0))
		    {
		      /* Copy the USE of the return value, if any, so the
			 new return sees it too.  */
		      if (use)
			emit_insn_before (copy_insn (PATTERN (use)),
					  BB_END (b));
		      if (dump_file)
			fprintf (dump_file, "Changed jump %d->%d to return.\n",
				 b->index, single_succ (b)->index);
		      redirect_edge_succ (single_succ_edge (b),
					  EXIT_BLOCK_PTR_FOR_FN (cfun));
		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
		      changed_here = true;
		    }
		}

	      /* Try to change a conditional branch to a return to the
		 respective conditional return.  */
	      if (EDGE_COUNT (b->succs) == 2
		  && any_condjump_p (BB_END (b))
		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
		{
		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				     PATTERN (ret), 0))
		    {
		      if (use)
			emit_insn_before (copy_insn (PATTERN (use)),
					  BB_END (b));
		      if (dump_file)
			fprintf (dump_file, "Changed conditional jump %d->%d "
				 "to conditional return.\n",
				 b->index, BRANCH_EDGE (b)->dest->index);
		      redirect_edge_succ (BRANCH_EDGE (b),
					  EXIT_BLOCK_PTR_FOR_FN (cfun));
		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
		      changed_here = true;
		    }
		}

	      /* Try to flip a conditional branch that falls through to
		 a return so that it becomes a conditional return and a
		 new jump to the original branch target.  */
	      if (EDGE_COUNT (b->succs) == 2
		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && any_condjump_p (BB_END (b))
		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
		{
		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				   JUMP_LABEL (BB_END (b)), 0))
		    {
		      basic_block new_ft = BRANCH_EDGE (b)->dest;
		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
					 PATTERN (ret), 0))
			{
			  if (use)
			    emit_insn_before (copy_insn (PATTERN (use)),
					      BB_END (b));
			  if (dump_file)
			    fprintf (dump_file, "Changed conditional jump "
				     "%d->%d to conditional return, adding "
				     "fall-through jump.\n",
				     b->index, BRANCH_EDGE (b)->dest->index);
			  redirect_edge_succ (BRANCH_EDGE (b),
					      EXIT_BLOCK_PTR_FOR_FN (cfun));
			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
			  /* The inversion swapped which edge is taken;
			     swap the probabilities to match and refresh
			     the branch-probability note.  */
			  std::swap (BRANCH_EDGE (b)->probability,
				     FALLTHRU_EDGE (b)->probability);
			  update_br_prob_note (b);
			  /* Materialize an explicit jump block for the
			     old fallthru destination.  */
			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
			  notice_new_block (jb);
			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
					      block_label (new_ft), 0))
			    gcc_unreachable ();
			  redirect_edge_succ (single_succ_edge (jb), new_ft);
			  changed_here = true;
			}
		      else
			{
			  /* Invert the jump back to what it was.  This should
			     never fail.  */
			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
					    JUMP_LABEL (BB_END (b)), 0))
			    gcc_unreachable ();
			}
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && onlyjump_p (BB_END (b))
		  && !CROSSING_JUMP_P (BB_END (b))
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      if ((mode & CLEANUP_CROSSJUMP)
		  /* This can lengthen register lifetimes.  Do it only after
		     reload.  */
		  && reload_completed
		  && try_head_merge_bb (b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  /* Also consider crossjumps into the exit block's
	     predecessors.  */
	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
	    changed = true;

	  if (block_was_dirty)
	    {
	      /* This should only be set by head-merging.  */
	      gcc_assert (mode & CLEANUP_CROSSJUMP);
	      df_analyze ();
	    }

	  if (changed)
	    {
	      /* Edge forwarding in particular can cause hot blocks previously
		 reached by both hot and cold blocks to become dominated only
		 by cold blocks.  This will cause the verification below to fail,
		 and lead to now cold code in the hot section.  This is not easy
		 to detect and fix during edge forwarding, and in some cases
		 is only visible after newly unreachable blocks are deleted,
		 which will be done in fixup_partitions.  */
	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
		{
		  fixup_partitions ();
		  checking_verify_flow_info ();
		}
	    }

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  /* Clear the per-block scratch flags used above.  */
  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
3002 1.1 mrg
/* Delete all unreachable basic blocks.  Return true if any block was
   removed.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  /* Compute the BB_REACHABLE flags tested below.  */
  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug bind insns, we
     should delete blocks in reverse dominator order, so as to get a
     chance to substitute all released DEFs into debug bind stmts.  If
     we don't have dominators information, walking blocks backward
     gets us a better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  /* Remember the predecessor before B is possibly deleted.  */
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  /* Delete B and everything it dominates, deepest
		     first; PREV_BB is refreshed each time since the
		     popped block replaces B in the outer walk.  */
		  auto_vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }
		}

	      changed = true;
	    }
	}
    }
  else
    {
      /* Plain backward walk with no dominator ordering.  */
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  /* Deleting blocks can leave jumps to now-adjacent blocks; tidy
     them back into fallthrus.  */
  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
3075 1.1 mrg
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing tablejump insn as they are referenced by the preceding
   insns computing the destination, so we delay deleting and garbagecollect
   them once life information is computed.  */
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;

      /* Walk the insns after BB's end up to the next block note.  */
      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  /* A label whose use count does not exceed its preserve flag
	     has no real users; if jump table data follows it
	     immediately, both are dead.  */
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx_insn *label = insn, *jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      /* Advance NEXT past the table before deleting it so the
		 walk resumes at the first insn after the table.  */
	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
3113 1.1 mrg
3114 1.1 mrg
3115 1.1 mrg /* Tidy the CFG by deleting unreachable code and whatnot. */
3117 1.1 mrg
3118 1.1 mrg bool
3119 1.1 mrg cleanup_cfg (int mode)
3120 1.1 mrg {
3121 1.1 mrg bool changed = false;
3122 1.1 mrg
3123 1.1 mrg /* Set the cfglayout mode flag here. We could update all the callers
3124 1.1 mrg but that is just inconvenient, especially given that we eventually
3125 1.1 mrg want to have cfglayout mode as the default. */
3126 1.1 mrg if (current_ir_type () == IR_RTL_CFGLAYOUT)
3127 1.1 mrg mode |= CLEANUP_CFGLAYOUT;
3128 1.1 mrg
3129 1.1 mrg timevar_push (TV_CLEANUP_CFG);
3130 1.1 mrg if (delete_unreachable_blocks ())
3131 1.1 mrg {
3132 1.1 mrg changed = true;
3133 1.1 mrg /* We've possibly created trivially dead code. Cleanup it right
3134 1.1 mrg now to introduce more opportunities for try_optimize_cfg. */
3135 1.1 mrg if (!(mode & (CLEANUP_NO_INSN_DEL))
3136 1.1 mrg && !reload_completed)
3137 1.1 mrg delete_trivially_dead_insns (get_insns (), max_reg_num ());
3138 1.1 mrg }
3139 1.1 mrg
3140 1.1 mrg compact_blocks ();
3141 1.1 mrg
3142 1.1 mrg /* To tail-merge blocks ending in the same noreturn function (e.g.
3143 1.1 mrg a call to abort) we have to insert fake edges to exit. Do this
3144 1.1 mrg here once. The fake edges do not interfere with any other CFG
3145 1.1 mrg cleanups. */
3146 1.1 mrg if (mode & CLEANUP_CROSSJUMP)
3147 1.1 mrg add_noreturn_fake_exit_edges ();
3148 1.1 mrg
3149 1.1 mrg if (!dbg_cnt (cfg_cleanup))
3150 1.1 mrg return changed;
3151 1.1 mrg
3152 1.1 mrg while (try_optimize_cfg (mode))
3153 1.1 mrg {
3154 1.1 mrg delete_unreachable_blocks (), changed = true;
3155 1.1 mrg if (!(mode & CLEANUP_NO_INSN_DEL))
3156 1.1 mrg {
3157 1.1 mrg /* Try to remove some trivially dead insns when doing an expensive
3158 1.1 mrg cleanup. But delete_trivially_dead_insns doesn't work after
3159 1.1 mrg reload (it only handles pseudos) and run_fast_dce is too costly
3160 1.1 mrg to run in every iteration.
3161 1.1 mrg
3162 1.1 mrg For effective cross jumping, we really want to run a fast DCE to
3163 1.1 mrg clean up any dead conditions, or they get in the way of performing
3164 1.1 mrg useful tail merges.
3165 1.1 mrg
3166 1.1 mrg Other transformations in cleanup_cfg are not so sensitive to dead
3167 1.1 mrg code, so delete_trivially_dead_insns or even doing nothing at all
3168 1.1 mrg is good enough. */
3169 1.1 mrg if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3170 1.1 mrg && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3171 1.1 mrg break;
3172 1.1 mrg if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
3173 1.1 mrg {
3174 1.1 mrg run_fast_dce ();
3175 1.1 mrg mode &= ~CLEANUP_FORCE_FAST_DCE;
3176 1.1 mrg }
3177 1.1 mrg }
3178 1.1 mrg else
3179 1.1 mrg break;
3180 1.1 mrg }
3181 1.1 mrg
3182 1.1 mrg if (mode & CLEANUP_CROSSJUMP)
3183 1.1 mrg remove_fake_exit_edges ();
3184 1.1 mrg
3185 1.1 mrg if (mode & CLEANUP_FORCE_FAST_DCE)
3186 1.1 mrg run_fast_dce ();
3187 1.1 mrg
3188 1.1 mrg /* Don't call delete_dead_jumptables in cfglayout mode, because
3189 1.1 mrg that function assumes that jump tables are in the insns stream.
3190 1.1 mrg But we also don't _have_ to delete dead jumptables in cfglayout
3191 1.1 mrg mode because we shouldn't even be looking at things that are
3192 1.1 mrg not in a basic block. Dead jumptables are cleaned up when
3193 1.1 mrg going out of cfglayout mode. */
3194 1.1 mrg if (!(mode & CLEANUP_CFGLAYOUT))
3195 1.1 mrg delete_dead_jumptables ();
3196 1.1 mrg
3197 1.1 mrg /* ??? We probably do this way too often. */
3198 1.1 mrg if (current_loops
3199 1.1 mrg && (changed
3200 1.1 mrg || (mode & CLEANUP_CFG_CHANGED)))
3201 1.1 mrg {
3202 1.1 mrg timevar_push (TV_REPAIR_LOOPS);
3203 1.1 mrg /* The above doesn't preserve dominance info if available. */
3204 1.1 mrg gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3205 1.1 mrg calculate_dominance_info (CDI_DOMINATORS);
3206 1.1 mrg fix_loop_structure (NULL);
3207 1.1 mrg free_dominance_info (CDI_DOMINATORS);
3208 1.1 mrg timevar_pop (TV_REPAIR_LOOPS);
3209 1.1 mrg }
3210 1.1 mrg
3211 1.1 mrg timevar_pop (TV_CLEANUP_CFG);
3212 1.1 mrg
3213 1.1 mrg return changed;
3214 1.1 mrg }
3215 1.1 mrg
3216 1.1 mrg namespace {
3218 1.1 mrg
/* Pass descriptor for the "jump" RTL pass (class pass_jump below).  */

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
3231 1.1 mrg
/* RTL "jump" pass: removes trivially dead insns and runs cleanup_cfg;
   see pass_jump::execute below for the exact cleanup flags used.  */

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump
3243 1.1 mrg
3244 1.1 mrg unsigned int
3245 1.1 mrg pass_jump::execute (function *)
3246 1.1 mrg {
3247 1.1 mrg delete_trivially_dead_insns (get_insns (), max_reg_num ());
3248 1.1 mrg if (dump_file)
3249 1.1 mrg dump_flow_info (dump_file, dump_flags);
3250 1.1 mrg cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3251 1.1 mrg | (flag_thread_jumps && flag_expensive_optimizations
3252 1.1 mrg ? CLEANUP_THREADING : 0));
3253 1.1 mrg return 0;
3254 1.1 mrg }
3255 1.1 mrg
3256 1.1 mrg } // anon namespace
3257 1.1 mrg
3258 1.1 mrg rtl_opt_pass *
3259 1.1 mrg make_pass_jump (gcc::context *ctxt)
3260 1.1 mrg {
3261 1.1 mrg return new pass_jump (ctxt);
3262 1.1 mrg }
3263 1.1 mrg
3264 1.1 mrg namespace {
3266 1.1 mrg
/* Pass descriptor for the "jump_after_combine" RTL pass (class
   pass_jump_after_combine below).  */

const pass_data pass_data_jump_after_combine =
{
  RTL_PASS, /* type */
  "jump_after_combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
3279 1.1 mrg
3280 1.1 mrg class pass_jump_after_combine : public rtl_opt_pass
3281 1.1 mrg {
3282 1.1 mrg public:
3283 1.1 mrg pass_jump_after_combine (gcc::context *ctxt)
3284 1.1 mrg : rtl_opt_pass (pass_data_jump_after_combine, ctxt)
3285 1.1 mrg {}
3286 1.1 mrg
3287 1.1 mrg /* opt_pass methods: */
3288 1.1 mrg virtual bool gate (function *)
3289 1.1 mrg {
3290 1.1 mrg return flag_thread_jumps && flag_expensive_optimizations;
3291 1.1 mrg }
3292 1.1 mrg virtual unsigned int execute (function *);
3293 1.1 mrg
3294 1.1 mrg }; // class pass_jump_after_combine
3295 1.1 mrg
3296 1.1 mrg unsigned int
3297 1.1 mrg pass_jump_after_combine::execute (function *)
3298 1.1 mrg {
3299 1.1 mrg /* Jump threading does not keep dominators up-to-date. */
3300 1.1 mrg free_dominance_info (CDI_DOMINATORS);
3301 1.1 mrg cleanup_cfg (CLEANUP_THREADING);
3302 1.1 mrg return 0;
3303 1.1 mrg }
3304 1.1 mrg
3305 1.1 mrg } // anon namespace
3306 1.1 mrg
3307 1.1 mrg rtl_opt_pass *
3308 1.1 mrg make_pass_jump_after_combine (gcc::context *ctxt)
3309 1.1 mrg {
3310 1.1 mrg return new pass_jump_after_combine (ctxt);
3311 1.1 mrg }
3312 1.1 mrg
3313 1.1 mrg namespace {
3314 1.1 mrg
/* Pass descriptor for the "jump2" RTL pass (class pass_jump2 below).  */

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
3327 1.1 mrg
3328 1.1 mrg class pass_jump2 : public rtl_opt_pass
3329 1.1 mrg {
3330 1.1 mrg public:
3331 1.1 mrg pass_jump2 (gcc::context *ctxt)
3332 1.1 mrg : rtl_opt_pass (pass_data_jump2, ctxt)
3333 1.1 mrg {}
3334 1.1 mrg
3335 1.1 mrg /* opt_pass methods: */
3336 1.1 mrg virtual unsigned int execute (function *)
3337 1.1 mrg {
3338 1.1 mrg cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3339 1.1 mrg return 0;
3340 }
3341
3342 }; // class pass_jump2
3343
3344 } // anon namespace
3345
3346 rtl_opt_pass *
3347 make_pass_jump2 (gcc::context *ctxt)
3348 {
3349 return new pass_jump2 (ctxt);
3350 }
3351