ira-emit.cc revision 1.1.1.1 1 /* Integrated Register Allocator. Changing code and generating moves.
2 Copyright (C) 2006-2022 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov (at) redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
/* When we have more than one region, we need to change the original RTL
22 code after coloring. Let us consider two allocnos representing the
23 same pseudo-register outside and inside a region respectively.
24 They can get different hard-registers. The reload pass works on
25 pseudo registers basis and there is no way to say the reload that
26 pseudo could be in different registers and it is even more
27 difficult to say in what places of the code the pseudo should have
28 particular hard-registers. So in this case IRA has to create and
29 use a new pseudo-register inside the region and adds code to move
30 allocno values on the region's borders. This is done by the code
31 in this file.
32
   The code makes a top-down traversal of the regions and generates new
34 pseudos and the move code on the region borders. In some
35 complicated cases IRA can create a new pseudo used temporarily to
36 move allocno values when a swap of values stored in two
37 hard-registers is needed (e.g. two allocnos representing different
38 pseudos outside region got respectively hard registers 1 and 2 and
39 the corresponding allocnos inside the region got respectively hard
40 registers 2 and 1). At this stage, the new pseudo is marked as
41 spilled.
42
43 IRA still creates the pseudo-register and the moves on the region
44 borders even when the both corresponding allocnos were assigned to
45 the same hard-register. It is done because, if the reload pass for
46 some reason spills a pseudo-register representing the original
47 pseudo outside or inside the region, the effect will be smaller
48 because another pseudo will still be in the hard-register. In most
   cases, this is better than spilling the original pseudo in its
50 whole live-range. If reload does not change the allocation for the
51 two pseudo-registers, the trivial move will be removed by
52 post-reload optimizations.
53
54 IRA does not generate a new pseudo and moves for the allocno values
55 if the both allocnos representing an original pseudo inside and
56 outside region assigned to the same hard register when the register
57 pressure in the region for the corresponding pressure class is less
58 than number of available hard registers for given pressure class.
59
60 IRA also does some optimizations to remove redundant moves which is
61 transformed into stores by the reload pass on CFG edges
62 representing exits from the region.
63
64 IRA tries to reduce duplication of code generated on CFG edges
65 which are enters and exits to/from regions by moving some code to
66 the edge sources or destinations when it is possible. */
67
68 #include "config.h"
69 #include "system.h"
70 #include "coretypes.h"
71 #include "backend.h"
72 #include "rtl.h"
73 #include "tree.h"
74 #include "predict.h"
75 #include "df.h"
76 #include "insn-config.h"
77 #include "regs.h"
78 #include "memmodel.h"
79 #include "ira.h"
80 #include "ira-int.h"
81 #include "cfgrtl.h"
82 #include "cfgbuild.h"
83 #include "expr.h"
84 #include "reload.h"
85 #include "cfgloop.h"
86
87
/* Data used to emit live range split insns and to flattening IR.
   One ira_emit_data entry per allocno, attached via ALLOCNO_ADD_DATA
   in ira_initiate_emit_data.  */
ira_emit_data_t ira_allocno_emit_data;

/* Definitions for vectors of pointers.  */
typedef void *void_p;

/* Pointers to data allocated for allocnos being created during
   emitting.  Usually there are quite few such allocnos because they
   are created only for resolving loop in register shuffling.  Tracked
   here so ira_finish_emit_data can free them.  */
static vec<void_p> new_allocno_emit_data_vec;
98
99 /* Allocate and initiate the emit data. */
100 void
101 ira_initiate_emit_data (void)
102 {
103 ira_allocno_t a;
104 ira_allocno_iterator ai;
105
106 ira_allocno_emit_data
107 = (ira_emit_data_t) ira_allocate (ira_allocnos_num
108 * sizeof (struct ira_emit_data));
109 memset (ira_allocno_emit_data, 0,
110 ira_allocnos_num * sizeof (struct ira_emit_data));
111 FOR_EACH_ALLOCNO (a, ai)
112 ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
113 new_allocno_emit_data_vec.create (50);
114
115 }
116
117 /* Free the emit data. */
118 void
119 ira_finish_emit_data (void)
120 {
121 void_p p;
122 ira_allocno_t a;
123 ira_allocno_iterator ai;
124
125 ira_free (ira_allocno_emit_data);
126 FOR_EACH_ALLOCNO (a, ai)
127 ALLOCNO_ADD_DATA (a) = NULL;
128 for (;new_allocno_emit_data_vec.length () != 0;)
129 {
130 p = new_allocno_emit_data_vec.pop ();
131 ira_free (p);
132 }
133 new_allocno_emit_data_vec.release ();
134 }
135
136 /* Create and return a new allocno with given REGNO and
137 LOOP_TREE_NODE. Allocate emit data for it. */
138 static ira_allocno_t
139 create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
140 {
141 ira_allocno_t a;
142
143 a = ira_create_allocno (regno, false, loop_tree_node);
144 ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
145 memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
146 new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
147 return a;
148 }
149
150
151
/* See comments below.  */
typedef struct move *move_t;

/* The structure represents an allocno move.  Both allocnos have the
   same original regno but different allocation.  */
struct move
{
  /* The allocnos involved in the move.  */
  ira_allocno_t from, to;
  /* The next move in the move sequence.  */
  move_t next;
  /* Used for finding dependencies (marks nodes already visited by
     traverse_moves).  */
  bool visited_p;
  /* The size of the following array.  */
  int deps_num;
  /* Moves on which given move depends on.  Dependency can be cyclic.
     It means we need a temporary to generates the moves.  Sequence
     A1->A2, B1->B2 where A1 and B2 are assigned to reg R1 and A2 and
     B1 are assigned to reg R2 is an example of the cyclic
     dependencies.  */
  move_t *deps;
  /* First insn generated for the move.  */
  rtx_insn *insn;
};
177
/* Array of moves (indexed by BB index) which should be put at the
   start/end of the corresponding basic blocks.  Filled by
   unify_moves, consumed by emit_moves.  */
static move_t *at_bb_start, *at_bb_end;

/* Max regno before renaming some pseudo-registers.  For example, the
   same pseudo-register can be renamed in a loop if its allocation is
   different outside the loop.  Regnos at or above this value are
   already-renamed registers and are skipped by change_regs.  */
static int max_regno_before_changing;
186
187 /* Return new move of allocnos TO and FROM. */
188 static move_t
189 create_move (ira_allocno_t to, ira_allocno_t from)
190 {
191 move_t move;
192
193 move = (move_t) ira_allocate (sizeof (struct move));
194 move->deps = NULL;
195 move->deps_num = 0;
196 move->to = to;
197 move->from = from;
198 move->next = NULL;
199 move->insn = NULL;
200 move->visited_p = false;
201 return move;
202 }
203
204 /* Free memory for MOVE and its dependencies. */
205 static void
206 free_move (move_t move)
207 {
208 if (move->deps != NULL)
209 ira_free (move->deps);
210 ira_free (move);
211 }
212
213 /* Free memory for list of the moves given by its HEAD. */
214 static void
215 free_move_list (move_t head)
216 {
217 move_t next;
218
219 for (; head != NULL; head = next)
220 {
221 next = head->next;
222 free_move (head);
223 }
224 }
225
226 /* Return TRUE if the move list LIST1 and LIST2 are equal (two
227 moves are equal if they involve the same allocnos). */
228 static bool
229 eq_move_lists_p (move_t list1, move_t list2)
230 {
231 for (; list1 != NULL && list2 != NULL;
232 list1 = list1->next, list2 = list2->next)
233 if (list1->from != list2->from || list1->to != list2->to)
234 return false;
235 return list1 == list2;
236 }
237
238 /* Print move list LIST into file F. */
239 static void
240 print_move_list (FILE *f, move_t list)
241 {
242 for (; list != NULL; list = list->next)
243 fprintf (f, " a%dr%d->a%dr%d",
244 ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
245 ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
246 fprintf (f, "\n");
247 }
248
extern void ira_debug_move_list (move_t list);

/* Print move list LIST into stderr.  Intended to be called from a
   debugger, hence the extern declaration above.  */
void
ira_debug_move_list (move_t list)
{
  print_move_list (stderr, list);
}
257
/* This recursive function changes pseudo-registers in *LOC if it is
   necessary.  The function returns TRUE if a change was done.  A
   pseudo is replaced by the (possibly new) register recorded for the
   current loop's allocno with the same regno.  */
static bool
change_regs (rtx *loc)
{
  int i, regno, result = false;
  const char *fmt;
  enum rtx_code code;
  rtx reg;

  if (*loc == NULL_RTX)
    return false;
  code = GET_CODE (*loc);
  if (code == REG)
    {
      regno = REGNO (*loc);
      /* Hard registers are never renamed.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	return false;
      if (regno >= max_regno_before_changing)
	/* It is a shared register which was changed already.  */
	return false;
      if (ira_curr_regno_allocno_map[regno] == NULL)
	return false;
      reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
      if (reg == *loc)
	return false;
      *loc = reg;
      return true;
    }

  /* Not a REG: recurse into sub-expressions ('e') and vectors of
     sub-expressions ('E') per the rtx format string.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	result = change_regs (&XEXP (*loc, i)) || result;
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
	    result = change_regs (&XVECEXP (*loc, i, j)) || result;
	}
    }
  return result;
}
303
304 static bool
305 change_regs_in_insn (rtx_insn **insn_ptr)
306 {
307 rtx rtx = *insn_ptr;
308 bool result = change_regs (&rtx);
309 *insn_ptr = as_a <rtx_insn *> (rtx);
310 return result;
311 }
312
313 /* Attach MOVE to the edge E. The move is attached to the head of the
314 list if HEAD_P is TRUE. */
315 static void
316 add_to_edge_list (edge e, move_t move, bool head_p)
317 {
318 move_t last;
319
320 if (head_p || e->aux == NULL)
321 {
322 move->next = (move_t) e->aux;
323 e->aux = move;
324 }
325 else
326 {
327 for (last = (move_t) e->aux; last->next != NULL; last = last->next)
328 ;
329 last->next = move;
330 move->next = NULL;
331 }
332 }
333
/* Create and return new pseudo-register with the same attributes as
   ORIGINAL_REG.  The new register shares ORIGINAL_REGNO and the user
   variable / pointer flags and reg attributes of the original, so
   later passes treat it like the original pseudo.  */
rtx
ira_create_new_reg (rtx original_reg)
{
  rtx new_reg;

  new_reg = gen_reg_rtx (GET_MODE (original_reg));
  ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
  REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
  REG_POINTER (new_reg) = REG_POINTER (original_reg);
  REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "      Creating newreg=%i from oldreg=%i\n",
	     REGNO (new_reg), REGNO (original_reg));
  /* Grow the reg-equivalence arrays to cover the new regno.  */
  ira_expand_reg_equiv ();
  return new_reg;
}
352
353 /* Return TRUE if loop given by SUBNODE inside the loop given by
354 NODE. */
355 static bool
356 subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
357 {
358 for (; subnode != NULL; subnode = subnode->parent)
359 if (subnode == node)
360 return true;
361 return false;
362 }
363
/* Set up member `reg' to REG for allocnos which has the same regno as
   ALLOCNO and which are inside the loop corresponding to ALLOCNO.
   Also propagate REG to ALLOCNO's cap chain, and mark ancestor
   allocnos (via caps and loop parents) as having a renamed child.  */
static void
set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
  int regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  node = ALLOCNO_LOOP_TREE_NODE (allocno);
  /* All same-regno allocnos in subloops of ALLOCNO's loop get REG.  */
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
      ALLOCNO_EMIT_DATA (a)->reg = reg;
  /* So does the cap chain of ALLOCNO itself.  */
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
    ALLOCNO_EMIT_DATA (a)->reg = reg;
  regno = ALLOCNO_REGNO (allocno);
  /* Walk upward through caps and loop parents setting
     child_renamed_p; stop as soon as an ancestor is already
     marked.  */
  for (a = allocno;;)
    {
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
	{
	  node = node->parent;
	  if (node == NULL)
	    break;
	  a = node->regno_allocno_map[regno];
	}
      if (a == NULL)
	continue;
      if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
	break;
      ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
    }
}
398
/* Return true if there is an entry to given loop not from its parent
   (or grandparent) block.  For example, it is possible for two
   adjacent loops inside another loop.  Checked by scanning the
   predecessor edges of every basic block directly in LOOP_NODE.  */
static bool
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
{
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
  edge e;
  edge_iterator ei;

  for (bb_node = loop_node->children;
       bb_node != NULL;
       bb_node = bb_node->next)
    if (bb_node->bb != NULL)
      {
	FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
	  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	      && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
	    {
	      /* Is the edge source inside LOOP_NODE?  */
	      for (parent = src_loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (parent == loop_node)
		  break;
	      if (parent != NULL)
		/* That is an exit from a nested loop -- skip it.  */
		continue;
	      /* Is the edge source an ancestor of LOOP_NODE?  */
	      for (parent = loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (src_loop_node == parent)
		  break;
	      if (parent == NULL)
		/* Neither nested nor an ancestor: non-parent entry.  */
		return true;
	    }
      }
  return false;
}
437
/* Set up ENTERED_FROM_NON_PARENT_P for each loop region that has an
   allocno map (i.e. each loop IRA treats as a region).  */
static void
setup_entered_from_non_parent_p (void)
{
  unsigned int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      ira_loop_nodes[i].entered_from_non_parent_p
	= entered_from_non_parent_p (&ira_loop_nodes[i]);
}
451
/* Return TRUE if move of SRC_ALLOCNO (assigned to hard register) to
   DEST_ALLOCNO (assigned to memory) can be removed because it does
   not change value of the destination.  One possible reason for this
   is the situation when SRC_ALLOCNO is not modified in the
   corresponding loop.  The check walks from the source loop upward
   until it meets an allocno already living in the destination's
   register.  */
static bool
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
  int regno, orig_regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
	      && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
  orig_regno = ALLOCNO_REGNO (src_allocno);
  regno = REGNO (allocno_emit_reg (dest_allocno));
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
       node != NULL;
       node = node->parent)
    {
      a = node->regno_allocno_map[orig_regno];
      ira_assert (a != NULL);
      if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
	/* We achieved the destination and everything is ok.  */
	return true;
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
	return false;
      else if (node->entered_from_non_parent_p)
	/* If there is a path from a destination loop block to the
	   source loop header containing basic blocks of non-parents
	   (grandparents) of the source loop, we should have checked
	   modifications of the pseudo on this path too to decide
	   about possibility to remove the store.  It could be done by
	   solving a data-flow problem.  Unfortunately such global
	   solution would complicate IR flattening.  Therefore we just
	   prohibit removal of the store in such complicated case.  */
	return false;
    }
  /* It is actually a loop entry -- do not remove the store.  */
  return false;
}
493
/* Generate and attach moves to the edge E.  This looks at the final
   regnos of allocnos living on the edge with the same original regno
   to figure out when moves should be generated.  Stores that can be
   proven redundant (see store_can_be_removed_p) are recorded as
   mem-optimized instead of generating a move.  */
static void
generate_edge_moves (edge e)
{
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
  unsigned int regno;
  bitmap_iterator bi;
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
  move_t move;
  bitmap regs_live_in_dest, regs_live_out_src;

  src_loop_node = IRA_BB_NODE (e->src)->parent;
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
  e->aux = NULL;
  /* Moves are only needed when the edge crosses a region border.  */
  if (src_loop_node == dest_loop_node)
    return;
  src_map = src_loop_node->regno_allocno_map;
  dest_map = dest_loop_node->regno_allocno_map;
  regs_live_in_dest = df_get_live_in (e->dest);
  regs_live_out_src = df_get_live_out (e->src);
  /* Consider only pseudos live across the edge.  */
  EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
			     FIRST_PSEUDO_REGISTER, regno, bi)
    if (bitmap_bit_p (regs_live_out_src, regno))
      {
	src_allocno = src_map[regno];
	dest_allocno = dest_map[regno];
	if (REGNO (allocno_emit_reg (src_allocno))
	    == REGNO (allocno_emit_reg (dest_allocno)))
	  continue;
	/* Remove unnecessary stores at the region exit.  We should do
	   this for readonly memory for sure and this is guaranteed by
	   that we never generate moves on region borders (see
	   checking in function change_loop).  */
	if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
	    && ALLOCNO_HARD_REGNO (src_allocno) >= 0
	    && store_can_be_removed_p (src_allocno, dest_allocno))
	  {
	    ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
	    ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
	    if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	      fprintf (ira_dump_file, "      Remove r%d:a%d->a%d(mem)\n",
		       regno, ALLOCNO_NUM (src_allocno),
		       ALLOCNO_NUM (dest_allocno));
	    continue;
	  }
	move = create_move (dest_allocno, src_allocno);
	add_to_edge_list (e, move, true);
      }
}
545
/* Bitmap of allocnos local for the current loop (all allocnos minus
   the border allocnos).  */
static bitmap local_allocno_bitmap;

/* This bitmap is used to find that we need to generate and to use a
   new pseudo-register when processing allocnos with the same original
   regno.  A set bit means the regno was already used by some loop.  */
static bitmap used_regno_bitmap;

/* This bitmap contains regnos of allocnos which were renamed locally
   because the allocnos correspond to disjoint live ranges in loops
   with a common parent.  */
static bitmap renamed_regno_bitmap;
558
/* Change (if necessary) pseudo-registers inside loop given by loop
   tree node NODE.  For a BB node, rewrite its insns; for a loop node,
   create new pseudos for border allocnos whose allocation differs
   from the parent, then rename loop-local allocnos whose regno was
   already used in another loop.  */
static void
change_loop (ira_loop_tree_node_t node)
{
  bitmap_iterator bi;
  unsigned int i;
  int regno;
  bool used_p;
  ira_allocno_t allocno, parent_allocno, *map;
  rtx_insn *insn;
  rtx original_reg;
  enum reg_class aclass, pclass;
  ira_loop_tree_node_t parent;

  if (node != ira_loop_tree_root)
    {
      ira_assert (current_loops != NULL);

      if (node->bb != NULL)
	{
	  /* A basic-block node: just rewrite registers in its insns
	     and tell DF about any change.  */
	  FOR_BB_INSNS (node->bb, insn)
	    if (INSN_P (insn) && change_regs_in_insn (&insn))
	      {
		df_insn_rescan (insn);
		df_notes_rescan (insn);
	      }
	  return;
	}

      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file,
		 "      Changing RTL for loop %d (header bb%d)\n",
		 node->loop_num, node->loop->header->index);

      parent = ira_curr_loop_tree_node->parent;
      map = parent->regno_allocno_map;
      EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
				 0, i, bi)
	{
	  allocno = ira_allocnos[i];
	  regno = ALLOCNO_REGNO (allocno);
	  aclass = ALLOCNO_CLASS (allocno);
	  pclass = ira_pressure_class_translate[aclass];
	  parent_allocno = map[regno];
	  ira_assert (regno < ira_reg_equiv_len);
	  /* We generate the same hard register move because the
	     reload pass can put an allocno into memory in this case
	     we will have live range splitting.  If it does not happen
	     such the same hard register moves will be removed.  The
	     worst case when the both allocnos are put into memory by
	     the reload is very rare.  */
	  if (parent_allocno != NULL
	      && (ALLOCNO_HARD_REGNO (allocno)
		  == ALLOCNO_HARD_REGNO (parent_allocno))
	      && (ALLOCNO_HARD_REGNO (allocno) < 0
		  || (parent->reg_pressure[pclass] + 1
		      <= ira_class_hard_regs_num[pclass])
		  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
					[ALLOCNO_MODE (allocno)],
					ALLOCNO_HARD_REGNO (allocno))
		  /* don't create copies because reload can spill an
		     allocno set by copy although the allocno will not
		     get memory slot.  */
		  || ira_equiv_no_lvalue_p (regno)
		  || (pic_offset_table_rtx != NULL
		      && (ALLOCNO_REGNO (allocno)
			  == (int) REGNO (pic_offset_table_rtx)))))
	    continue;
	  original_reg = allocno_emit_reg (allocno);
	  if (parent_allocno == NULL
	      || (REGNO (allocno_emit_reg (parent_allocno))
		  == REGNO (original_reg)))
	    {
	      /* NOTE(review): if PARENT_ALLOCNO is NULL here, the
		 dump below applies ALLOCNO_HARD_REGNO to a null
		 pointer -- confirm whether that path is reachable
		 with dumping enabled.  */
	      if (internal_flag_ira_verbose > 3 && ira_dump_file)
		fprintf (ira_dump_file, "  %i vs parent %i:",
			 ALLOCNO_HARD_REGNO (allocno),
			 ALLOCNO_HARD_REGNO (parent_allocno));
	      set_allocno_reg (allocno, ira_create_new_reg (original_reg));
	    }
	}
    }
  /* Rename locals: Local allocnos with same regno in different loops
     might get the different hard register.  So we need to change
     ALLOCNO_REG.  */
  bitmap_and_compl (local_allocno_bitmap,
		    ira_curr_loop_tree_node->all_allocnos,
		    ira_curr_loop_tree_node->border_allocnos);
  EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
    {
      allocno = ira_allocnos[i];
      regno = ALLOCNO_REGNO (allocno);
      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
	continue;
      /* bitmap_set_bit returns true only for a newly set bit, so
	 USED_P tells whether another loop already used this regno.  */
      used_p = !bitmap_set_bit (used_regno_bitmap, regno);
      ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
      if (! used_p)
	continue;
      bitmap_set_bit (renamed_regno_bitmap, regno);
      set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno)));
    }
}
661
662 /* Process to set up flag somewhere_renamed_p. */
663 static void
664 set_allocno_somewhere_renamed_p (void)
665 {
666 unsigned int regno;
667 ira_allocno_t allocno;
668 ira_allocno_iterator ai;
669
670 FOR_EACH_ALLOCNO (allocno, ai)
671 {
672 regno = ALLOCNO_REGNO (allocno);
673 if (bitmap_bit_p (renamed_regno_bitmap, regno)
674 && REGNO (allocno_emit_reg (allocno)) == regno)
675 ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
676 }
677 }
678
679 /* Return TRUE if move lists on all edges given in vector VEC are
680 equal. */
681 static bool
682 eq_edge_move_lists_p (vec<edge, va_gc> *vec)
683 {
684 move_t list;
685 int i;
686
687 list = (move_t) EDGE_I (vec, 0)->aux;
688 for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
689 if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
690 return false;
691 return true;
692 }
693
/* Look at all entry edges (if START_P) or exit edges of basic block
   BB and put move lists at the BB start or end if it is possible.  In
   other words, this decreases code duplication of allocno moves.  */
static void
unify_moves (basic_block bb, bool start_p)
{
  int i;
  edge e;
  move_t list;
  vec<edge, va_gc> *vec;

  vec = (start_p ? bb->preds : bb->succs);
  /* Only profitable when every edge carries an identical move list.  */
  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
    return;
  e = EDGE_I (vec, 0);
  list = (move_t) e->aux;
  /* Moves cannot be placed after a control flow insn at the BB end.  */
  if (! start_p && control_flow_insn_p (BB_END (bb)))
    return;
  e->aux = NULL;
  /* Keep the first edge's list; free the identical duplicates on the
     remaining edges.  */
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    {
      e = EDGE_I (vec, i);
      free_move_list ((move_t) e->aux);
      e->aux = NULL;
    }
  if (start_p)
    at_bb_start[bb->index] = list;
  else
    at_bb_end[bb->index] = list;
}
724
/* Last move (in move sequence being processed) setting up the
   corresponding hard register.  */
static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];

/* If the element value is equal to CURR_TICK then the corresponding
   element in `hard_regno_last_set' is defined and correct.  */
static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];

/* Last move (in move sequence being processed) setting up the
   corresponding allocno.  */
static move_t *allocno_last_set;

/* If the element value is equal to CURR_TICK then the corresponding
   element in `allocno_last_set' is defined and correct.  */
static int *allocno_last_set_check;

/* Definition of vector of moves.  */

/* This vec contains moves sorted topologically (depth-first) on their
   dependency graph.  */
static vec<move_t> move_vec;

/* The variable value is used to check correctness of values of
   elements of arrays `hard_regno_last_set' and
   `allocno_last_set_check'.  Incremented once per pass so stale
   entries need not be cleared.  */
static int curr_tick;
751
752 /* This recursive function traverses dependencies of MOVE and produces
753 topological sorting (in depth-first order). */
754 static void
755 traverse_moves (move_t move)
756 {
757 int i;
758
759 if (move->visited_p)
760 return;
761 move->visited_p = true;
762 for (i = move->deps_num - 1; i >= 0; i--)
763 traverse_moves (move->deps[i]);
764 move_vec.safe_push (move);
765 }
766
/* Remove unnecessary moves in the LIST, makes topological sorting,
   and removes cycles on hard reg dependencies by introducing new
   allocnos assigned to memory and additional moves.  It returns the
   result move list.  */
static move_t
modify_move_list (move_t list)
{
  int i, n, nregs, hard_regno;
  ira_allocno_t to, from;
  move_t move, new_move, set_move, first, last;

  if (list == NULL)
    return NULL;
  /* Create move deps.  First record, for each hard reg, the last move
     writing it (validity tracked by curr_tick).  */
  curr_tick++;
  for (move = list; move != NULL; move = move->next)
    {
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (to));
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* A move depends on any move that overwrites a hard reg it reads,
     unless that move sets the same original regno.  Two identical
     scans: one to count deps, one to fill the array.  */
  for (move = list; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (from));
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      n++;
	  move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      move->deps[n++] = hard_regno_last_set[hard_regno + i];
	  move->deps_num = n;
	}
    }
  /* Topological sorting:  */
  move_vec.truncate (0);
  for (move = list; move != NULL; move = move->next)
    traverse_moves (move);
  /* move_vec is in postorder, so relink the list back-to-front.  */
  last = NULL;
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      if (last != NULL)
	last->next = move;
      last = move;
    }
  first = move_vec.last ();
  /* Removing cycles: if a move still reads a hard reg overwritten
     earlier in the sorted sequence, break the cycle by redirecting
     the earlier move into a fresh memory-assigned allocno.  */
  curr_tick++;
  move_vec.truncate (0);
  for (move = first; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (from));
	  for (i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& ALLOCNO_HARD_REGNO
		   (hard_regno_last_set[hard_regno + i]->to) >= 0)
	      {
		int n, j;
		ira_allocno_t new_allocno;

		set_move = hard_regno_last_set[hard_regno + i];
		/* It does not matter what loop_tree_node (of TO or
		   FROM) to use for the new allocno because of
		   subsequent IRA internal representation
		   flattening.  */
		new_allocno
		  = create_new_allocno (ALLOCNO_REGNO (set_move->to),
					ALLOCNO_LOOP_TREE_NODE (set_move->to));
		ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
		ira_set_allocno_class (new_allocno,
				       ALLOCNO_CLASS (set_move->to));
		ira_create_allocno_objects (new_allocno);
		ALLOCNO_ASSIGNED_P (new_allocno) = true;
		ALLOCNO_HARD_REGNO (new_allocno) = -1;
		ALLOCNO_EMIT_DATA (new_allocno)->reg
		  = ira_create_new_reg (allocno_emit_reg (set_move->to));

		/* Make it possibly conflicting with all earlier
		   created allocnos.  Cases where temporary allocnos
		   created to remove the cycles are quite rare.  */
		n = ALLOCNO_NUM_OBJECTS (new_allocno);
		gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
		for (j = 0; j < n; j++)
		  {
		    ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);

		    OBJECT_MIN (new_obj) = 0;
		    OBJECT_MAX (new_obj) = ira_objects_num - 1;
		  }

		/* Restore the original value from the temporary after
		   the overwriting move; the earlier move now targets
		   the temporary.  */
		new_move = create_move (set_move->to, new_allocno);
		set_move->to = new_allocno;
		move_vec.safe_push (new_move);
		ira_move_loops_num++;
		if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		  fprintf (ira_dump_file,
			   "    Creating temporary allocno a%dr%d\n",
			   ALLOCNO_NUM (new_allocno),
			   REGNO (allocno_emit_reg (new_allocno)));
	      }
	}
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (to));
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* Append the cycle-breaking restore moves at the end of the list.  */
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      last->next = move;
      last = move;
    }
  return first;
}
906
/* Generate RTX move insns from the move list LIST.  This updates
   allocation cost using move execution frequency FREQ.  Returns the
   generated insn sequence.  */
static rtx_insn *
emit_move_list (move_t list, int freq)
{
  rtx to, from, dest;
  int to_regno, from_regno, cost, regno;
  rtx_insn *result, *insn;
  rtx set;
  machine_mode mode;
  enum reg_class aclass;

  grow_reg_equivs ();
  start_sequence ();
  for (; list != NULL; list = list->next)
    {
      /* Emit each move into its own sub-sequence so list->insn points
	 at exactly the insns for this move.  */
      start_sequence ();
      to = allocno_emit_reg (list->to);
      to_regno = REGNO (to);
      from = allocno_emit_reg (list->from);
      from_regno = REGNO (from);
      emit_move_insn (to, from);
      list->insn = get_insns ();
      end_sequence ();
      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
	{
	  /* The reload needs to have set up insn codes.  If the
	     reload sets up insn codes by itself, it may fail because
	     insns will have hard registers instead of pseudos and
	     there may be no machine insn with given hard
	     registers.  */
	  recog_memoized (insn);
	  /* Add insn to equiv init insn list if it is necessary.
	     Otherwise reload will not remove this insn if it decides
	     to use the equivalence.  */
	  if ((set = single_set (insn)) != NULL_RTX)
	    {
	      dest = SET_DEST (set);
	      if (GET_CODE (dest) == SUBREG)
		dest = SUBREG_REG (dest);
	      ira_assert (REG_P (dest));
	      regno = REGNO (dest);
	      if (regno >= ira_reg_equiv_len
		  || (ira_reg_equiv[regno].invariant == NULL_RTX
		      && ira_reg_equiv[regno].constant == NULL_RTX))
		continue; /* regno has no equivalence.  */
	      ira_assert ((int) reg_equivs->length () > regno);
	      reg_equiv_init (regno)
		= gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
	    }
	}
      if (ira_use_lra_p)
	ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn);
      emit_insn (list->insn);
      /* Account the cost of this move: store (reg->mem), load
	 (mem->reg) or shuffle (reg->reg).  */
      mode = ALLOCNO_MODE (list->to);
      aclass = ALLOCNO_CLASS (list->to);
      cost = 0;
      if (ALLOCNO_HARD_REGNO (list->to) < 0)
	{
	  if (ALLOCNO_HARD_REGNO (list->from) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_store_cost += cost;
	    }
	}
      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
	{
	  if (ALLOCNO_HARD_REGNO (list->to) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_load_cost += cost;
	    }
	}
      else
	{
	  ira_init_register_move_cost_if_necessary (mode);
	  cost = ira_register_move_cost[mode][aclass][aclass] * freq;
	  ira_shuffle_cost += cost;
	}
      ira_overall_cost += cost;
    }
  result = get_insns ();
  end_sequence ();
  return result;
}
992
/* Generate RTX move insns from move lists attached to basic blocks
   and edges. */
static void
emit_moves (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  rtx_insn *insns, *tmp, *next;

  FOR_EACH_BB_FN (bb, cfun)
    {
      /* Moves attached to the start of the block: insert them after
	 the block's label and basic-block note, if any.  */
      if (at_bb_start[bb->index] != NULL)
	{
	  at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
	  insns
	    = emit_move_list (at_bb_start[bb->index], REG_FREQ_FROM_BB (bb));
	  /* Find the first position past the label / bb note.  */
	  tmp = BB_HEAD (bb);
	  if (LABEL_P (tmp))
	    tmp = NEXT_INSN (tmp);
	  if (NOTE_INSN_BASIC_BLOCK_P (tmp))
	    tmp = NEXT_INSN (tmp);
	  /* Make sure to put the location of TMP or a subsequent instruction
	     to avoid inheriting the location of the previous instruction. */
	  next = tmp;
	  while (next && !NONDEBUG_INSN_P (next))
	    next = NEXT_INSN (next);
	  if (next)
	    set_insn_locations (insns, INSN_LOCATION (next));
	  /* Insert before TMP when it is still the block head, after
	     its predecessor when we skipped a label/note, or at the
	     very end of the insn chain when TMP ran off it.  */
	  if (tmp == BB_HEAD (bb))
	    emit_insn_before (insns, tmp);
	  else if (tmp)
	    emit_insn_after (insns, PREV_INSN (tmp));
	  else
	    emit_insn_after (insns, get_last_insn ());
	}

      /* Moves attached to the end of the block: the assert checks the
	 last insn is not a control flow insn, so appending after it is
	 safe.  */
      if (at_bb_end[bb->index] != NULL)
	{
	  at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
	  insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
	  ira_assert (! control_flow_insn_p (BB_END (bb)));
	  emit_insn_after (insns, BB_END (bb));
	}

      /* Moves attached to edges: queue them; they are materialized
	 later by commit_edge_insertions.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->aux == NULL)
	    continue;
	  ira_assert ((e->flags & EDGE_ABNORMAL) == 0
		      || ! EDGE_CRITICAL_P (e));
	  e->aux = modify_move_list ((move_t) e->aux);
	  insert_insn_on_edge
	    (emit_move_list ((move_t) e->aux,
			     REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
	     e);
	  /* Count edges whose splitting will require an extra jump
	     (destination is not the fall-through block).  */
	  if (e->src->next_bb != e->dest)
	    ira_additional_jumps_num++;
	}
    }
}
1054
1055 /* Update costs of A and corresponding allocnos on upper levels on the
1056 loop tree from reading (if READ_P) or writing A on an execution
1057 path with FREQ. */
1058 static void
1059 update_costs (ira_allocno_t a, bool read_p, int freq)
1060 {
1061 ira_loop_tree_node_t parent;
1062
1063 for (;;)
1064 {
1065 ALLOCNO_NREFS (a)++;
1066 ALLOCNO_FREQ (a) += freq;
1067 ALLOCNO_MEMORY_COST (a)
1068 += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
1069 [read_p ? 1 : 0] * freq);
1070 if (ALLOCNO_CAP (a) != NULL)
1071 a = ALLOCNO_CAP (a);
1072 else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
1073 || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
1074 break;
1075 }
1076 }
1077
/* Process moves from LIST with execution FREQ to add ranges, copies,
   and modify costs for allocnos involved in the moves.  All regnos
   living through the list is in LIVE_THROUGH, and the loop tree node
   used to find corresponding allocnos is NODE.  */
static void
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
				     bitmap live_through, int freq)
{
  int start, n;
  unsigned int regno;
  move_t move;
  ira_allocno_t a;
  ira_copy_t cp;
  live_range_t r;
  bitmap_iterator bi;
  HARD_REG_SET hard_regs_live;

  if (list == NULL)
    return;
  /* N counts pseudos living through the list; it sizes conflict
     arrays allocated below.  */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    n++;
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges is not merged with
     the old ones.  */
  ira_max_point++;
  start = ira_max_point;
  for (move = list; move != NULL; move = move->next)
    {
      ira_allocno_t from = move->from;
      ira_allocno_t to = move->to;
      int nr, i;

      /* The source and destination of a move are handled explicitly
	 below, so drop them from the live-through set.  */
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));

      /* Make sure the destination objects have conflict arrays.  */
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
	    {
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file, " Allocate conflicts for a%dr%d\n",
			 ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
	      ira_allocate_object_conflicts (to_obj, n);
	    }
	}
      ior_hard_reg_conflicts (from, hard_regs_live);
      ior_hard_reg_conflicts (to, hard_regs_live);

      /* The move reads FROM and writes TO: update costs up the loop
	 tree and record a copy tying the two allocnos together.  */
      update_costs (from, true, freq);
      update_costs (to, false, freq);
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file, " Adding cp%d:a%dr%d-a%dr%d\n",
		 cp->num, ALLOCNO_NUM (cp->first),
		 REGNO (allocno_emit_reg (cp->first)),
		 ALLOCNO_NUM (cp->second),
		 REGNO (allocno_emit_reg (cp->second)));

      nr = ALLOCNO_NUM_OBJECTS (from);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
	  r = OBJECT_LIVE_RANGES (from_obj);
	  if (r == NULL || r->finish >= 0)
	    {
	      /* FROM has no still-open range: add a fresh one covering
		 up to this move.  */
	      ira_add_live_range_to_object (from_obj, start, ira_max_point);
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 " Adding range [%d..%d] to allocno a%dr%d\n",
			 start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	  else
	    {
	      /* FROM has an open range (finish == -1) started by an
		 earlier move in this list; close it at this point.  */
	      r->finish = ira_max_point;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 " Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	}
      ira_max_point++;
      /* Start an open-ended range (finish == -1) for TO; it is closed
	 by a later move in the list or by the fixup loop below.  */
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  ira_add_live_range_to_object (to_obj, ira_max_point, -1);
	}
      ira_max_point++;
    }
  /* Close destination ranges which are still open.  */
  for (move = list; move != NULL; move = move->next)
    {
      int nr, i;
      nr = ALLOCNO_NUM_OBJECTS (move->to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
	  r = OBJECT_LIVE_RANGES (to_obj);
	  if (r->finish < 0)
	    {
	      r->finish = ira_max_point - 1;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 " Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, r->finish, ALLOCNO_NUM (move->to),
			 REGNO (allocno_emit_reg (move->to)));
	    }
	}
    }
  /* Allocnos living through the whole list get one range covering all
     the points created above.  */
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    {
      ira_allocno_t to;
      int nr, i;

      a = node->regno_allocno_map[regno];
      /* Use the memory-optimized destination allocno instead when
	 there is one.  */
      if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
	a = to;
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_add_live_range_to_object (obj, start, ira_max_point - 1);
	}
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   " Adding range [%d..%d] to live through %s allocno a%dr%d\n",
	   start, ira_max_point - 1,
	   to != NULL ? "upper level" : "",
	   ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
    }
}
1214
1215 /* Process all move list to add ranges, conflicts, copies, and modify
1216 costs for allocnos involved in the moves. */
1217 static void
1218 add_ranges_and_copies (void)
1219 {
1220 basic_block bb;
1221 edge_iterator ei;
1222 edge e;
1223 ira_loop_tree_node_t node;
1224 bitmap live_through;
1225
1226 live_through = ira_allocate_bitmap ();
1227 FOR_EACH_BB_FN (bb, cfun)
1228 {
1229 /* It does not matter what loop_tree_node (of source or
1230 destination block) to use for searching allocnos by their
1231 regnos because of subsequent IR flattening. */
1232 node = IRA_BB_NODE (bb)->parent;
1233 bitmap_copy (live_through, df_get_live_in (bb));
1234 add_range_and_copies_from_move_list
1235 (at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1236 bitmap_copy (live_through, df_get_live_out (bb));
1237 add_range_and_copies_from_move_list
1238 (at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1239 FOR_EACH_EDGE (e, ei, bb->succs)
1240 {
1241 bitmap_and (live_through,
1242 df_get_live_in (e->dest), df_get_live_out (bb));
1243 add_range_and_copies_from_move_list
1244 ((move_t) e->aux, node, live_through,
1245 REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
1246 }
1247 }
1248 ira_free_bitmap (live_through);
1249 }
1250
/* The entry function changes code and generates shuffling allocnos on
   region borders for the regional (LOOPS_P is TRUE in this case)
   register allocation.  */
void
ira_emit (bool loops_p)
{
  basic_block bb;
  rtx_insn *insn;
  edge_iterator ei;
  edge e;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  size_t sz;

  /* Start every allocno from the original pseudo of its regno;
     change_loop below may substitute new pseudos inside regions.  */
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
  if (! loops_p)
    return;
  /* Per-basic-block lists of moves to insert at block start/end.  */
  sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
  at_bb_start = (move_t *) ira_allocate (sz);
  memset (at_bb_start, 0, sz);
  at_bb_end = (move_t *) ira_allocate (sz);
  memset (at_bb_end, 0, sz);
  local_allocno_bitmap = ira_allocate_bitmap ();
  used_regno_bitmap = ira_allocate_bitmap ();
  renamed_regno_bitmap = ira_allocate_bitmap ();
  max_regno_before_changing = max_reg_num ();
  /* Rewrite pseudos inside each region (see the file head comment:
     allocnos of the same pseudo may get different hard registers in
     different regions).  */
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
  set_allocno_somewhere_renamed_p ();
  ira_free_bitmap (used_regno_bitmap);
  ira_free_bitmap (renamed_regno_bitmap);
  ira_free_bitmap (local_allocno_bitmap);
  setup_entered_from_non_parent_p ();
  /* Generate moves on region borders for every edge not leading to
     the exit block.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      at_bb_start[bb->index] = NULL;
      at_bb_end[bb->index] = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	  generate_edge_moves (e);
    }
  allocno_last_set
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
  allocno_last_set_check
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
  curr_tick = 0;
  /* Two unification passes per block; the bool argument appears to
     select block-start vs block-end moves — confirm in unify_moves.  */
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, true);
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, false);
  move_vec.create (ira_allocnos_num);
  emit_moves ();
  add_ranges_and_copies ();
  /* Clean up: */
  FOR_EACH_BB_FN (bb, cfun)
    {
      free_move_list (at_bb_start[bb->index]);
      free_move_list (at_bb_end[bb->index]);
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  free_move_list ((move_t) e->aux);
	  e->aux = NULL;
	}
    }
  move_vec.release ();
  ira_free (allocno_last_set_check);
  ira_free (allocno_last_set);
  /* Materialize the insns queued on edges by emit_moves.  */
  commit_edge_insertions ();
  /* Fix insn codes. It is necessary to do it before reload because
     reload assumes initial insn codes defined. The insn codes can be
     invalidated by CFG infrastructure for example in jump
     redirection. */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_REVERSE (bb, insn)
      if (INSN_P (insn))
	recog_memoized (insn);
  ira_free (at_bb_end);
  ira_free (at_bb_start);
}
1332