ira-build.cc revision 1.1.1.1 1 /* Building internal representation for IRA.
2 Copyright (C) 2006-2022 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov (at) redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "predict.h"
28 #include "df.h"
29 #include "insn-config.h"
30 #include "regs.h"
31 #include "memmodel.h"
32 #include "ira.h"
33 #include "ira-int.h"
34 #include "sparseset.h"
35 #include "cfgloop.h"
36
/* Forward declaration; the definition appears later in this file.
   Looks up an existing copy between two allocnos for an insn in a
   given loop tree node.  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
				     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the all function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;



/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
99
100 /* Initialize some members in loop tree node NODE. Use LOOP_NUM for
101 the member loop_num. */
102 static void
103 init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
104 {
105 int max_regno = max_reg_num ();
106
107 node->regno_allocno_map
108 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
109 memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
110 memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
111 node->all_allocnos = ira_allocate_bitmap ();
112 node->modified_regnos = ira_allocate_bitmap ();
113 node->border_allocnos = ira_allocate_bitmap ();
114 node->local_copies = ira_allocate_bitmap ();
115 node->loop_num = loop_num;
116 node->children = NULL;
117 node->subloops = NULL;
118 }
119
120
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds the all function) will be not allocated
   but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  loop_p loop;

  /* One node per basic block; remember the count since live range
     splitting may create new blocks later.  */
  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
		     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
	      sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      /* No CFG loop info: a single region for the whole function.  */
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
			ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
		    ira_allocate (sizeof (struct ira_loop_tree_node)
				  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
	{
	  ira_loop_nodes[i].regno_allocno_map = NULL;
	  /* Skip loops entered or exited through an abnormal critical
	     edge: we could not emit region-border insns there.  A NULL
	     regno_allocno_map marks the node as unused.  */
	  skip_p = false;
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch
		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	  auto_vec<edge> edges = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (edges, j, e)
	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	}
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
189
190 /* The function returns TRUE if there are more one allocation
191 region. */
192 static bool
193 more_one_region_p (void)
194 {
195 unsigned int i;
196 loop_p loop;
197
198 if (current_loops != NULL)
199 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
200 if (ira_loop_nodes[i].regno_allocno_map != NULL
201 && ira_loop_tree_root != &ira_loop_nodes[i])
202 return true;
203 return false;
204 }
205
206 /* Free the loop tree node of a loop. */
207 static void
208 finish_loop_tree_node (ira_loop_tree_node_t loop)
209 {
210 if (loop->regno_allocno_map != NULL)
211 {
212 ira_assert (loop->bb == NULL);
213 ira_free_bitmap (loop->local_copies);
214 ira_free_bitmap (loop->border_allocnos);
215 ira_free_bitmap (loop->modified_regnos);
216 ira_free_bitmap (loop->all_allocnos);
217 ira_free (loop->regno_allocno_map);
218 loop->regno_allocno_map = NULL;
219 }
220 }
221
222 /* Free the loop tree nodes. */
223 static void
224 finish_loop_tree_nodes (void)
225 {
226 unsigned int i;
227
228 for (i = 0; i < ira_loop_nodes_count; i++)
229 finish_loop_tree_node (&ira_loop_nodes[i]);
230 ira_free (ira_loop_nodes);
231 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
232 {
233 if (ira_bb_nodes[i].local_copies != NULL)
234 ira_free_bitmap (ira_bb_nodes[i].local_copies);
235 if (ira_bb_nodes[i].border_allocnos != NULL)
236 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
237 if (ira_bb_nodes[i].modified_regnos != NULL)
238 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
239 if (ira_bb_nodes[i].all_allocnos != NULL)
240 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
241 if (ira_bb_nodes[i].regno_allocno_map != NULL)
242 ira_free (ira_bb_nodes[i].regno_allocno_map);
243 }
244 ira_free (ira_bb_nodes);
245 }
246
247
248
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL we adding
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (class loop *loop)
{
  int loop_num;
  class loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We cannot use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  /* regno_allocno_map == NULL means the loop was skipped (no region);
     children != NULL means the node is already linked into the tree.  */
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
	parent = NULL;
      else
	{
	  /* Find the nearest enclosing loop that itself forms a
	     region (skipped loops have a NULL map).  */
	  for (parent = loop_outer (loop);
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      if (parent == NULL)
	{
	  loop_node->next = NULL;
	  loop_node->subloop_next = NULL;
	  loop_node->parent = NULL;
	}
      else
	{
	  /* Push onto the parent's children and subloops lists.  */
	  parent_node = &ira_loop_nodes[parent->num];
	  loop_node->next = parent_node->children;
	  parent_node->children = loop_node;
	  loop_node->subloop_next = parent_node->subloops;
	  parent_node->subloops = loop_node;
	  loop_node->parent = parent_node;
	}
    }
}
301
302 /* The following recursive function sets up levels of nodes of the
303 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
304 The function returns maximal value of level in the tree + 1. */
305 static int
306 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
307 {
308 int height, max_height;
309 ira_loop_tree_node_t subloop_node;
310
311 ira_assert (loop_node->bb == NULL);
312 loop_node->level = level;
313 max_height = level + 1;
314 for (subloop_node = loop_node->subloops;
315 subloop_node != NULL;
316 subloop_node = subloop_node->subloop_next)
317 {
318 ira_assert (subloop_node->bb == NULL);
319 height = setup_loop_tree_level (subloop_node, level + 1);
320 if (height > max_height)
321 max_height = height;
322 }
323 return max_height;
324 }
325
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  class loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We cannot use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
	parent = NULL;
      else
	{
	  /* Find the nearest enclosing loop that forms a region
	     (skipped loops have a NULL regno_allocno_map).  */
	  for (parent = bb->loop_father;
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      /* Make sure the region's loop node is in the tree, then link
	 the BB node onto its children list.  */
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
368
369
370
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  Needed when the number of pseudos (max_reg_num) has
   changed: the maps are reallocated at the new size and refilled from
   the existing allocnos.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  /* Reallocate the per-loop maps for nodes that form regions.  */
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
	ira_free (ira_loop_nodes[l].regno_allocno_map);
	ira_loop_nodes[l].regno_allocno_map
	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
					    * max_regno);
	memset (ira_loop_nodes[l].regno_allocno_map, 0,
		sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps.  */
	continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      /* Chain the allocno onto the per-regno list.  */
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle.  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
}
415
416
/* Pools for allocnos, allocno live ranges and objects.  */
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");

/* Vec containing references to all created allocnos.  It is a
   container of array ira_allocnos (which points into this vec's
   storage).  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map (which points into this vec's
   storage).  */
static vec<ira_object_t> ira_object_id_map_vec;
430
431 /* Initialize data concerning allocnos. */
432 static void
433 initiate_allocnos (void)
434 {
435 allocno_vec.create (max_reg_num () * 2);
436 ira_allocnos = NULL;
437 ira_allocnos_num = 0;
438 ira_objects_num = 0;
439 ira_object_id_map_vec.create (max_reg_num () * 2);
440 ira_object_id_map = NULL;
441 ira_regno_allocno_map
442 = (ira_allocno_t *) ira_allocate (max_reg_num ()
443 * sizeof (ira_allocno_t));
444 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
445 }
446
/* Create and return an object corresponding to a new allocno A.
   SUBWORD is the word index within A for multi-object allocnos.  The
   new object gets the next conflict id and is registered in
   ira_object_id_map.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = object_pool.allocate ();

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  /* Start with the unallocatable registers plus everything outside
     the allocno's class as conflicting hard regs.  */
  OBJECT_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  /* Empty live range: min > max.  */
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  /* The vec may have been reallocated by the push.  */
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}
475
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
		    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = allocno_pool.allocate ();
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      /* Link onto the per-regno chain and record it in the loop
	 node's map unless another allocno of this regno got there
	 first.  */
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle on region borders (see
	   ira-emit.cc).  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
  /* Initialize every member to a neutral value; costs and classes
     are filled in later by the cost/class calculation passes.  */
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (a) = false;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CROSSED_CALLS_ABIS (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  /* The vec may have been reallocated by the push.  */
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}
541
542 /* Set up register class for A and update its conflict hard
543 registers. */
544 void
545 ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
546 {
547 ira_allocno_object_iterator oi;
548 ira_object_t obj;
549
550 ALLOCNO_CLASS (a) = aclass;
551 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
552 {
553 OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
554 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
555 }
556 }
557
558 /* Determine the number of objects we should associate with allocno A
559 and allocate them. */
560 void
561 ira_create_allocno_objects (ira_allocno_t a)
562 {
563 machine_mode mode = ALLOCNO_MODE (a);
564 enum reg_class aclass = ALLOCNO_CLASS (a);
565 int n = ira_reg_class_max_nregs[aclass][mode];
566 int i;
567
568 if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD))
569 n = 1;
570
571 ALLOCNO_NUM_OBJECTS (a) = n;
572 for (i = 0; i < n; i++)
573 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
574 }
575
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}
588
/* Merge hard register conflict information for all objects associated with
   allocno FROM into the corresponding objects associated with TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
			  bool total_only)
{
  int i;
  /* The merge is per corresponding subword object, so both allocnos
     must have the same object count.  */
  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
	OBJECT_CONFLICT_HARD_REGS (to_obj)
	  |= OBJECT_CONFLICT_HARD_REGS (from_obj);
      OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj)
	|= OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj);
    }
#ifdef STACK_REGS
  /* The no-stack-reg flags are merged the same way.  */
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
616
617 /* Update hard register conflict information for all objects associated with
618 A to include the regs in SET. */
619 void
620 ior_hard_reg_conflicts (ira_allocno_t a, const_hard_reg_set set)
621 {
622 ira_allocno_object_iterator i;
623 ira_object_t obj;
624
625 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
626 {
627 OBJECT_CONFLICT_HARD_REGS (obj) |= set;
628 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= set;
629 }
630 }
631
632 /* Return TRUE if a conflict vector with NUM elements is more
633 profitable than a conflict bit vector for OBJ. */
634 bool
635 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
636 {
637 int nbytes;
638 int max = OBJECT_MAX (obj);
639 int min = OBJECT_MIN (obj);
640
641 if (max < min)
642 /* We prefer a bit vector in such case because it does not result
643 in allocation. */
644 return false;
645
646 nbytes = (max - min) / 8 + 1;
647 STATIC_ASSERT (sizeof (ira_object_t) <= 8);
648 /* Don't use sizeof (ira_object_t), use constant 8. Size of ira_object_t (a
649 pointer) is different on 32-bit and 64-bit targets. Usage sizeof
650 (ira_object_t) can result in different code generation by GCC built as 32-
651 and 64-bit program. In any case the profitability is just an estimation
652 and border cases are rare. */
653 return (2 * 8 /* sizeof (ira_object_t) */ * (num + 1) < 3 * nbytes);
654 }
655
656 /* Allocates and initialize the conflict vector of OBJ for NUM
657 conflicting objects. */
658 void
659 ira_allocate_conflict_vec (ira_object_t obj, int num)
660 {
661 int size;
662 ira_object_t *vec;
663
664 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
665 num++; /* for NULL end marker */
666 size = sizeof (ira_object_t) * num;
667 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
668 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
669 vec[0] = NULL;
670 OBJECT_NUM_CONFLICTS (obj) = 0;
671 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
672 OBJECT_CONFLICT_VEC_P (obj) = true;
673 }
674
/* Allocate and initialize the conflict bit vector of OBJ.  The vector
   covers conflict ids in [OBJECT_MIN, OBJECT_MAX], rounded up to
   whole IRA_INT_TYPE words.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	  / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
689
690 /* Allocate and initialize the conflict vector or conflict bit vector
691 of OBJ for NUM conflicting allocnos whatever is more profitable. */
692 void
693 ira_allocate_object_conflicts (ira_object_t obj, int num)
694 {
695 if (ira_conflict_vector_profitable_p (obj, num))
696 ira_allocate_conflict_vec (obj, num);
697 else
698 allocate_conflict_bit_vec (obj);
699 }
700
701 /* Add OBJ2 to the conflicts of OBJ1. */
702 static void
703 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
704 {
705 int num;
706 unsigned int size;
707
708 if (OBJECT_CONFLICT_VEC_P (obj1))
709 {
710 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
711 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
712 num = curr_num + 2;
713 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
714 {
715 ira_object_t *newvec;
716 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
717 newvec = (ira_object_t *) ira_allocate (size);
718 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
719 ira_free (vec);
720 vec = newvec;
721 OBJECT_CONFLICT_ARRAY (obj1) = vec;
722 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
723 }
724 vec[num - 2] = obj2;
725 vec[num - 1] = NULL;
726 OBJECT_NUM_CONFLICTS (obj1)++;
727 }
728 else
729 {
730 int nw, added_head_nw, id;
731 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
732
733 id = OBJECT_CONFLICT_ID (obj2);
734 if (OBJECT_MIN (obj1) > id)
735 {
736 /* Expand head of the bit vector. */
737 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
738 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
739 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
740 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
741 {
742 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
743 vec, nw * sizeof (IRA_INT_TYPE));
744 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
745 }
746 else
747 {
748 size
749 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
750 vec = (IRA_INT_TYPE *) ira_allocate (size);
751 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
752 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
753 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
754 memset ((char *) vec
755 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
756 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
757 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
758 OBJECT_CONFLICT_ARRAY (obj1) = vec;
759 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
760 }
761 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
762 }
763 else if (OBJECT_MAX (obj1) < id)
764 {
765 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
766 size = nw * sizeof (IRA_INT_TYPE);
767 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
768 {
769 /* Expand tail of the bit vector. */
770 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
771 vec = (IRA_INT_TYPE *) ira_allocate (size);
772 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
773 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
774 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
775 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
776 OBJECT_CONFLICT_ARRAY (obj1) = vec;
777 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
778 }
779 OBJECT_MAX (obj1) = id;
780 }
781 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
782 }
783 }
784
/* Add OBJ1 to the conflicts of OBJ2 and vice versa: the conflict
   relation is kept symmetric.  */
static void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
792
793 /* Clear all conflicts of OBJ. */
794 static void
795 clear_conflicts (ira_object_t obj)
796 {
797 if (OBJECT_CONFLICT_VEC_P (obj))
798 {
799 OBJECT_NUM_CONFLICTS (obj) = 0;
800 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
801 }
802 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
803 {
804 int nw;
805
806 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
807 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
808 }
809 }
810
/* The array used to find duplications in conflict vectors of
   allocnos, indexed by conflict id.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno; bumping it invalidates all marks at once.  */
static int curr_conflict_check_tick;
818
819 /* Remove duplications in conflict vector of OBJ. */
820 static void
821 compress_conflict_vec (ira_object_t obj)
822 {
823 ira_object_t *vec, conflict_obj;
824 int i, j;
825
826 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
827 vec = OBJECT_CONFLICT_VEC (obj);
828 curr_conflict_check_tick++;
829 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
830 {
831 int id = OBJECT_CONFLICT_ID (conflict_obj);
832 if (conflict_check[id] != curr_conflict_check_tick)
833 {
834 conflict_check[id] = curr_conflict_check_tick;
835 vec[j++] = conflict_obj;
836 }
837 }
838 OBJECT_NUM_CONFLICTS (obj) = j;
839 vec[j] = NULL;
840 }
841
842 /* Remove duplications in conflict vectors of all allocnos. */
843 static void
844 compress_conflict_vecs (void)
845 {
846 ira_object_t obj;
847 ira_object_iterator oi;
848
849 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
850 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
851 curr_conflict_check_tick = 0;
852 FOR_EACH_OBJECT (obj, oi)
853 {
854 if (OBJECT_CONFLICT_VEC_P (obj))
855 compress_conflict_vec (obj);
856 }
857 ira_free (conflict_check);
858 }
859
/* This recursive function outputs allocno A to ira_dump_file and if
   it is a cap the function outputs its members.  The format is
   "a<num>(r<regno>,b<bb>|l<loop>[:<member>])".  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
879
/* Create and return the cap representing allocno A in the
   parent loop.  The cap inherits A's mode, class, costs, conflicts
   and call-crossing info.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  /* cap_p = true: caps are not entered in the regno allocno maps.  */
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  /* Link cap and member both ways.  */
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  ALLOCNO_CROSSED_CALLS_ABIS (cap) = ALLOCNO_CROSSED_CALLS_ABIS (a);
  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap)
    = ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
925
926 /* Create and return a live range for OBJECT with given attributes. */
927 live_range_t
928 ira_create_live_range (ira_object_t obj, int start, int finish,
929 live_range_t next)
930 {
931 live_range_t p;
932
933 p = live_range_pool.allocate ();
934 p->object = obj;
935 p->start = start;
936 p->finish = finish;
937 p->next = next;
938 return p;
939 }
940
941 /* Create a new live range for OBJECT and queue it at the head of its
942 live range list. */
943 void
944 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
945 {
946 live_range_t p;
947 p = ira_create_live_range (object, start, finish,
948 OBJECT_LIVE_RANGES (object));
949 OBJECT_LIVE_RANGES (object) = p;
950 }
951
/* Copy allocno live range R and return the result.  The copy shares
   R's next pointer, so callers re-link it as needed.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  *p = *r;
  return p;
}
962
963 /* Copy allocno live range list given by its head R and return the
964 result. */
965 live_range_t
966 ira_copy_live_range_list (live_range_t r)
967 {
968 live_range_t p, first, last;
969
970 if (r == NULL)
971 return NULL;
972 for (first = last = NULL; r != NULL; r = r->next)
973 {
974 p = copy_live_range (r);
975 if (first == NULL)
976 first = p;
977 else
978 last->next = p;
979 last = p;
980 }
981 return first;
982 }
983
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  The input lists are consumed: overlapping ranges
   are coalesced and their spare nodes freed.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      /* Keep the invariant r1->start >= r2->start.  */
      if (r1->start < r2->start)
	std::swap (r1, r2);
      /* "+ 1" also merges adjacent (touching) ranges.  */
      if (r1->start <= r2->finish + 1)
	{
	  /* Intersected ranges: merge r1 and r2 into r1.  */
	  r1->start = r2->start;
	  if (r1->finish < r2->finish)
	    r1->finish = r2->finish;
	  live_range_t temp = r2;
	  r2 = r2->next;
	  ira_finish_live_range (temp);
	  if (r2 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r1.  */
	      r2 = r1->next;
	      r1->next = NULL;
	    }
	}
      else
	{
	  /* Add r1 to the result.  */
	  if (first == NULL)
	    first = last = r1;
	  else
	    {
	      last->next = r1;
	      last = r1;
	    }
	  r1 = r1->next;
	  if (r1 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r2.  */
	      r1 = r2->next;
	      r2->next = NULL;
	    }
	}
    }
  /* Append whichever tail is left over.  */
  if (r1 != NULL)
    {
      if (first == NULL)
	first = r1;
      else
	last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
	first = r2;
      else
	last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
1057
1058 /* Return TRUE if live ranges R1 and R2 intersect. */
1059 bool
1060 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1061 {
1062 /* Remember the live ranges are always kept ordered. */
1063 while (r1 != NULL && r2 != NULL)
1064 {
1065 if (r1->start > r2->finish)
1066 r1 = r1->next;
1067 else if (r2->start > r1->finish)
1068 r2 = r2->next;
1069 else
1070 return true;
1071 }
1072 return false;
1073 }
1074
1075 /* Free allocno live range R. */
1076 void
1077 ira_finish_live_range (live_range_t r)
1078 {
1079 live_range_pool.remove (r);
1080 }
1081
1082 /* Free list of allocno live ranges starting with R. */
1083 void
1084 ira_finish_live_range_list (live_range_t r)
1085 {
1086 live_range_t next_r;
1087
1088 for (; r != NULL; r = next_r)
1089 {
1090 next_r = r->next;
1091 ira_finish_live_range (r);
1092 }
1093 }
1094
1095 /* Free updated register costs of allocno A. */
1096 void
1097 ira_free_allocno_updated_costs (ira_allocno_t a)
1098 {
1099 enum reg_class aclass;
1100
1101 aclass = ALLOCNO_CLASS (a);
1102 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1103 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1104 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1105 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1106 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1107 aclass);
1108 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1109 }
1110
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  Also releases A's objects (live ranges, conflict arrays) and
   unlinks A and its objects from the global id maps.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  /* First dispose of the per-object data attached to A.  */
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
	ira_free (OBJECT_CONFLICT_ARRAY (obj));
      object_pool.remove (obj);
    }

  /* Drop A from the global allocno array, then free each of the four
     cost vectors that may have been allocated for it.  */
  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
1144
1145 /* Free the memory allocated for allocno A. */
1146 static void
1147 finish_allocno (ira_allocno_t a)
1148 {
1149 ira_free_allocno_costs (a);
1150 allocno_pool.remove (a);
1151 }
1152
/* Free the memory allocated for all allocnos.  Tears down the
   regno map, the container vectors and finally the backing pools.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  /* Release the vectors before the pools whose elements they
     referenced.  */
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  allocno_pool.release ();
  object_pool.release ();
  live_range_pool.release ();
}
1169
1170
1171
/* Pool allocator backing all allocno preference records.  */
static object_allocator <ira_allocno_pref> pref_pool ("prefs");

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;
1179
1180 /* The function initializes data concerning allocno prefs. */
1181 static void
1182 initiate_prefs (void)
1183 {
1184 pref_vec.create (get_max_uid ());
1185 ira_prefs = NULL;
1186 ira_prefs_num = 0;
1187 }
1188
1189 /* Return pref for A and HARD_REGNO if any. */
1190 static ira_pref_t
1191 find_allocno_pref (ira_allocno_t a, int hard_regno)
1192 {
1193 ira_pref_t pref;
1194
1195 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1196 if (pref->allocno == a && pref->hard_regno == hard_regno)
1197 return pref;
1198 return NULL;
1199 }
1200
1201 /* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
1202 ira_pref_t
1203 ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
1204 {
1205 ira_pref_t pref;
1206
1207 pref = pref_pool.allocate ();
1208 pref->num = ira_prefs_num;
1209 pref->allocno = a;
1210 pref->hard_regno = hard_regno;
1211 pref->freq = freq;
1212 pref_vec.safe_push (pref);
1213 ira_prefs = pref_vec.address ();
1214 ira_prefs_num = pref_vec.length ();
1215 return pref;
1216 }
1217
1218 /* Attach a pref PREF to the corresponding allocno. */
1219 static void
1220 add_allocno_pref_to_list (ira_pref_t pref)
1221 {
1222 ira_allocno_t a = pref->allocno;
1223
1224 pref->next_pref = ALLOCNO_PREFS (a);
1225 ALLOCNO_PREFS (a) = pref;
1226 }
1227
1228 /* Create (or update frequency if the pref already exists) the pref of
1229 allocnos A preferring HARD_REGNO with frequency FREQ. */
1230 void
1231 ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
1232 {
1233 ira_pref_t pref;
1234
1235 if (freq <= 0)
1236 return;
1237 if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
1238 {
1239 pref->freq += freq;
1240 return;
1241 }
1242 pref = ira_create_pref (a, hard_regno, freq);
1243 ira_assert (a != NULL);
1244 add_allocno_pref_to_list (pref);
1245 }
1246
/* Print info about PREF into file F in the form
   "pref<num>:a<allocno>(r<regno>)<-hr<hard-regno>@<freq>".  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
	   ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
	   pref->hard_regno, pref->freq);
}
1255
/* Print info about PREF into stderr.  Debugger entry point.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}
1262
1263 /* Print info about all prefs into file F. */
1264 static void
1265 print_prefs (FILE *f)
1266 {
1267 ira_pref_t pref;
1268 ira_pref_iterator pi;
1269
1270 FOR_EACH_PREF (pref, pi)
1271 print_pref (f, pref);
1272 }
1273
/* Print info about all prefs into stderr.  Debugger entry point.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}
1280
/* Print info about prefs involving allocno A into file F, one line
   listing every pref on A's chain.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  ira_pref_t pref;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}
1292
/* Print info about prefs involving allocno A into stderr.  Debugger
   entry point.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}
1299
/* The function frees memory allocated for PREF.  PREF's slot in the
   ira_prefs array is cleared before the record is returned to the
   pool (so pref->num must be read first).  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pref_pool.remove (pref);
}
1307
/* Remove PREF from the list of allocno prefs and free memory for
   it.  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
	     pref->num, pref->hard_regno, pref->freq);
  /* Find PREF's predecessor on its allocno's singly linked pref
     chain.  */
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  /* PREF must be present on its own allocno's list.  */
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}
1330
1331 /* Remove all prefs of allocno A. */
1332 void
1333 ira_remove_allocno_prefs (ira_allocno_t a)
1334 {
1335 ira_pref_t pref, next_pref;
1336
1337 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
1338 {
1339 next_pref = pref->next_pref;
1340 finish_pref (pref);
1341 }
1342 ALLOCNO_PREFS (a) = NULL;
1343 }
1344
1345 /* Free memory allocated for all prefs. */
1346 static void
1347 finish_prefs (void)
1348 {
1349 ira_pref_t pref;
1350 ira_pref_iterator pi;
1351
1352 FOR_EACH_PREF (pref, pi)
1353 finish_pref (pref);
1354 pref_vec.release ();
1355 pref_pool.release ();
1356 }
1357
1358
1359
/* Pool allocator backing all allocno copy records.  */
static object_allocator<ira_allocno_copy> copy_pool ("copies");

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;
1367
1368 /* The function initializes data concerning allocno copies. */
1369 static void
1370 initiate_copies (void)
1371 {
1372 copy_vec.create (get_max_uid ());
1373 ira_copies = NULL;
1374 ira_copies_num = 0;
1375 }
1376
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  Each copy is threaded onto the lists of
   both of its allocnos, so the next link to follow depends on which
   end of the copy A1 is.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
		   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a1)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	/* A copy on A1's list must have A1 at one of its ends.  */
	gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
	  && cp->loop_tree_node == loop_tree_node)
	return cp;
    }
  return NULL;
}
1406
1407 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1408 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1409 ira_copy_t
1410 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1411 bool constraint_p, rtx_insn *insn,
1412 ira_loop_tree_node_t loop_tree_node)
1413 {
1414 ira_copy_t cp;
1415
1416 cp = copy_pool.allocate ();
1417 cp->num = ira_copies_num;
1418 cp->first = first;
1419 cp->second = second;
1420 cp->freq = freq;
1421 cp->constraint_p = constraint_p;
1422 cp->insn = insn;
1423 cp->loop_tree_node = loop_tree_node;
1424 copy_vec.safe_push (cp);
1425 ira_copies = copy_vec.address ();
1426 ira_copies_num = copy_vec.length ();
1427 return cp;
1428 }
1429
/* Attach a copy CP to allocnos involved into the copy.  CP becomes
   the new head of both allocnos' copy lists; the previous heads'
   back links are fixed up, picking the prev_first/prev_second slot
   according to which end of the old head matches.  */
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1457
1458 /* Make a copy CP a canonical copy where number of the
1459 first allocno is less than the second one. */
1460 static void
1461 swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1462 {
1463 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1464 return;
1465
1466 std::swap (cp->first, cp->second);
1467 std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
1468 std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
1469 }
1470
1471 /* Create (or update frequency if the copy already exists) and return
1472 the copy of allocnos FIRST and SECOND with frequency FREQ
1473 corresponding to move insn INSN (if any) and originated from
1474 LOOP_TREE_NODE. */
1475 ira_copy_t
1476 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1477 bool constraint_p, rtx_insn *insn,
1478 ira_loop_tree_node_t loop_tree_node)
1479 {
1480 ira_copy_t cp;
1481
1482 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1483 {
1484 cp->freq += freq;
1485 return cp;
1486 }
1487 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1488 loop_tree_node);
1489 ira_assert (first != NULL && second != NULL);
1490 add_allocno_copy_to_list (cp);
1491 swap_allocno_copy_ends_if_necessary (cp);
1492 return cp;
1493 }
1494
/* Print info about copy CP into file F.  The trailing tag shows the
   copy's origin: a real move insn, a constraint, or a shuffle.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
	   cp->insn != NULL
	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
1505
/* Debugger helper: print copy REF to stderr.  */
DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}
1511
1512 DEBUG_FUNCTION void
1513 debug (ira_allocno_copy *ptr)
1514 {
1515 if (ptr)
1516 debug (*ptr);
1517 else
1518 fprintf (stderr, "<nil>\n");
1519 }
1520
/* Print info about copy CP into stderr.  Debugger entry point.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
1527
1528 /* Print info about all copies into file F. */
1529 static void
1530 print_copies (FILE *f)
1531 {
1532 ira_copy_t cp;
1533 ira_copy_iterator ci;
1534
1535 FOR_EACH_COPY (cp, ci)
1536 print_copy (f, cp);
1537 }
1538
/* Print info about all copies into stderr.  Debugger entry point.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
1545
/* Print info about copies involving allocno A into file F.  Walks
   A's copy chain, following the first- or second-end link depending
   on which end of each copy A is.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	/* A copy on A's list must have A at one of its ends.  */
	gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
1573
/* Debugger helper: print the copies involving allocno REF to
   stderr.  */
DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}
1579
1580 DEBUG_FUNCTION void
1581 debug (ira_allocno *ptr)
1582 {
1583 if (ptr)
1584 debug (*ptr);
1585 else
1586 fprintf (stderr, "<nil>\n");
1587 }
1588
1589
/* Print info about copies involving allocno A into stderr.  Debugger
   entry point.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
1596
/* The function frees memory allocated for copy CP, returning it to
   the copy pool.  (Unlike finish_pref, the ira_copies slot is not
   cleared here.)  */
static void
finish_copy (ira_copy_t cp)
{
  copy_pool.remove (cp);
}
1603
1604
1605 /* Free memory allocated for all copies. */
1606 static void
1607 finish_copies (void)
1608 {
1609 ira_copy_t cp;
1610 ira_copy_iterator ci;
1611
1612 FOR_EACH_COPY (cp, ci)
1613 finish_copy (cp);
1614 copy_vec.release ();
1615 copy_pool.release ();
1616 }
1617
1618
1619
/* Pools for cost vectors, indexed by register class.  Only the slots
   for allocno classes are ever created (see initiate_cost_vectors).  */
static pool_allocator *cost_vector_pool[N_REG_CLASSES];
1623
1624 /* The function initiates work with hard register cost vectors. It
1625 creates allocation pool for each allocno class. */
1626 static void
1627 initiate_cost_vectors (void)
1628 {
1629 int i;
1630 enum reg_class aclass;
1631
1632 for (i = 0; i < ira_allocno_classes_num; i++)
1633 {
1634 aclass = ira_allocno_classes[i];
1635 cost_vector_pool[aclass] = new pool_allocator
1636 ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
1637 }
1638 }
1639
/* Allocate and return a cost vector for ACLASS from that class's
   pool.  The vector is not initialized.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int*) cost_vector_pool[(int) aclass]->allocate ();
}
1646
/* Free a cost vector VEC for ACLASS, returning it to the class's
   pool.  VEC must be non-null.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  cost_vector_pool[(int) aclass]->remove (vec);
}
1654
1655 /* Finish work with hard register cost vectors. Release allocation
1656 pool for each allocno class. */
1657 static void
1658 finish_cost_vectors (void)
1659 {
1660 int i;
1661 enum reg_class aclass;
1662
1663 for (i = 0; i < ira_allocno_classes_num; i++)
1664 {
1665 aclass = ira_allocno_classes[i];
1666 delete cost_vector_pool[aclass];
1667 }
1668 }
1669
1670
1671
/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks would be visited in a different order, we would still compute a
   correct post-ordering but it would be less likely that two nodes
   connected by an edge in the CFG are neighbors in the topsort.  */

static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
				  const vec<ira_loop_tree_node_t> &loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
	 the flag to mark blocks we still have to visit to add them to
	 our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      /* Mark every body block as pending.  */
      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	{
	  gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
	  subloop_node->bb->flags |= BB_TO_VISIT;
	}

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      /* Iterative DFS over the reverse CFG (following predecessor
	 edges), seeded from the blocks in reverse pre-order.  */
      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
	{
	  if (! (subloop_node->bb->flags & BB_TO_VISIT))
	    continue;

	  subloop_node->bb->flags &= ~BB_TO_VISIT;
	  dfs_stack.quick_push (subloop_node);
	  while (! dfs_stack.is_empty ())
	    {
	      edge e;
	      edge_iterator ei;

	      ira_loop_tree_node_t n = dfs_stack.last ();
	      FOR_EACH_EDGE (e, ei, n->bb->preds)
		{
		  ira_loop_tree_node_t pred_node;
		  basic_block pred_bb = e->src;

		  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		    continue;

		  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
		  if (pred_node != n
		      && (pred_node->bb->flags & BB_TO_VISIT))
		    {
		      pred_node->bb->flags &= ~BB_TO_VISIT;
		      dfs_stack.quick_push (pred_node);
		    }
		}
	      /* If no pending predecessor was pushed, N is finished:
		 emit it in the post-order.  */
	      if (n == dfs_stack.last ())
		{
		  dfs_stack.pop ();
		  topsort_nodes.quick_push (n);
		}
	    }
	}

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
1758
/* The loop tree node currently being traversed and its regno -> allocno
   map, maintained by ira_traverse_loop_tree for its callbacks.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
1762
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE is also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of the loop tree.  */

void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      auto_vec<ira_loop_tree_node_t> loop_preorder;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
	 is set up such that nodes in the loop body appear in a pre-order
	 of their place in the CFG.  */
      for (subloop_node = loop_node->children;
	   subloop_node != NULL;
	   subloop_node = subloop_node->next)
	if (subloop_node->bb != NULL)
	  loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
	FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	  (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
	{
	  vec<ira_loop_tree_node_t> loop_rev_postorder =
	    ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
	  FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
	    (*postorder_func) (subloop_node);
	  loop_rev_postorder.release ();
	}
    }

  /* Recurse into the subloops; each recursion overwrites the two
     ira_curr_* globals.  */
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* Restore the globals clobbered by the recursion before the
     postorder callback runs for LOOP_NODE itself.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1835
1836
1837
/* The basic block currently being processed; set by create_bb_allocnos
   and read by create_insn_allocnos for frequency accounting.  */
static basic_block curr_bb;
1841
/* This recursive function creates allocnos corresponding to
   pseudo-registers contained in X.  True OUTPUT_P means that X is
   an lvalue.  OUTER corresponds to the parent expression of X, or is
   NULL at the top level.  */
static void
create_insn_allocnos (rtx x, rtx outer, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
	{
	  ira_allocno_t a;

	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
	    {
	      a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
	      /* A pseudo first seen inside a SUBREG may need the wider
		 outer mode recorded.  */
	      if (outer != NULL && GET_CODE (outer) == SUBREG)
		{
		  machine_mode wmode = GET_MODE (outer);
		  if (partial_subreg_p (ALLOCNO_WMODE (a), wmode))
		    ALLOCNO_WMODE (a) = wmode;
		}
	    }

	  ALLOCNO_NREFS (a)++;
	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
	  if (output_p)
	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
	}
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), NULL, true);
      create_insn_allocnos (SET_SRC (x), NULL, false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      return;
    }
  else if (code == MEM)
    {
      /* A MEM's address is always a use, even when the MEM itself is
	 written.  */
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      /* Auto-modify addressing both reads and writes the register.  */
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }

  /* Generic case: recurse into all rtx sub-expressions, passing X as
     the parent so SUBREGs can be detected above.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	create_insn_allocnos (XEXP (x, i), x, output_p);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
    }
}
1912
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx_insn *insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), NULL, false);
  /* It might be an allocno living through from one subloop to
     another: create allocnos for pseudos live on entry that no insn
     in this block referenced.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
1935
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = df_get_live_in (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  /* A pseudo lives on the edge iff it is live out of E->src and live
     into E->dest.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map: the parent's allocno must be
	       created before the current node's.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
1966
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;

      ira_assert (current_loops != NULL);
      /* Loop entry edges: every header predecessor except the latch.  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      /* Loop exit edges.  */
      auto_vec<edge> edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
	create_loop_allocnos (e);
    }
}
1991
1992 /* Propagate information about allocnos modified inside the loop given
1993 by its LOOP_TREE_NODE to its parent. */
1994 static void
1995 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1996 {
1997 if (loop_tree_node == ira_loop_tree_root)
1998 return;
1999 ira_assert (loop_tree_node->bb == NULL);
2000 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
2001 loop_tree_node->modified_regnos);
2002 }
2003
/* Propagate ALLOCNO_HARD_REG_COSTS from A to PARENT_A.  Use SPILL_COST
   as the cost of spilling a register throughout A (which we have to do
   for PARENT_A allocations that conflict with A).  */
static void
ira_propagate_hard_reg_costs (ira_allocno_t parent_a, ira_allocno_t a,
			      int spill_cost)
{
  /* Hard regs PARENT_A could use but A could not (including, when
     caller-save spilling applies, regs A would need to save around
     calls).  */
  HARD_REG_SET conflicts = ira_total_conflict_hard_regs (a);
  if (ira_caller_save_loop_spill_p (parent_a, a, spill_cost))
    conflicts |= ira_need_caller_save_regs (a);
  conflicts &= ~ira_total_conflict_hard_regs (parent_a);

  auto costs = ALLOCNO_HARD_REG_COSTS (a);
  if (!hard_reg_set_empty_p (conflicts))
    ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (a) = true;
  else if (!costs)
    /* No conflicts and no per-register costs: nothing to add.  */
    return;

  auto aclass = ALLOCNO_CLASS (a);
  ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (parent_a),
			      aclass, ALLOCNO_CLASS_COST (parent_a));
  auto parent_costs = ALLOCNO_HARD_REG_COSTS (parent_a);
  for (int i = 0; i < ira_class_hard_regs_num[aclass]; ++i)
    if (TEST_HARD_REG_BIT (conflicts, ira_class_hard_regs[aclass][i]))
      /* PARENT_A taking this register forces A to be spilled.  */
      parent_costs[i] += spill_cost;
    else if (costs)
      /* The cost to A of allocating this register to PARENT_A can't
	 be more than the cost of spilling the register throughout A.  */
      parent_costs[i] += MIN (costs[i], spill_cost);
}
2034
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  /* Only regional allocation propagates info upward.  */
  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  /* Calculate the cost of storing to memory on entry to A's loop,
	     referencing as memory within A's loop, and restoring from
	     memory on exit from A's loop.  */
	  ira_loop_border_costs border_costs (a);
	  int spill_cost = INT_MAX;
	  if (ira_subloop_allocnos_can_differ_p (parent_a))
	    spill_cost = (border_costs.spill_inside_loop_cost ()
			  + ALLOCNO_MEMORY_COST (a));

	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);

	  /* If A's allocation can differ from PARENT_A's, we can if necessary
	     spill PARENT_A on entry to A's loop and restore it afterwards.
	     Doing that has cost SPILL_COST.  */
	  if (!ira_subloop_allocnos_can_differ_p (parent_a))
	    merge_hard_reg_conflicts (a, parent_a, true);

	  if (!ira_caller_save_loop_spill_p (parent_a, a, spill_cost))
	    {
	      /* Accumulate A's call-related info into PARENT_A.  */
	      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		+= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
		|= ALLOCNO_CROSSED_CALLS_ABIS (a);
	      ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
		|= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
	    }
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  aclass = ALLOCNO_CLASS (a);
	  ira_assert (aclass == ALLOCNO_CLASS (parent_a));
	  ira_propagate_hard_reg_costs (parent_a, a, spill_cost);
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     aclass,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  /* The cost to A of allocating a register to PARENT_A can't be
	     more than the cost of spilling the register throughout A.  */
	  ALLOCNO_CLASS_COST (parent_a)
	    += MIN (ALLOCNO_CLASS_COST (a), spill_cost);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
2111
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  /* A second, bottom-up traversal propagates modified-regno info;
     it is skipped when not optimizing.  */
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
2125
2126
2127
2129 /* The page contains function to remove some regions from a separate
2130 register allocation. We remove regions whose separate allocation
2131 will hardly improve the result. As a result we speed up regional
2132 register allocation. */
2133
2134 /* The function changes the object in range list given by R to OBJ. */
2135 static void
2136 change_object_in_range_list (live_range_t r, ira_object_t obj)
2137 {
2138 for (; r != NULL; r = r->next)
2139 r->object = obj;
2140 }
2141
2142 /* Move all live ranges associated with allocno FROM to allocno TO. */
2143 static void
2144 move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2145 {
2146 int i;
2147 int n = ALLOCNO_NUM_OBJECTS (from);
2148
2149 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2150
2151 for (i = 0; i < n; i++)
2152 {
2153 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2154 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2155 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2156
2157 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2158 {
2159 fprintf (ira_dump_file,
2160 " Moving ranges of a%dr%d to a%dr%d: ",
2161 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2162 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2163 ira_print_live_range_list (ira_dump_file, lr);
2164 }
2165 change_object_in_range_list (lr, to_obj);
2166 OBJECT_LIVE_RANGES (to_obj)
2167 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2168 OBJECT_LIVE_RANGES (from_obj) = NULL;
2169 }
2170 }
2171
2172 static void
2173 copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2174 {
2175 int i;
2176 int n = ALLOCNO_NUM_OBJECTS (from);
2177
2178 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2179
2180 for (i = 0; i < n; i++)
2181 {
2182 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2183 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2184 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2185
2186 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2187 {
2188 fprintf (ira_dump_file, " Copying ranges of a%dr%d to a%dr%d: ",
2189 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2190 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2191 ira_print_live_range_list (ira_dump_file, lr);
2192 }
2193 lr = ira_copy_live_range_list (lr);
2194 change_object_in_range_list (lr, to_obj);
2195 OBJECT_LIVE_RANGES (to_obj)
2196 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2197 }
2198 }
2199
2200 /* Return TRUE if NODE represents a loop with low register
2201 pressure. */
2202 static bool
2203 low_pressure_loop_node_p (ira_loop_tree_node_t node)
2204 {
2205 int i;
2206 enum reg_class pclass;
2207
2208 if (node->bb != NULL)
2209 return false;
2210
2211 for (i = 0; i < ira_pressure_classes_num; i++)
2212 {
2213 pclass = ira_pressure_classes[i];
2214 if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2215 && ira_class_hard_regs_num[pclass] > 1)
2216 return false;
2217 }
2218 return true;
2219 }
2220
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.cc cannot deal with such edges.  */
static bool
loop_with_complex_edge_p (class loop *loop)
{
  edge_iterator ei;
  edge e;
  int ix;

  /* An EH edge into the loop header makes the loop complex.  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  /* So does any complex exit edge.  */
  auto_vec<edge> exit_edges = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (exit_edges, ix, e)
    if (e->flags & EDGE_COMPLEX)
      return true;
  return false;
}
#endif
2247
2248 /* Sort loops for marking them for removal. We put already marked
2249 loops first, then less frequent loops next, and then outer loops
2250 next. */
2251 static int
2252 loop_compare_func (const void *v1p, const void *v2p)
2253 {
2254 int diff;
2255 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2256 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2257
2258 ira_assert (l1->parent != NULL && l2->parent != NULL);
2259 if (l1->to_remove_p && ! l2->to_remove_p)
2260 return -1;
2261 if (! l1->to_remove_p && l2->to_remove_p)
2262 return 1;
2263 if ((diff = l1->loop->header->count.to_frequency (cfun)
2264 - l2->loop->header->count.to_frequency (cfun)) != 0)
2265 return diff;
2266 if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
2267 return diff;
2268 /* Make sorting stable. */
2269 return l1->loop_num - l2->loop_num;
2270 }
2271
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than param_ira_max_loops_num of them.  Loop with EH
   exit or enter edges are removed too because the allocation might
   require put pseudo moves on the EH edges (we could still do this
   for pseudos with caller saved hard registers in some cases but it
   is impossible to say here or during top-down allocation pass what
   hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  /* Scratch array large enough for every loop in the function.  */
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	/* Mark loops removable on their own merits: low pressure both
	   here and in the parent, or (stack-reg targets) complex
	   enter/exit edges.  */
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  /* The sort puts already-marked and cheap loops first; additionally
     mark loops from the front until at most param_ira_max_loops_num
     unmarked loops remain.  */
  for (i = 0; i < n - param_ira_max_loops_num; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->count.to_frequency (cfun),
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
2330
2331 /* Mark all loops but root for removing. */
2332 static void
2333 mark_all_loops_for_removal (void)
2334 {
2335 int i;
2336 loop_p loop;
2337
2338 ira_assert (current_loops != NULL);
2339 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
2340 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2341 {
2342 if (ira_loop_nodes[i].parent == NULL)
2343 {
2344 /* Don't remove the root. */
2345 ira_loop_nodes[i].to_remove_p = false;
2346 continue;
2347 }
2348 ira_loop_nodes[i].to_remove_p = true;
2349 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2350 fprintf
2351 (ira_dump_file,
2352 " Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2353 ira_loop_nodes[i].loop_num,
2354 ira_loop_nodes[i].loop->header->index,
2355 ira_loop_nodes[i].loop->header->count.to_frequency (cfun),
2356 loop_depth (ira_loop_nodes[i].loop));
2357 }
2358 }
2359
2360 /* Definition of vector of loop tree nodes. */
2361
2362 /* Vec containing references to all removed loop tree nodes. */
2363 static vec<ira_loop_tree_node_t> removed_loop_vec;
2364
2365 /* Vec containing references to all children of loop tree nodes. */
2366 static vec<ira_loop_tree_node_t> children_vec;
2367
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  */
/* NOTE(review): "uneccesary" is a long-standing typo in this
   function's name; it is kept because callers use it.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  /* A surviving node is pushed so its (possibly new) parent can claim
     it when unwinding.  */
  if (! remove_p)
    children_vec.safe_push (node);
  /* Everything pushed past START belongs to NODE (or, if NODE is
     removed, to NODE's nearest surviving ancestor).  */
  start = children_vec.length ();
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  /* The child/subloop lists are rebuilt below from the stack.  */
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* Leave NODE's accumulated children on the stack for the
	 ancestor; record NODE itself for later destruction.  */
      removed_loop_vec.safe_push (node);
      return;
    }
  /* Re-link every surviving descendant pushed since START as a direct
     child of NODE; loop nodes also go on the subloop list.  */
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
2405
2406 /* Return TRUE if NODE is inside PARENT. */
2407 static bool
2408 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2409 {
2410 for (node = node->parent; node != NULL; node = node->parent)
2411 if (node == parent)
2412 return true;
2413 return false;
2414 }
2415
2416 /* Sort allocnos according to their order in regno allocno list. */
2417 static int
2418 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2419 {
2420 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2421 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2422 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2423 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2424
2425 if (loop_is_inside_p (n1, n2))
2426 return -1;
2427 else if (loop_is_inside_p (n2, n1))
2428 return 1;
2429 /* If allocnos are equally good, sort by allocno numbers, so that
2430 the results of qsort leave nothing to chance. We put allocnos
2431 with higher number first in the list because it is the original
2432 order for allocnos from loops on the same levels. */
2433 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2434 }
2435
2436 /* This array is used to sort allocnos to restore allocno order in
2437 the regno allocno list. */
2438 static ira_allocno_t *regno_allocnos;
2439
2440 /* Restore allocno order for REGNO in the regno allocno list. */
2441 static void
2442 ira_rebuild_regno_allocno_list (int regno)
2443 {
2444 int i, n;
2445 ira_allocno_t a;
2446
2447 for (n = 0, a = ira_regno_allocno_map[regno];
2448 a != NULL;
2449 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2450 regno_allocnos[n++] = a;
2451 ira_assert (n > 0);
2452 qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2453 regno_allocno_order_compare_func);
2454 for (i = 1; i < n; i++)
2455 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2456 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2457 ira_regno_allocno_map[regno] = regno_allocnos[0];
2458 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2459 fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
2460 }
2461
/* Propagate info from allocno FROM_A to allocno A.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  /* Accumulate conflicts, reference/frequency and call-crossing info
     from FROM_A into A.  */
  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CROSSED_CALLS_ABIS (a) |= ALLOCNO_CROSSED_CALLS_ABIS (from_a);
  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
    |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a);

  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  /* bad_spill_p is sticky-false: one "good" source clears it.  */
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     aclass,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
2493
/* Remove allocnos from loops removed from the allocation
   consideration.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      /* Walk the regno allocno list, dropping allocnos that live in
	 removed regions.  */
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Find the nearest ancestor that either already has an
		 allocno for REGNO or survives removal.  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  ira_remove_allocno_prefs (a);
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	/* We need to restore the order in regno allocno list.  */
	{
	  /* The scratch array is allocated lazily, once, and shared
	     by all rebuilds in this pass.  */
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  /* Live-range chains must be rebuilt after any merge.  */
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
2570
/* Remove allocnos from all loops but the root.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  /* Pass 1: fold every non-root, non-cap allocno into the root
     region's allocno for the same regno (creating it by re-homing
     the allocno when the root has none yet).  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      /* Propagate only from the topmost allocno of each regno chain
	 (the one whose parent has no allocno for the regno), so info
	 is accumulated into the root exactly once.  */
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  /* Pass 2: consistency checks only.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  /* Pass 3: keep only the root-region allocno for each regno;
     finalize its maps/flags and destroy the rest.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  /* With one region, the total conflicts become the plain
	     conflicts.  */
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    OBJECT_CONFLICT_HARD_REGS (obj)
	      = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	{
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
2642
/* Remove loops from consideration.  We remove all loops except for
   root if ALL_P or loops for which a separate allocation will not
   improve the result.  We have to do this after allocno creation and
   their costs and allocno class evaluation because only after that
   the register pressure can be known and is calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  /* Step 1: decide which loop nodes go.  */
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  /* Step 2: rebuild the loop tree without the marked nodes.  Both
     vectors are sized for the worst case: every BB plus every loop.  */
  children_vec.create (last_basic_block_for_fn (cfun)
		       + number_of_loops (cfun));
  removed_loop_vec.create (last_basic_block_for_fn (cfun)
			   + number_of_loops (cfun));
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  children_vec.release ();
  /* Step 3: merge/destroy the allocnos of the removed regions.  */
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  /* Step 4: free the removed loop tree nodes themselves.  */
  while (removed_loop_vec.length () > 0)
    finish_loop_tree_node (removed_loop_vec.pop ());
  removed_loop_vec.release ();
}
2671
2672
2673
2675 /* At this point true value of allocno attribute bad_spill_p means
2676 that there is an insn where allocno occurs and where the allocno
2677 cannot be used as memory. The function updates the attribute, now
2678 it can be true only for allocnos which cannot be used as memory in
2679 an insn and in whose live ranges there is other allocno deaths.
2680 Spilling allocnos with true value will not improve the code because
2681 it will not make other allocnos colorable and additional reloads
2682 for the corresponding pseudo will be generated in reload pass for
2683 each insn it occurs.
2684
2685 This is a trick mentioned in one classic article of Chaitin etc
2686 which is frequently omitted in other implementations of RA based on
2687 graph coloring. */
2688 static void
2689 update_bad_spill_attribute (void)
2690 {
2691 int i;
2692 ira_allocno_t a;
2693 ira_allocno_iterator ai;
2694 ira_allocno_object_iterator aoi;
2695 ira_object_t obj;
2696 live_range_t r;
2697 enum reg_class aclass;
2698 bitmap_head dead_points[N_REG_CLASSES];
2699
2700 for (i = 0; i < ira_allocno_classes_num; i++)
2701 {
2702 aclass = ira_allocno_classes[i];
2703 bitmap_initialize (&dead_points[aclass], ®_obstack);
2704 }
2705 FOR_EACH_ALLOCNO (a, ai)
2706 {
2707 aclass = ALLOCNO_CLASS (a);
2708 if (aclass == NO_REGS)
2709 continue;
2710 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2711 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2712 bitmap_set_bit (&dead_points[aclass], r->finish);
2713 }
2714 FOR_EACH_ALLOCNO (a, ai)
2715 {
2716 aclass = ALLOCNO_CLASS (a);
2717 if (aclass == NO_REGS)
2718 continue;
2719 if (! ALLOCNO_BAD_SPILL_P (a))
2720 continue;
2721 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2722 {
2723 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2724 {
2725 for (i = r->start + 1; i < r->finish; i++)
2726 if (bitmap_bit_p (&dead_points[aclass], i))
2727 break;
2728 if (i < r->finish)
2729 break;
2730 }
2731 if (r != NULL)
2732 {
2733 ALLOCNO_BAD_SPILL_P (a) = false;
2734 break;
2735 }
2736 }
2737 }
2738 for (i = 0; i < ira_allocno_classes_num; i++)
2739 {
2740 aclass = ira_allocno_classes[i];
2741 bitmap_clear (&dead_points[aclass]);
2742 }
2743 }
2744
2745
2746
/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  /* Pass 1: per-object min/max from the object's own range list.
     The list is ordered from the last range to the first, so the
     head carries the max finish and the tail the min start.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  /* Pass 2: accumulate child min/max into caps and parent-region
     allocnos (the regno allocno list visits inner regions first).  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      {
		/* The object is not used and hence does not live.  */
		ira_assert (OBJECT_LIVE_RANGES (obj) == NULL);
		/* Empty range [1, 0] intersects nothing.  */
		OBJECT_MAX (obj) = 0;
		OBJECT_MIN (obj) = 1;
		continue;
	      }
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		/* Widen the whole chain of caps above A.  */
		for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  /* Sanity check: every min/max must lie within [0, ira_max_point].  */
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((OBJECT_MIN (obj) >= 0 && OBJECT_MIN (obj) <= ira_max_point)
	  && (OBJECT_MAX (obj) >= 0 && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
2833
2834 /* Sort allocnos according to their live ranges. Allocnos with
2835 smaller allocno class are put first unless we use priority
2836 coloring. Allocnos with the same class are ordered according
2837 their start (min). Allocnos with the same start are ordered
2838 according their finish (max). */
2839 static int
2840 object_range_compare_func (const void *v1p, const void *v2p)
2841 {
2842 int diff;
2843 ira_object_t obj1 = *(const ira_object_t *) v1p;
2844 ira_object_t obj2 = *(const ira_object_t *) v2p;
2845 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2846 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2847
2848 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2849 return diff;
2850 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2851 return diff;
2852 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2853 }
2854
2855 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2856 static void
2857 sort_conflict_id_map (void)
2858 {
2859 int i, num;
2860 ira_allocno_t a;
2861 ira_allocno_iterator ai;
2862
2863 num = 0;
2864 FOR_EACH_ALLOCNO (a, ai)
2865 {
2866 ira_allocno_object_iterator oi;
2867 ira_object_t obj;
2868
2869 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2870 ira_object_id_map[num++] = obj;
2871 }
2872 if (num > 1)
2873 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2874 object_range_compare_func);
2875 for (i = 0; i < num; i++)
2876 {
2877 ira_object_t obj = ira_object_id_map[i];
2878
2879 gcc_assert (obj != NULL);
2880 OBJECT_CONFLICT_ID (obj) = i;
2881 }
2882 for (i = num; i < ira_objects_num; i++)
2883 ira_object_id_map[i] = NULL;
2884 }
2885
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  /* Forward sweep over objects in conflict-id order: compute for each
     object the smallest conflict id whose range may still overlap it,
     and stash the original range start in live_range_min[] (OBJECT_MIN
     is repurposed to hold the id bound).  */
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocno with smaller ids will
	     be also skipped because of the secondary sorting the
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  /* Backward sweep: compute for each object the largest conflict id
     that may overlap it.  last_lived[p] records, for program point p,
     the id of the last (highest-id) object seen whose range covers p.  */
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* When filling we may go beyond this object's range finish:
	 if a subsequently processed object (with a smaller conflict
	 id) finishes beyond our finish, the two ranges definitely
	 intersect, because objects with bigger conflict ids have
	 range starts that are no smaller than those with smaller
	 ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra conflicts in
     subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  /* Collect the overall id interval of all word-0 subobjects ...  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  /* ... and widen every word-0 subobject to cover that interval.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
3010
3011
3012
3014 static void
3015 create_caps (void)
3016 {
3017 ira_allocno_t a;
3018 ira_allocno_iterator ai;
3019 ira_loop_tree_node_t loop_tree_node;
3020
3021 FOR_EACH_ALLOCNO (a, ai)
3022 {
3023 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
3024 continue;
3025 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3026 create_cap_allocno (a);
3027 else if (ALLOCNO_CAP (a) == NULL)
3028 {
3029 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3030 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
3031 create_cap_allocno (a);
3032 }
3033 }
3034 }
3035
3036
3037
3039 /* The page contains code transforming more one region internal
3040 representation (IR) to one region IR which is necessary for reload.
3041 This transformation is called IR flattening. We might just rebuild
3042 the IR for one region but we don't do it because it takes a lot of
3043 time. */
3044
3045 /* Map: regno -> allocnos which will finally represent the regno for
3046 IR with one region. */
3047 static ira_allocno_t *regno_top_level_allocno_map;
3048
3049 /* Find the allocno that corresponds to A at a level one higher up in the
3050 loop tree. Returns NULL if A is a cap, or if it has no parent. */
3051 ira_allocno_t
3052 ira_parent_allocno (ira_allocno_t a)
3053 {
3054 ira_loop_tree_node_t parent;
3055
3056 if (ALLOCNO_CAP (a) != NULL)
3057 return NULL;
3058
3059 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3060 if (parent == NULL)
3061 return NULL;
3062
3063 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3064 }
3065
3066 /* Find the allocno that corresponds to A at a level one higher up in the
3067 loop tree. If ALLOCNO_CAP is set for A, return that. */
3068 ira_allocno_t
3069 ira_parent_or_cap_allocno (ira_allocno_t a)
3070 {
3071 if (ALLOCNO_CAP (a) != NULL)
3072 return ALLOCNO_CAP (a);
3073
3074 return ira_parent_allocno (a);
3075 }
3076
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      /* Walk up the loop tree to the nearest ancestor whose allocno
	 for REGNO is either missing or is the surviving top-level
	 allocno marked as a destination of an optimized-away store.  */
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      /* Fold A's live information into the surviving destination
	 allocno PARENT_A.  */
      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
	|= ALLOCNO_CROSSED_CALLS_ABIS (a);
      ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
	|= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
3131
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  /* Start from the accumulated (total) conflict info for each
     object, since after flattening each allocno stands alone.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they will never
	   be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
	  = OBJECT_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      /* A top-level allocno: it survives flattening.  */
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      /* Same pseudo in parent and child: fold the child into
		 the parent allocno.  */
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  /* The child got its own pseudo; subtract its statistics
	     from every ancestor because they were propagated up when
	     the region tree was built.  */
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      /* Assume that ALLOCNO_CROSSED_CALLS_ABIS and
		 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS stay the same.
		 We'd need to rebuild the IR to do better.  */
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      /* Sweep over all program points, keeping the set of live
	 objects, and record a conflict between each object starting
	 here and everything currently live.  */
      objects_live = sparseset_alloc (ira_objects_num);
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  /* Copies involving caps are always removed.  */
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* This copy was generated in ira-emit.cc.  */
      else
	{
	  /* Check that the copy was not propagated from level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		    && (REGNO (allocno_emit_reg (second))
			== REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  /* Retarget the copy at the surviving top-level allocnos.  */
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	  continue;
	}
      /* A surviving allocno now lives in the root region.  */
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      add_allocno_copy_to_list (cp);
      swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
3409
3410
3411
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_loop_tree_node_t node = ALLOCNO_LOOP_TREE_NODE (a);

      /* Every allocno must be registered in its home region.  */
      ira_assert (bitmap_bit_p (node->all_allocnos, ALLOCNO_NUM (a)));
      if (node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* A member of a cap must itself have been capped.  */
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	/* An uncapped non-root allocno must be represented in the
	   parent region and be on the region border.  */
	ira_assert (node->parent->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
3440
3441 /* Identify allocnos which prefer a register class with a single hard register.
3442 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3443 less likely to use the preferred singleton register. */
3444 static void
3445 update_conflict_hard_reg_costs (void)
3446 {
3447 ira_allocno_t a;
3448 ira_allocno_iterator ai;
3449 int i, index, min;
3450
3451 FOR_EACH_ALLOCNO (a, ai)
3452 {
3453 reg_class_t aclass = ALLOCNO_CLASS (a);
3454 reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3455 int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3456 if (singleton < 0)
3457 continue;
3458 index = ira_class_hard_reg_index[(int) aclass][singleton];
3459 if (index < 0)
3460 continue;
3461 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3462 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3463 continue;
3464 min = INT_MAX;
3465 for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3466 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3467 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3468 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3469 if (min == INT_MAX)
3470 continue;
3471 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3472 aclass, 0);
3473 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3474 -= min - ALLOCNO_CLASS_COST (a);
3475 }
3476 }
3477
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate loop
   structure (besides nodes representing all function and the basic
   blocks) for regional allocation.  A true return means that we
   really need to flatten IR before the reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* Regional allocation: propagate allocno info to upper region
	 allocnos and build caps for border allocnos.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, ira_need_caller_save_regs (a));
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      /* Gather and dump overall IR statistics: conflict count N,
	 live-range count NR, allocnos with several objects NR_BIG.  */
      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
3572
/* Release the data created by function ira_build.  */
void
ira_destroy (void)
{
  /* Tear down IRA data structures in the same groups they were
     initiated in ira_build.  */
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}
3584