/* Integrated Register Allocator (IRA) intercommunication header file.
   Copyright (C) 2006-2018 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov (at) redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20
21 #ifndef GCC_IRA_INT_H
22 #define GCC_IRA_INT_H
23
24 #include "recog.h"
25
26 /* To provide consistency in naming, all IRA external variables,
27 functions, common typedefs start with prefix ira_. */
28
29 #if CHECKING_P
30 #define ENABLE_IRA_CHECKING
31 #endif
32
33 #ifdef ENABLE_IRA_CHECKING
34 #define ira_assert(c) gcc_assert (c)
35 #else
36 /* Always define and include C, so that warnings for empty body in an
37 'if' statement and unused variable do not occur. */
38 #define ira_assert(c) ((void)(0 && (c)))
39 #endif
40
41 /* Compute register frequency from edge frequency FREQ. It is
42 analogous to REG_FREQ_FROM_BB. When optimizing for size, or
43 profile driven feedback is available and the function is never
44 executed, frequency is always equivalent. Otherwise rescale the
45 edge frequency. */
46 #define REG_FREQ_FROM_EDGE_FREQ(freq) \
47 (optimize_function_for_size_p (cfun) \
48 ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
49 ? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
50
51 /* A modified value of flag `-fira-verbose' used internally. */
52 extern int internal_flag_ira_verbose;
53
54 /* Dump file of the allocator if it is not NULL. */
55 extern FILE *ira_dump_file;
56
57 /* Typedefs for pointers to allocno live range, allocno, and copy of
58 allocnos. */
59 typedef struct live_range *live_range_t;
60 typedef struct ira_allocno *ira_allocno_t;
61 typedef struct ira_allocno_pref *ira_pref_t;
62 typedef struct ira_allocno_copy *ira_copy_t;
63 typedef struct ira_object *ira_object_t;
64
65 /* Definition of vector of allocnos and copies. */
66
67 /* Typedef for pointer to the subsequent structure. */
68 typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
69
70 typedef unsigned short move_table[N_REG_CLASSES];
71
72 /* In general case, IRA is a regional allocator. The regions are
73 nested and form a tree. Currently regions are natural loops. The
74 following structure describes loop tree node (representing basic
75 block or loop). We need such tree because the loop tree from
76 cfgloop.h is not convenient for the optimization: basic blocks are
77 not a part of the tree from cfgloop.h. We also use the nodes for
78 storing additional information about basic blocks/loops for the
79 register allocation purposes. */
80 struct ira_loop_tree_node
81 {
82 /* The node represents basic block if children == NULL. */
83 basic_block bb; /* NULL for loop. */
84 /* NULL for BB or for loop tree root if we did not build CFG loop tree. */
85 struct loop *loop;
86 /* NEXT/SUBLOOP_NEXT is the next node/loop-node of the same parent.
87 SUBLOOP_NEXT is always NULL for BBs. */
88 ira_loop_tree_node_t subloop_next, next;
89 /* CHILDREN/SUBLOOPS is the first node/loop-node immediately inside
90 the node. They are NULL for BBs. */
91 ira_loop_tree_node_t subloops, children;
92 /* The node immediately containing given node. */
93 ira_loop_tree_node_t parent;
94
95 /* Loop level in range [0, ira_loop_tree_height). */
96 int level;
97
98 /* All the following members are defined only for nodes representing
99 loops. */
100
101 /* The loop number from CFG loop tree. The root number is 0. */
102 int loop_num;
103
104 /* True if the loop was marked for removal from the register
105 allocation. */
106 bool to_remove_p;
107
108 /* Allocnos in the loop corresponding to their regnos. If it is
109 NULL the loop does not form a separate register allocation region
110 (e.g. because it has abnormal enter/exit edges and we can not put
111 code for register shuffling on the edges if a different
112 allocation is used for a pseudo-register on different sides of
113 the edges). Caps are not in the map (remember we can have more
114 one cap with the same regno in a region). */
115 ira_allocno_t *regno_allocno_map;
116
117 /* True if there is an entry to given loop not from its parent (or
118 grandparent) basic block. For example, it is possible for two
119 adjacent loops inside another loop. */
120 bool entered_from_non_parent_p;
121
122 /* Maximal register pressure inside loop for given register class
123 (defined only for the pressure classes). */
124 int reg_pressure[N_REG_CLASSES];
125
126 /* Numbers of allocnos referred or living in the loop node (except
127 for its subloops). */
128 bitmap all_allocnos;
129
130 /* Numbers of allocnos living at the loop borders. */
131 bitmap border_allocnos;
132
133 /* Regnos of pseudos modified in the loop node (including its
134 subloops). */
135 bitmap modified_regnos;
136
137 /* Numbers of copies referred in the corresponding loop. */
138 bitmap local_copies;
139 };
140
141 /* The root of the loop tree corresponding to the all function. */
142 extern ira_loop_tree_node_t ira_loop_tree_root;
143
144 /* Height of the loop tree. */
145 extern int ira_loop_tree_height;
146
147 /* All nodes representing basic blocks are referred through the
148 following array. We can not use basic block member `aux' for this
149 because it is used for insertion of insns on edges. */
150 extern ira_loop_tree_node_t ira_bb_nodes;
151
152 /* Two access macros to the nodes representing basic blocks. */
153 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
154 #define IRA_BB_NODE_BY_INDEX(index) __extension__ \
155 (({ ira_loop_tree_node_t _node = (&ira_bb_nodes[index]); \
156 if (_node->children != NULL || _node->loop != NULL || _node->bb == NULL)\
157 { \
158 fprintf (stderr, \
159 "\n%s: %d: error in %s: it is not a block node\n", \
160 __FILE__, __LINE__, __FUNCTION__); \
161 gcc_unreachable (); \
162 } \
163 _node; }))
164 #else
165 #define IRA_BB_NODE_BY_INDEX(index) (&ira_bb_nodes[index])
166 #endif
167
168 #define IRA_BB_NODE(bb) IRA_BB_NODE_BY_INDEX ((bb)->index)
169
170 /* All nodes representing loops are referred through the following
171 array. */
172 extern ira_loop_tree_node_t ira_loop_nodes;
173
174 /* Two access macros to the nodes representing loops. */
175 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
176 #define IRA_LOOP_NODE_BY_INDEX(index) __extension__ \
177 (({ ira_loop_tree_node_t const _node = (&ira_loop_nodes[index]); \
178 if (_node->children == NULL || _node->bb != NULL \
179 || (_node->loop == NULL && current_loops != NULL)) \
180 { \
181 fprintf (stderr, \
182 "\n%s: %d: error in %s: it is not a loop node\n", \
183 __FILE__, __LINE__, __FUNCTION__); \
184 gcc_unreachable (); \
185 } \
186 _node; }))
187 #else
188 #define IRA_LOOP_NODE_BY_INDEX(index) (&ira_loop_nodes[index])
189 #endif
190
191 #define IRA_LOOP_NODE(loop) IRA_LOOP_NODE_BY_INDEX ((loop)->num)
192
193
194 /* The structure describes program points where a given allocno lives.
196 If the live ranges of two allocnos are intersected, the allocnos
197 are in conflict. */
198 struct live_range
199 {
200 /* Object whose live range is described by given structure. */
201 ira_object_t object;
202 /* Program point range. */
203 int start, finish;
204 /* Next structure describing program points where the allocno
205 lives. */
206 live_range_t next;
207 /* Pointer to structures with the same start/finish. */
208 live_range_t start_next, finish_next;
209 };
210
211 /* Program points are enumerated by numbers from range
212 0..IRA_MAX_POINT-1. There are approximately two times more program
213 points than insns. Program points are places in the program where
214 liveness info can be changed. In most general case (there are more
215 complicated cases too) some program points correspond to places
216 where input operand dies and other ones correspond to places where
217 output operands are born. */
218 extern int ira_max_point;
219
220 /* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
221 live ranges with given start/finish point. */
222 extern live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
223
224 /* A structure representing conflict information for an allocno
225 (or one of its subwords). */
226 struct ira_object
227 {
228 /* The allocno associated with this record. */
229 ira_allocno_t allocno;
230 /* Vector of accumulated conflicting conflict_redords with NULL end
231 marker (if OBJECT_CONFLICT_VEC_P is true) or conflict bit vector
232 otherwise. */
233 void *conflicts_array;
234 /* Pointer to structures describing at what program point the
235 object lives. We always maintain the list in such way that *the
236 ranges in the list are not intersected and ordered by decreasing
237 their program points*. */
238 live_range_t live_ranges;
239 /* The subword within ALLOCNO which is represented by this object.
240 Zero means the lowest-order subword (or the entire allocno in case
241 it is not being tracked in subwords). */
242 int subword;
243 /* Allocated size of the conflicts array. */
244 unsigned int conflicts_array_size;
245 /* A unique number for every instance of this structure, which is used
246 to represent it in conflict bit vectors. */
247 int id;
248 /* Before building conflicts, MIN and MAX are initialized to
249 correspondingly minimal and maximal points of the accumulated
250 live ranges. Afterwards, they hold the minimal and maximal ids
251 of other ira_objects that this one can conflict with. */
252 int min, max;
253 /* Initial and accumulated hard registers conflicting with this
254 object and as a consequences can not be assigned to the allocno.
255 All non-allocatable hard regs and hard regs of register classes
256 different from given allocno one are included in the sets. */
257 HARD_REG_SET conflict_hard_regs, total_conflict_hard_regs;
258 /* Number of accumulated conflicts in the vector of conflicting
259 objects. */
260 int num_accumulated_conflicts;
261 /* TRUE if conflicts are represented by a vector of pointers to
262 ira_object structures. Otherwise, we use a bit vector indexed
263 by conflict ID numbers. */
264 unsigned int conflict_vec_p : 1;
265 };
266
267 /* A structure representing an allocno (allocation entity). Allocno
268 represents a pseudo-register in an allocation region. If
269 pseudo-register does not live in a region but it lives in the
270 nested regions, it is represented in the region by special allocno
271 called *cap*. There may be more one cap representing the same
272 pseudo-register in region. It means that the corresponding
273 pseudo-register lives in more one non-intersected subregion. */
274 struct ira_allocno
275 {
276 /* The allocno order number starting with 0. Each allocno has an
277 unique number and the number is never changed for the
278 allocno. */
279 int num;
280 /* Regno for allocno or cap. */
281 int regno;
282 /* Mode of the allocno which is the mode of the corresponding
283 pseudo-register. */
284 ENUM_BITFIELD (machine_mode) mode : 8;
285 /* Widest mode of the allocno which in at least one case could be
286 for paradoxical subregs where wmode > mode. */
287 ENUM_BITFIELD (machine_mode) wmode : 8;
288 /* Register class which should be used for allocation for given
289 allocno. NO_REGS means that we should use memory. */
290 ENUM_BITFIELD (reg_class) aclass : 16;
291 /* During the reload, value TRUE means that we should not reassign a
292 hard register to the allocno got memory earlier. It is set up
293 when we removed memory-memory move insn before each iteration of
294 the reload. */
295 unsigned int dont_reassign_p : 1;
296 #ifdef STACK_REGS
297 /* Set to TRUE if allocno can't be assigned to the stack hard
298 register correspondingly in this region and area including the
299 region and all its subregions recursively. */
300 unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
301 #endif
302 /* TRUE value means that there is no sense to spill the allocno
303 during coloring because the spill will result in additional
304 reloads in reload pass. */
305 unsigned int bad_spill_p : 1;
306 /* TRUE if a hard register or memory has been assigned to the
307 allocno. */
308 unsigned int assigned_p : 1;
309 /* TRUE if conflicts for given allocno are represented by vector of
310 pointers to the conflicting allocnos. Otherwise, we use a bit
311 vector where a bit with given index represents allocno with the
312 same number. */
313 unsigned int conflict_vec_p : 1;
314 /* Hard register assigned to given allocno. Negative value means
315 that memory was allocated to the allocno. During the reload,
316 spilled allocno has value equal to the corresponding stack slot
317 number (0, ...) - 2. Value -1 is used for allocnos spilled by the
318 reload (at this point pseudo-register has only one allocno) which
319 did not get stack slot yet. */
320 signed int hard_regno : 16;
321 /* Allocnos with the same regno are linked by the following member.
322 Allocnos corresponding to inner loops are first in the list (it
323 corresponds to depth-first traverse of the loops). */
324 ira_allocno_t next_regno_allocno;
325 /* There may be different allocnos with the same regno in different
326 regions. Allocnos are bound to the corresponding loop tree node.
327 Pseudo-register may have only one regular allocno with given loop
328 tree node but more than one cap (see comments above). */
329 ira_loop_tree_node_t loop_tree_node;
330 /* Accumulated usage references of the allocno. Here and below,
331 word 'accumulated' means info for given region and all nested
332 subregions. In this case, 'accumulated' means sum of references
333 of the corresponding pseudo-register in this region and in all
334 nested subregions recursively. */
335 int nrefs;
336 /* Accumulated frequency of usage of the allocno. */
337 int freq;
338 /* Minimal accumulated and updated costs of usage register of the
339 allocno class. */
340 int class_cost, updated_class_cost;
341 /* Minimal accumulated, and updated costs of memory for the allocno.
342 At the allocation start, the original and updated costs are
343 equal. The updated cost may be changed after finishing
344 allocation in a region and starting allocation in a subregion.
345 The change reflects the cost of spill/restore code on the
346 subregion border if we assign memory to the pseudo in the
347 subregion. */
348 int memory_cost, updated_memory_cost;
349 /* Accumulated number of points where the allocno lives and there is
350 excess pressure for its class. Excess pressure for a register
351 class at some point means that there are more allocnos of given
352 register class living at the point than number of hard-registers
353 of the class available for the allocation. */
354 int excess_pressure_points_num;
355 /* Allocno hard reg preferences. */
356 ira_pref_t allocno_prefs;
357 /* Copies to other non-conflicting allocnos. The copies can
358 represent move insn or potential move insn usually because of two
359 operand insn constraints. */
360 ira_copy_t allocno_copies;
361 /* It is a allocno (cap) representing given allocno on upper loop tree
362 level. */
363 ira_allocno_t cap;
364 /* It is a link to allocno (cap) on lower loop level represented by
365 given cap. Null if given allocno is not a cap. */
366 ira_allocno_t cap_member;
367 /* The number of objects tracked in the following array. */
368 int num_objects;
369 /* An array of structures describing conflict information and live
370 ranges for each object associated with the allocno. There may be
371 more than one such object in cases where the allocno represents a
372 multi-word register. */
373 ira_object_t objects[2];
374 /* Accumulated frequency of calls which given allocno
375 intersects. */
376 int call_freq;
377 /* Accumulated number of the intersected calls. */
378 int calls_crossed_num;
379 /* The number of calls across which it is live, but which should not
380 affect register preferences. */
381 int cheap_calls_crossed_num;
382 /* Registers clobbered by intersected calls. */
383 HARD_REG_SET crossed_calls_clobbered_regs;
384 /* Array of usage costs (accumulated and the one updated during
385 coloring) for each hard register of the allocno class. The
386 member value can be NULL if all costs are the same and equal to
387 CLASS_COST. For example, the costs of two different hard
388 registers can be different if one hard register is callee-saved
389 and another one is callee-used and the allocno lives through
390 calls. Another example can be case when for some insn the
391 corresponding pseudo-register value should be put in specific
392 register class (e.g. AREG for x86) which is a strict subset of
393 the allocno class (GENERAL_REGS for x86). We have updated costs
394 to reflect the situation when the usage cost of a hard register
395 is decreased because the allocno is connected to another allocno
396 by a copy and the another allocno has been assigned to the hard
397 register. */
398 int *hard_reg_costs, *updated_hard_reg_costs;
399 /* Array of decreasing costs (accumulated and the one updated during
400 coloring) for allocnos conflicting with given allocno for hard
401 regno of the allocno class. The member value can be NULL if all
402 costs are the same. These costs are used to reflect preferences
403 of other allocnos not assigned yet during assigning to given
404 allocno. */
405 int *conflict_hard_reg_costs, *updated_conflict_hard_reg_costs;
406 /* Different additional data. It is used to decrease size of
407 allocno data footprint. */
408 void *add_data;
409 };
410
411
412 /* All members of the allocno structures should be accessed only
413 through the following macros. */
414 #define ALLOCNO_NUM(A) ((A)->num)
415 #define ALLOCNO_REGNO(A) ((A)->regno)
416 #define ALLOCNO_REG(A) ((A)->reg)
417 #define ALLOCNO_NEXT_REGNO_ALLOCNO(A) ((A)->next_regno_allocno)
418 #define ALLOCNO_LOOP_TREE_NODE(A) ((A)->loop_tree_node)
419 #define ALLOCNO_CAP(A) ((A)->cap)
420 #define ALLOCNO_CAP_MEMBER(A) ((A)->cap_member)
421 #define ALLOCNO_NREFS(A) ((A)->nrefs)
422 #define ALLOCNO_FREQ(A) ((A)->freq)
423 #define ALLOCNO_HARD_REGNO(A) ((A)->hard_regno)
424 #define ALLOCNO_CALL_FREQ(A) ((A)->call_freq)
425 #define ALLOCNO_CALLS_CROSSED_NUM(A) ((A)->calls_crossed_num)
426 #define ALLOCNO_CHEAP_CALLS_CROSSED_NUM(A) ((A)->cheap_calls_crossed_num)
427 #define ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS(A) \
428 ((A)->crossed_calls_clobbered_regs)
429 #define ALLOCNO_MEM_OPTIMIZED_DEST(A) ((A)->mem_optimized_dest)
430 #define ALLOCNO_MEM_OPTIMIZED_DEST_P(A) ((A)->mem_optimized_dest_p)
431 #define ALLOCNO_SOMEWHERE_RENAMED_P(A) ((A)->somewhere_renamed_p)
432 #define ALLOCNO_CHILD_RENAMED_P(A) ((A)->child_renamed_p)
433 #define ALLOCNO_DONT_REASSIGN_P(A) ((A)->dont_reassign_p)
434 #ifdef STACK_REGS
435 #define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
436 #define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
437 #endif
438 #define ALLOCNO_BAD_SPILL_P(A) ((A)->bad_spill_p)
439 #define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
440 #define ALLOCNO_MODE(A) ((A)->mode)
441 #define ALLOCNO_WMODE(A) ((A)->wmode)
442 #define ALLOCNO_PREFS(A) ((A)->allocno_prefs)
443 #define ALLOCNO_COPIES(A) ((A)->allocno_copies)
444 #define ALLOCNO_HARD_REG_COSTS(A) ((A)->hard_reg_costs)
445 #define ALLOCNO_UPDATED_HARD_REG_COSTS(A) ((A)->updated_hard_reg_costs)
446 #define ALLOCNO_CONFLICT_HARD_REG_COSTS(A) \
447 ((A)->conflict_hard_reg_costs)
448 #define ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS(A) \
449 ((A)->updated_conflict_hard_reg_costs)
450 #define ALLOCNO_CLASS(A) ((A)->aclass)
451 #define ALLOCNO_CLASS_COST(A) ((A)->class_cost)
452 #define ALLOCNO_UPDATED_CLASS_COST(A) ((A)->updated_class_cost)
453 #define ALLOCNO_MEMORY_COST(A) ((A)->memory_cost)
454 #define ALLOCNO_UPDATED_MEMORY_COST(A) ((A)->updated_memory_cost)
455 #define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) \
456 ((A)->excess_pressure_points_num)
457 #define ALLOCNO_OBJECT(A,N) ((A)->objects[N])
458 #define ALLOCNO_NUM_OBJECTS(A) ((A)->num_objects)
459 #define ALLOCNO_ADD_DATA(A) ((A)->add_data)
460
461 /* Typedef for pointer to the subsequent structure. */
462 typedef struct ira_emit_data *ira_emit_data_t;
463
464 /* Allocno bound data used for emit pseudo live range split insns and
465 to flattening IR. */
466 struct ira_emit_data
467 {
468 /* TRUE if the allocno assigned to memory was a destination of
469 removed move (see ira-emit.c) at loop exit because the value of
470 the corresponding pseudo-register is not changed inside the
471 loop. */
472 unsigned int mem_optimized_dest_p : 1;
473 /* TRUE if the corresponding pseudo-register has disjoint live
474 ranges and the other allocnos of the pseudo-register except this
475 one changed REG. */
476 unsigned int somewhere_renamed_p : 1;
477 /* TRUE if allocno with the same REGNO in a subregion has been
478 renamed, in other words, got a new pseudo-register. */
479 unsigned int child_renamed_p : 1;
480 /* Final rtx representation of the allocno. */
481 rtx reg;
482 /* Non NULL if we remove restoring value from given allocno to
483 MEM_OPTIMIZED_DEST at loop exit (see ira-emit.c) because the
484 allocno value is not changed inside the loop. */
485 ira_allocno_t mem_optimized_dest;
486 };
487
488 #define ALLOCNO_EMIT_DATA(a) ((ira_emit_data_t) ALLOCNO_ADD_DATA (a))
489
490 /* Data used to emit live range split insns and to flattening IR. */
491 extern ira_emit_data_t ira_allocno_emit_data;
492
493 /* Abbreviation for frequent emit data access. */
494 static inline rtx
495 allocno_emit_reg (ira_allocno_t a)
496 {
497 return ALLOCNO_EMIT_DATA (a)->reg;
498 }
499
500 #define OBJECT_ALLOCNO(O) ((O)->allocno)
501 #define OBJECT_SUBWORD(O) ((O)->subword)
502 #define OBJECT_CONFLICT_ARRAY(O) ((O)->conflicts_array)
503 #define OBJECT_CONFLICT_VEC(O) ((ira_object_t *)(O)->conflicts_array)
504 #define OBJECT_CONFLICT_BITVEC(O) ((IRA_INT_TYPE *)(O)->conflicts_array)
505 #define OBJECT_CONFLICT_ARRAY_SIZE(O) ((O)->conflicts_array_size)
506 #define OBJECT_CONFLICT_VEC_P(O) ((O)->conflict_vec_p)
507 #define OBJECT_NUM_CONFLICTS(O) ((O)->num_accumulated_conflicts)
508 #define OBJECT_CONFLICT_HARD_REGS(O) ((O)->conflict_hard_regs)
509 #define OBJECT_TOTAL_CONFLICT_HARD_REGS(O) ((O)->total_conflict_hard_regs)
510 #define OBJECT_MIN(O) ((O)->min)
511 #define OBJECT_MAX(O) ((O)->max)
512 #define OBJECT_CONFLICT_ID(O) ((O)->id)
513 #define OBJECT_LIVE_RANGES(O) ((O)->live_ranges)
514
515 /* Map regno -> allocnos with given regno (see comments for
516 allocno member `next_regno_allocno'). */
517 extern ira_allocno_t *ira_regno_allocno_map;
518
519 /* Array of references to all allocnos. The order number of the
520 allocno corresponds to the index in the array. Removed allocnos
521 have NULL element value. */
522 extern ira_allocno_t *ira_allocnos;
523
524 /* The size of the previous array. */
525 extern int ira_allocnos_num;
526
527 /* Map a conflict id to its corresponding ira_object structure. */
528 extern ira_object_t *ira_object_id_map;
529
530 /* The size of the previous array. */
531 extern int ira_objects_num;
532
533 /* The following structure represents a hard register preference of
534 allocno. The preference represent move insns or potential move
535 insns usually because of two operand insn constraints. One move
536 operand is a hard register. */
537 struct ira_allocno_pref
538 {
539 /* The unique order number of the preference node starting with 0. */
540 int num;
541 /* Preferred hard register. */
542 int hard_regno;
543 /* Accumulated execution frequency of insns from which the
544 preference created. */
545 int freq;
546 /* Given allocno. */
547 ira_allocno_t allocno;
548 /* All preferences with the same allocno are linked by the following
549 member. */
550 ira_pref_t next_pref;
551 };
552
553 /* Array of references to all allocno preferences. The order number
554 of the preference corresponds to the index in the array. */
555 extern ira_pref_t *ira_prefs;
556
557 /* Size of the previous array. */
558 extern int ira_prefs_num;
559
560 /* The following structure represents a copy of two allocnos. The
561 copies represent move insns or potential move insns usually because
562 of two operand insn constraints. To remove register shuffle, we
563 also create copies between allocno which is output of an insn and
564 allocno becoming dead in the insn. */
565 struct ira_allocno_copy
566 {
567 /* The unique order number of the copy node starting with 0. */
568 int num;
569 /* Allocnos connected by the copy. The first allocno should have
570 smaller order number than the second one. */
571 ira_allocno_t first, second;
572 /* Execution frequency of the copy. */
573 int freq;
574 bool constraint_p;
575 /* It is a move insn which is an origin of the copy. The member
576 value for the copy representing two operand insn constraints or
577 for the copy created to remove register shuffle is NULL. In last
578 case the copy frequency is smaller than the corresponding insn
579 execution frequency. */
580 rtx_insn *insn;
581 /* All copies with the same allocno as FIRST are linked by the two
582 following members. */
583 ira_copy_t prev_first_allocno_copy, next_first_allocno_copy;
584 /* All copies with the same allocno as SECOND are linked by the two
585 following members. */
586 ira_copy_t prev_second_allocno_copy, next_second_allocno_copy;
587 /* Region from which given copy is originated. */
588 ira_loop_tree_node_t loop_tree_node;
589 };
590
591 /* Array of references to all copies. The order number of the copy
592 corresponds to the index in the array. Removed copies have NULL
593 element value. */
594 extern ira_copy_t *ira_copies;
595
596 /* Size of the previous array. */
597 extern int ira_copies_num;
598
599 /* The following structure describes a stack slot used for spilled
600 pseudo-registers. */
601 struct ira_spilled_reg_stack_slot
602 {
603 /* pseudo-registers assigned to the stack slot. */
604 bitmap_head spilled_regs;
605 /* RTL representation of the stack slot. */
606 rtx mem;
607 /* Size of the stack slot. */
608 poly_uint64_pod width;
609 };
610
611 /* The number of elements in the following array. */
612 extern int ira_spilled_reg_stack_slots_num;
613
614 /* The following array contains info about spilled pseudo-registers
615 stack slots used in current function so far. */
616 extern struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
617
618 /* Correspondingly overall cost of the allocation, cost of the
619 allocnos assigned to hard-registers, cost of the allocnos assigned
620 to memory, cost of loads, stores and register move insns generated
621 for pseudo-register live range splitting (see ira-emit.c). */
622 extern int64_t ira_overall_cost;
623 extern int64_t ira_reg_cost, ira_mem_cost;
624 extern int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
625 extern int ira_move_loops_num, ira_additional_jumps_num;
626
627
628 /* This page contains a bitset implementation called 'min/max sets' used to
630 record conflicts in IRA.
631 They are named min/maxs set since we keep track of a minimum and a maximum
632 bit number for each set representing the bounds of valid elements. Otherwise,
633 the implementation resembles sbitmaps in that we store an array of integers
634 whose bits directly represent the members of the set. */
635
636 /* The type used as elements in the array, and the number of bits in
637 this type. */
638
639 #define IRA_INT_BITS HOST_BITS_PER_WIDE_INT
640 #define IRA_INT_TYPE HOST_WIDE_INT
641
642 /* Set, clear or test bit number I in R, a bit vector of elements with
643 minimal index and maximal index equal correspondingly to MIN and
644 MAX. */
645 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
646
647 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
648 (({ int _min = (MIN), _max = (MAX), _i = (I); \
649 if (_i < _min || _i > _max) \
650 { \
651 fprintf (stderr, \
652 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
653 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
654 gcc_unreachable (); \
655 } \
656 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
657 |= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
658
659
660 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
661 (({ int _min = (MIN), _max = (MAX), _i = (I); \
662 if (_i < _min || _i > _max) \
663 { \
664 fprintf (stderr, \
665 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
666 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
667 gcc_unreachable (); \
668 } \
669 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
670 &= ~((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
671
672 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
673 (({ int _min = (MIN), _max = (MAX), _i = (I); \
674 if (_i < _min || _i > _max) \
675 { \
676 fprintf (stderr, \
677 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
678 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
679 gcc_unreachable (); \
680 } \
681 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
682 & ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
683
684 #else
685
686 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX) \
687 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
688 |= ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
689
690 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) \
691 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
692 &= ~((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
693
694 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) \
695 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
696 & ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
697
698 #endif
699
700 /* The iterator for min/max sets. */
701 struct minmax_set_iterator {
702
703 /* Array containing the bit vector. */
704 IRA_INT_TYPE *vec;
705
706 /* The number of the current element in the vector. */
707 unsigned int word_num;
708
709 /* The number of bits in the bit vector. */
710 unsigned int nel;
711
712 /* The current bit index of the bit vector. */
713 unsigned int bit_num;
714
715 /* Index corresponding to the 1st bit of the bit vector. */
716 int start_val;
717
718 /* The word of the bit vector currently visited. */
719 unsigned IRA_INT_TYPE word;
720 };
721
722 /* Initialize the iterator I for bit vector VEC containing minimal and
723 maximal values MIN and MAX. */
724 static inline void
725 minmax_set_iter_init (minmax_set_iterator *i, IRA_INT_TYPE *vec, int min,
726 int max)
727 {
728 i->vec = vec;
729 i->word_num = 0;
730 i->nel = max < min ? 0 : max - min + 1;
731 i->start_val = min;
732 i->bit_num = 0;
733 i->word = i->nel == 0 ? 0 : vec[0];
734 }
735
736 /* Return TRUE if we have more allocnos to visit, in which case *N is
737 set to the number of the element to be visited. Otherwise, return
738 FALSE. */
739 static inline bool
740 minmax_set_iter_cond (minmax_set_iterator *i, int *n)
741 {
742 /* Skip words that are zeros. */
743 for (; i->word == 0; i->word = i->vec[i->word_num])
744 {
745 i->word_num++;
746 i->bit_num = i->word_num * IRA_INT_BITS;
747
748 /* If we have reached the end, break. */
749 if (i->bit_num >= i->nel)
750 return false;
751 }
752
753 /* Skip bits that are zero. */
754 for (; (i->word & 1) == 0; i->word >>= 1)
755 i->bit_num++;
756
757 *n = (int) i->bit_num + i->start_val;
758
759 return true;
760 }
761
/* Advance to the next element in the set: move past the bit that
   minmax_set_iter_cond just reported.  */
static inline void
minmax_set_iter_next (minmax_set_iterator *i)
{
  i->word >>= 1;
  i->bit_num++;
}

/* Loop over all elements of a min/max set given by bit vector VEC and
   their minimal and maximal values MIN and MAX.  In each iteration, N
   is set to the number of next allocno.  ITER is an instance of
   minmax_set_iterator used to iterate over the set.  */
#define FOR_EACH_BIT_IN_MINMAX_SET(VEC, MIN, MAX, N, ITER)	\
  for (minmax_set_iter_init (&(ITER), (VEC), (MIN), (MAX));	\
       minmax_set_iter_cond (&(ITER), &(N));			\
       minmax_set_iter_next (&(ITER)))
778
/* Per-target IRA data, switched via this_target_ira_int when
   SWITCHABLE_TARGET is enabled.  */
struct target_ira_int {
  ~target_ira_int ();

  void free_ira_costs ();
  void free_register_move_costs ();

  /* Initialized once.  It is a maximal possible size of the allocated
     struct costs.  */
  size_t x_max_struct_costs_size;

  /* Allocated and initialized once, and used to initialize cost values
     for each insn.  */
  struct costs *x_init_cost;

  /* Allocated once, and used for temporary purposes.  */
  struct costs *x_temp_costs;

  /* Allocated once, and used for the cost calculation.  */
  struct costs *x_op_costs[MAX_RECOG_OPERANDS];
  struct costs *x_this_op_costs[MAX_RECOG_OPERANDS];

  /* Hard registers that cannot be used for the register allocator for
     all functions of the current compilation unit.  */
  HARD_REG_SET x_no_unit_alloc_regs;

  /* Map: hard regs X modes -> set of hard registers for storing value
     of given mode starting with given hard register.  */
  HARD_REG_SET (x_ira_reg_mode_hard_regset
		[FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES]);

  /* Maximum cost of moving from a register in one class to a register
     in another class.  Based on TARGET_REGISTER_MOVE_COST.  */
  move_table *x_ira_register_move_cost[MAX_MACHINE_MODE];

  /* Similar, but here we don't have to move if the first index is a
     subset of the second so in that case the cost is zero.  */
  move_table *x_ira_may_move_in_cost[MAX_MACHINE_MODE];

  /* Similar, but here we don't have to move if the first index is a
     superset of the second so in that case the cost is zero.  */
  move_table *x_ira_may_move_out_cost[MAX_MACHINE_MODE];

  /* Keep track of the last mode we initialized move costs for.  */
  int x_last_mode_for_init_move_cost;

  /* Array analog of the macro MEMORY_MOVE_COST but they contain maximal
     cost not minimal.  */
  short int x_ira_max_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];

  /* Map class->true if class is a possible allocno class, false
     otherwise.  */
  bool x_ira_reg_allocno_class_p[N_REG_CLASSES];

  /* Map class->true if class is a pressure class, false otherwise.  */
  bool x_ira_reg_pressure_class_p[N_REG_CLASSES];

  /* Array of the number of hard registers of given class which are
     available for allocation.  The order is defined by the hard
     register numbers.  */
  short x_ira_non_ordered_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];

  /* Index (in ira_class_hard_regs) for given register class and hard
     register (in general case a hard register can belong to several
     register classes).  The index is negative for hard registers
     unavailable for the allocation.  */
  short x_ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];

  /* Index [CL][M] contains R if R appears somewhere in a register of the form:

       (reg:M R'), R' not in x_ira_prohibited_class_mode_regs[CL][M]

     For example, if:

     - (reg:M 2) is valid and occupies two registers;
     - register 2 belongs to CL; and
     - register 3 belongs to the same pressure class as CL

     then (reg:M 2) contributes to [CL][M] and registers 2 and 3 will be
     in the set.  */
  HARD_REG_SET x_ira_useful_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];

  /* The value is number of elements in the subsequent array.  */
  int x_ira_important_classes_num;

  /* The array containing all non-empty classes.  Such classes are
     important for calculation of the hard register usage costs.  */
  enum reg_class x_ira_important_classes[N_REG_CLASSES];

  /* The array containing indexes of important classes in the previous
     array.  The array elements are defined only for important
     classes.  */
  int x_ira_important_class_nums[N_REG_CLASSES];

  /* Map class->true if class is a uniform class, false otherwise.  */
  bool x_ira_uniform_class_p[N_REG_CLASSES];

  /* The biggest important class inside of intersection of the two
     classes (that is calculated taking only hard registers available
     for allocation into account).  If both classes contain no hard
     registers available for allocation, the value is calculated with
     taking all hard-registers including fixed ones into account.  */
  enum reg_class x_ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];

  /* Classes with end marker LIM_REG_CLASSES which are intersected with
     given class (the first index).  That includes given class itself.
     This is calculated taking only hard registers available for
     allocation into account.  */
  enum reg_class x_ira_reg_class_super_classes[N_REG_CLASSES][N_REG_CLASSES];

  /* The biggest (smallest) important class inside of (covering) union
     of the two classes (that is calculated taking only hard registers
     available for allocation into account).  If both classes
     contain no hard registers available for allocation, the value is
     calculated with taking all hard-registers including fixed ones
     into account.  In other words, the value is the corresponding
     reg_class_subunion (reg_class_superunion) value.  */
  enum reg_class x_ira_reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
  enum reg_class x_ira_reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];

  /* For each reg class, table listing all the classes contained in it
     (excluding the class itself.  Non-allocatable registers are
     excluded from the consideration).  */
  enum reg_class x_alloc_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];

  /* Array whose values are hard regset of hard registers for which
     move of the hard register in given mode into itself is
     prohibited.  */
  HARD_REG_SET x_ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];

  /* Flag that the above array has been initialized.  */
  bool x_ira_prohibited_mode_move_regs_initialized_p;
};
912
913 extern struct target_ira_int default_target_ira_int;
914 #if SWITCHABLE_TARGET
915 extern struct target_ira_int *this_target_ira_int;
916 #else
917 #define this_target_ira_int (&default_target_ira_int)
918 #endif
919
/* Convenience accessors mapping the public ira_* names onto the
   corresponding fields of this_target_ira_int.  */
#define ira_reg_mode_hard_regset \
  (this_target_ira_int->x_ira_reg_mode_hard_regset)
#define ira_register_move_cost \
  (this_target_ira_int->x_ira_register_move_cost)
#define ira_max_memory_move_cost \
  (this_target_ira_int->x_ira_max_memory_move_cost)
#define ira_may_move_in_cost \
  (this_target_ira_int->x_ira_may_move_in_cost)
#define ira_may_move_out_cost \
  (this_target_ira_int->x_ira_may_move_out_cost)
#define ira_reg_allocno_class_p \
  (this_target_ira_int->x_ira_reg_allocno_class_p)
#define ira_reg_pressure_class_p \
  (this_target_ira_int->x_ira_reg_pressure_class_p)
#define ira_non_ordered_class_hard_regs \
  (this_target_ira_int->x_ira_non_ordered_class_hard_regs)
#define ira_class_hard_reg_index \
  (this_target_ira_int->x_ira_class_hard_reg_index)
#define ira_useful_class_mode_regs \
  (this_target_ira_int->x_ira_useful_class_mode_regs)
#define ira_important_classes_num \
  (this_target_ira_int->x_ira_important_classes_num)
#define ira_important_classes \
  (this_target_ira_int->x_ira_important_classes)
#define ira_important_class_nums \
  (this_target_ira_int->x_ira_important_class_nums)
#define ira_uniform_class_p \
  (this_target_ira_int->x_ira_uniform_class_p)
#define ira_reg_class_intersect \
  (this_target_ira_int->x_ira_reg_class_intersect)
#define ira_reg_class_super_classes \
  (this_target_ira_int->x_ira_reg_class_super_classes)
#define ira_reg_class_subunion \
  (this_target_ira_int->x_ira_reg_class_subunion)
#define ira_reg_class_superunion \
  (this_target_ira_int->x_ira_reg_class_superunion)
#define ira_prohibited_mode_move_regs \
  (this_target_ira_int->x_ira_prohibited_mode_move_regs)
958
959 /* ira.c: */
961
962 extern void *ira_allocate (size_t);
963 extern void ira_free (void *addr);
964 extern bitmap ira_allocate_bitmap (void);
965 extern void ira_free_bitmap (bitmap);
966 extern void ira_print_disposition (FILE *);
967 extern void ira_debug_disposition (void);
968 extern void ira_debug_allocno_classes (void);
969 extern void ira_init_register_move_cost (machine_mode);
970 extern void ira_setup_alts (rtx_insn *insn, HARD_REG_SET &alts);
971 extern int ira_get_dup_out_num (int op_num, HARD_REG_SET &alts);
972
973 /* ira-build.c */
974
975 /* The current loop tree node and its regno allocno map. */
976 extern ira_loop_tree_node_t ira_curr_loop_tree_node;
977 extern ira_allocno_t *ira_curr_regno_allocno_map;
978
979 extern void ira_debug_pref (ira_pref_t);
980 extern void ira_debug_prefs (void);
981 extern void ira_debug_allocno_prefs (ira_allocno_t);
982
983 extern void ira_debug_copy (ira_copy_t);
984 extern void debug (ira_allocno_copy &ref);
985 extern void debug (ira_allocno_copy *ptr);
986
987 extern void ira_debug_copies (void);
988 extern void ira_debug_allocno_copies (ira_allocno_t);
989 extern void debug (ira_allocno &ref);
990 extern void debug (ira_allocno *ptr);
991
992 extern void ira_traverse_loop_tree (bool, ira_loop_tree_node_t,
993 void (*) (ira_loop_tree_node_t),
994 void (*) (ira_loop_tree_node_t));
995 extern ira_allocno_t ira_parent_allocno (ira_allocno_t);
996 extern ira_allocno_t ira_parent_or_cap_allocno (ira_allocno_t);
997 extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
998 extern void ira_create_allocno_objects (ira_allocno_t);
999 extern void ira_set_allocno_class (ira_allocno_t, enum reg_class);
1000 extern bool ira_conflict_vector_profitable_p (ira_object_t, int);
1001 extern void ira_allocate_conflict_vec (ira_object_t, int);
1002 extern void ira_allocate_object_conflicts (ira_object_t, int);
1003 extern void ior_hard_reg_conflicts (ira_allocno_t, HARD_REG_SET *);
1004 extern void ira_print_expanded_allocno (ira_allocno_t);
1005 extern void ira_add_live_range_to_object (ira_object_t, int, int);
1006 extern live_range_t ira_create_live_range (ira_object_t, int, int,
1007 live_range_t);
1008 extern live_range_t ira_copy_live_range_list (live_range_t);
1009 extern live_range_t ira_merge_live_ranges (live_range_t, live_range_t);
1010 extern bool ira_live_ranges_intersect_p (live_range_t, live_range_t);
1011 extern void ira_finish_live_range (live_range_t);
1012 extern void ira_finish_live_range_list (live_range_t);
1013 extern void ira_free_allocno_updated_costs (ira_allocno_t);
1014 extern ira_pref_t ira_create_pref (ira_allocno_t, int, int);
1015 extern void ira_add_allocno_pref (ira_allocno_t, int, int);
1016 extern void ira_remove_pref (ira_pref_t);
1017 extern void ira_remove_allocno_prefs (ira_allocno_t);
1018 extern ira_copy_t ira_create_copy (ira_allocno_t, ira_allocno_t,
1019 int, bool, rtx_insn *,
1020 ira_loop_tree_node_t);
1021 extern ira_copy_t ira_add_allocno_copy (ira_allocno_t, ira_allocno_t, int,
1022 bool, rtx_insn *,
1023 ira_loop_tree_node_t);
1024
1025 extern int *ira_allocate_cost_vector (reg_class_t);
1026 extern void ira_free_cost_vector (int *, reg_class_t);
1027
1028 extern void ira_flattening (int, int);
1029 extern bool ira_build (void);
1030 extern void ira_destroy (void);
1031
1032 /* ira-costs.c */
1033 extern void ira_init_costs_once (void);
1034 extern void ira_init_costs (void);
1035 extern void ira_costs (void);
1036 extern void ira_tune_allocno_costs (void);
1037
1038 /* ira-lives.c */
1039
1040 extern void ira_rebuild_start_finish_chains (void);
1041 extern void ira_print_live_range_list (FILE *, live_range_t);
1042 extern void debug (live_range &ref);
1043 extern void debug (live_range *ptr);
1044 extern void ira_debug_live_range_list (live_range_t);
1045 extern void ira_debug_allocno_live_ranges (ira_allocno_t);
1046 extern void ira_debug_live_ranges (void);
1047 extern void ira_create_allocno_live_ranges (void);
1048 extern void ira_compress_allocno_live_ranges (void);
1049 extern void ira_finish_allocno_live_ranges (void);
1050 extern void ira_implicitly_set_insn_hard_regs (HARD_REG_SET *,
1051 alternative_mask);
1052
1053 /* ira-conflicts.c */
1054 extern void ira_debug_conflicts (bool);
1055 extern void ira_build_conflicts (void);
1056
1057 /* ira-color.c */
1058 extern void ira_debug_hard_regs_forest (void);
1059 extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
1060 extern void ira_reassign_conflict_allocnos (int);
1061 extern void ira_initiate_assign (void);
1062 extern void ira_finish_assign (void);
1063 extern void ira_color (void);
1064
1065 /* ira-emit.c */
1066 extern void ira_initiate_emit_data (void);
1067 extern void ira_finish_emit_data (void);
1068 extern void ira_emit (bool);
1069
1070
1071
1073 /* Return true if equivalence of pseudo REGNO is not a lvalue. */
1074 static inline bool
1075 ira_equiv_no_lvalue_p (int regno)
1076 {
1077 if (regno >= ira_reg_equiv_len)
1078 return false;
1079 return (ira_reg_equiv[regno].constant != NULL_RTX
1080 || ira_reg_equiv[regno].invariant != NULL_RTX
1081 || (ira_reg_equiv[regno].memory != NULL_RTX
1082 && MEM_READONLY_P (ira_reg_equiv[regno].memory)));
1083 }
1084
1085
1086
1088 /* Initialize register costs for MODE if necessary. */
1089 static inline void
1090 ira_init_register_move_cost_if_necessary (machine_mode mode)
1091 {
1092 if (ira_register_move_cost[mode] == NULL)
1093 ira_init_register_move_cost (mode);
1094 }
1095
1096
1097
1099 /* The iterator for all allocnos. */
1100 struct ira_allocno_iterator {
1101 /* The number of the current element in IRA_ALLOCNOS. */
1102 int n;
1103 };
1104
1105 /* Initialize the iterator I. */
1106 static inline void
1107 ira_allocno_iter_init (ira_allocno_iterator *i)
1108 {
1109 i->n = 0;
1110 }
1111
1112 /* Return TRUE if we have more allocnos to visit, in which case *A is
1113 set to the allocno to be visited. Otherwise, return FALSE. */
1114 static inline bool
1115 ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
1116 {
1117 int n;
1118
1119 for (n = i->n; n < ira_allocnos_num; n++)
1120 if (ira_allocnos[n] != NULL)
1121 {
1122 *a = ira_allocnos[n];
1123 i->n = n + 1;
1124 return true;
1125 }
1126 return false;
1127 }
1128
1129 /* Loop over all allocnos. In each iteration, A is set to the next
1130 allocno. ITER is an instance of ira_allocno_iterator used to iterate
1131 the allocnos. */
1132 #define FOR_EACH_ALLOCNO(A, ITER) \
1133 for (ira_allocno_iter_init (&(ITER)); \
1134 ira_allocno_iter_cond (&(ITER), &(A));)
1135
1136 /* The iterator for all objects. */
1138 struct ira_object_iterator {
1139 /* The number of the current element in ira_object_id_map. */
1140 int n;
1141 };
1142
1143 /* Initialize the iterator I. */
1144 static inline void
1145 ira_object_iter_init (ira_object_iterator *i)
1146 {
1147 i->n = 0;
1148 }
1149
1150 /* Return TRUE if we have more objects to visit, in which case *OBJ is
1151 set to the object to be visited. Otherwise, return FALSE. */
1152 static inline bool
1153 ira_object_iter_cond (ira_object_iterator *i, ira_object_t *obj)
1154 {
1155 int n;
1156
1157 for (n = i->n; n < ira_objects_num; n++)
1158 if (ira_object_id_map[n] != NULL)
1159 {
1160 *obj = ira_object_id_map[n];
1161 i->n = n + 1;
1162 return true;
1163 }
1164 return false;
1165 }
1166
1167 /* Loop over all objects. In each iteration, OBJ is set to the next
1168 object. ITER is an instance of ira_object_iterator used to iterate
1169 the objects. */
1170 #define FOR_EACH_OBJECT(OBJ, ITER) \
1171 for (ira_object_iter_init (&(ITER)); \
1172 ira_object_iter_cond (&(ITER), &(OBJ));)
1173
1174 /* The iterator for objects associated with an allocno. */
1176 struct ira_allocno_object_iterator {
1177 /* The number of the element the allocno's object array. */
1178 int n;
1179 };
1180
1181 /* Initialize the iterator I. */
1182 static inline void
1183 ira_allocno_object_iter_init (ira_allocno_object_iterator *i)
1184 {
1185 i->n = 0;
1186 }
1187
1188 /* Return TRUE if we have more objects to visit in allocno A, in which
1189 case *O is set to the object to be visited. Otherwise, return
1190 FALSE. */
1191 static inline bool
1192 ira_allocno_object_iter_cond (ira_allocno_object_iterator *i, ira_allocno_t a,
1193 ira_object_t *o)
1194 {
1195 int n = i->n++;
1196 if (n < ALLOCNO_NUM_OBJECTS (a))
1197 {
1198 *o = ALLOCNO_OBJECT (a, n);
1199 return true;
1200 }
1201 return false;
1202 }
1203
1204 /* Loop over all objects associated with allocno A. In each
1205 iteration, O is set to the next object. ITER is an instance of
1206 ira_allocno_object_iterator used to iterate the conflicts. */
1207 #define FOR_EACH_ALLOCNO_OBJECT(A, O, ITER) \
1208 for (ira_allocno_object_iter_init (&(ITER)); \
1209 ira_allocno_object_iter_cond (&(ITER), (A), &(O));)
1210
1211
1213 /* The iterator for prefs. */
1214 struct ira_pref_iterator {
1215 /* The number of the current element in IRA_PREFS. */
1216 int n;
1217 };
1218
1219 /* Initialize the iterator I. */
1220 static inline void
1221 ira_pref_iter_init (ira_pref_iterator *i)
1222 {
1223 i->n = 0;
1224 }
1225
1226 /* Return TRUE if we have more prefs to visit, in which case *PREF is
1227 set to the pref to be visited. Otherwise, return FALSE. */
1228 static inline bool
1229 ira_pref_iter_cond (ira_pref_iterator *i, ira_pref_t *pref)
1230 {
1231 int n;
1232
1233 for (n = i->n; n < ira_prefs_num; n++)
1234 if (ira_prefs[n] != NULL)
1235 {
1236 *pref = ira_prefs[n];
1237 i->n = n + 1;
1238 return true;
1239 }
1240 return false;
1241 }
1242
1243 /* Loop over all prefs. In each iteration, P is set to the next
1244 pref. ITER is an instance of ira_pref_iterator used to iterate
1245 the prefs. */
1246 #define FOR_EACH_PREF(P, ITER) \
1247 for (ira_pref_iter_init (&(ITER)); \
1248 ira_pref_iter_cond (&(ITER), &(P));)
1249
1250
1252 /* The iterator for copies. */
1253 struct ira_copy_iterator {
1254 /* The number of the current element in IRA_COPIES. */
1255 int n;
1256 };
1257
1258 /* Initialize the iterator I. */
1259 static inline void
1260 ira_copy_iter_init (ira_copy_iterator *i)
1261 {
1262 i->n = 0;
1263 }
1264
1265 /* Return TRUE if we have more copies to visit, in which case *CP is
1266 set to the copy to be visited. Otherwise, return FALSE. */
1267 static inline bool
1268 ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
1269 {
1270 int n;
1271
1272 for (n = i->n; n < ira_copies_num; n++)
1273 if (ira_copies[n] != NULL)
1274 {
1275 *cp = ira_copies[n];
1276 i->n = n + 1;
1277 return true;
1278 }
1279 return false;
1280 }
1281
1282 /* Loop over all copies. In each iteration, C is set to the next
1283 copy. ITER is an instance of ira_copy_iterator used to iterate
1284 the copies. */
1285 #define FOR_EACH_COPY(C, ITER) \
1286 for (ira_copy_iter_init (&(ITER)); \
1287 ira_copy_iter_cond (&(ITER), &(C));)
1288
/* The iterator for object conflicts.  Conflicts are stored either as
   a NULL-terminated vector of objects or as a bit vector indexed by
   conflict id, relative to the object's minimal conflict id.  */
struct ira_object_conflict_iterator {

  /* TRUE if the conflicts are represented by vector of allocnos.  */
  bool conflict_vec_p;

  /* The conflict vector or conflict bit vector.  */
  void *vec;

  /* The number of the current element in the vector (of type
     ira_object_t or IRA_INT_TYPE).  */
  unsigned int word_num;

  /* The bit vector size in bytes.  It is defined only if
     OBJECT_CONFLICT_VEC_P is FALSE.  */
  unsigned int size;

  /* The current bit index of bit vector.  It is defined only if
     OBJECT_CONFLICT_VEC_P is FALSE.  */
  unsigned int bit_num;

  /* The object id corresponding to the 1st bit of the bit vector.  It
     is defined only if OBJECT_CONFLICT_VEC_P is FALSE.  */
  int base_conflict_id;

  /* The word of bit vector currently visited.  It is defined only if
     OBJECT_CONFLICT_VEC_P is FALSE.  */
  unsigned IRA_INT_TYPE word;
};

/* Initialize the iterator I to iterate over the conflicts of object
   OBJ, whichever representation OBJ uses.  */
static inline void
ira_object_conflict_iter_init (ira_object_conflict_iterator *i,
			       ira_object_t obj)
{
  i->conflict_vec_p = OBJECT_CONFLICT_VEC_P (obj);
  i->vec = OBJECT_CONFLICT_ARRAY (obj);
  i->word_num = 0;
  if (i->conflict_vec_p)
    /* The remaining fields are unused in vector form; zero them.  */
    i->size = i->bit_num = i->base_conflict_id = i->word = 0;
  else
    {
      if (OBJECT_MIN (obj) > OBJECT_MAX (obj))
	i->size = 0;
      else
	/* Size in bytes of the bit vector, rounded up to whole
	   IRA_INT_TYPE words.  */
	i->size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj)
		    + IRA_INT_BITS)
		   / IRA_INT_BITS) * sizeof (IRA_INT_TYPE);
      i->bit_num = 0;
      i->base_conflict_id = OBJECT_MIN (obj);
      i->word = (i->size == 0 ? 0 : ((IRA_INT_TYPE *) i->vec)[0]);
    }
}

/* Return TRUE if we have more conflicting objects to visit, in which
   case *POBJ is set to the object to be visited.  Otherwise, return
   FALSE.  */
static inline bool
ira_object_conflict_iter_cond (ira_object_conflict_iterator *i,
			       ira_object_t *pobj)
{
  ira_object_t obj;

  if (i->conflict_vec_p)
    {
      /* Vector form: it is NULL-terminated.  */
      obj = ((ira_object_t *) i->vec)[i->word_num++];
      if (obj == NULL)
	return false;
    }
  else
    {
      /* Bit-vector form: work on local copies and commit the advanced
	 position before returning.  */
      unsigned IRA_INT_TYPE word = i->word;
      unsigned int bit_num = i->bit_num;

      /* Skip words that are zeros.  */
      for (; word == 0; word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
	{
	  i->word_num++;

	  /* If we have reached the end, break.  */
	  if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
	    return false;

	  bit_num = i->word_num * IRA_INT_BITS;
	}

      /* Skip bits that are zero.  */
      for (; (word & 1) == 0; word >>= 1)
	bit_num++;

      /* The bit index is relative to the object's minimal id.  */
      obj = ira_object_id_map[bit_num + i->base_conflict_id];
      i->bit_num = bit_num + 1;
      i->word = word >> 1;
    }

  *pobj = obj;
  return true;
}

/* Loop over all objects conflicting with OBJ.  In each iteration,
   CONF is set to the next conflicting object.  ITER is an instance
   of ira_object_conflict_iterator used to iterate the conflicts.  */
#define FOR_EACH_OBJECT_CONFLICT(OBJ, CONF, ITER) \
  for (ira_object_conflict_iter_init (&(ITER), (OBJ)); \
       ira_object_conflict_iter_cond (&(ITER), &(CONF));)
1395
1396
1397
1399 /* The function returns TRUE if at least one hard register from ones
1400 starting with HARD_REGNO and containing value of MODE are in set
1401 HARD_REGSET. */
1402 static inline bool
1403 ira_hard_reg_set_intersection_p (int hard_regno, machine_mode mode,
1404 HARD_REG_SET hard_regset)
1405 {
1406 int i;
1407
1408 gcc_assert (hard_regno >= 0);
1409 for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
1410 if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
1411 return true;
1412 return false;
1413 }
1414
1415 /* Return number of hard registers in hard register SET. */
1416 static inline int
1417 hard_reg_set_size (HARD_REG_SET set)
1418 {
1419 int i, size;
1420
1421 for (size = i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1422 if (TEST_HARD_REG_BIT (set, i))
1423 size++;
1424 return size;
1425 }
1426
1427 /* The function returns TRUE if hard registers starting with
1428 HARD_REGNO and containing value of MODE are fully in set
1429 HARD_REGSET. */
1430 static inline bool
1431 ira_hard_reg_in_set_p (int hard_regno, machine_mode mode,
1432 HARD_REG_SET hard_regset)
1433 {
1434 int i;
1435
1436 ira_assert (hard_regno >= 0);
1437 for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
1438 if (!TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
1439 return false;
1440 return true;
1441 }
1442
1443
1444
1446 /* To save memory we use a lazy approach for allocation and
1447 initialization of the cost vectors. We do this only when it is
1448 really necessary. */
1449
1450 /* Allocate cost vector *VEC for hard registers of ACLASS and
1451 initialize the elements by VAL if it is necessary */
1452 static inline void
1453 ira_allocate_and_set_costs (int **vec, reg_class_t aclass, int val)
1454 {
1455 int i, *reg_costs;
1456 int len;
1457
1458 if (*vec != NULL)
1459 return;
1460 *vec = reg_costs = ira_allocate_cost_vector (aclass);
1461 len = ira_class_hard_regs_num[(int) aclass];
1462 for (i = 0; i < len; i++)
1463 reg_costs[i] = val;
1464 }
1465
1466 /* Allocate cost vector *VEC for hard registers of ACLASS and copy
1467 values of vector SRC into the vector if it is necessary */
1468 static inline void
1469 ira_allocate_and_copy_costs (int **vec, enum reg_class aclass, int *src)
1470 {
1471 int len;
1472
1473 if (*vec != NULL || src == NULL)
1474 return;
1475 *vec = ira_allocate_cost_vector (aclass);
1476 len = ira_class_hard_regs_num[aclass];
1477 memcpy (*vec, src, sizeof (int) * len);
1478 }
1479
1480 /* Allocate cost vector *VEC for hard registers of ACLASS and add
1481 values of vector SRC into the vector if it is necessary */
1482 static inline void
1483 ira_allocate_and_accumulate_costs (int **vec, enum reg_class aclass, int *src)
1484 {
1485 int i, len;
1486
1487 if (src == NULL)
1488 return;
1489 len = ira_class_hard_regs_num[aclass];
1490 if (*vec == NULL)
1491 {
1492 *vec = ira_allocate_cost_vector (aclass);
1493 memset (*vec, 0, sizeof (int) * len);
1494 }
1495 for (i = 0; i < len; i++)
1496 (*vec)[i] += src[i];
1497 }
1498
1499 /* Allocate cost vector *VEC for hard registers of ACLASS and copy
1500 values of vector SRC into the vector or initialize it by VAL (if
1501 SRC is null). */
1502 static inline void
1503 ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class aclass,
1504 int val, int *src)
1505 {
1506 int i, *reg_costs;
1507 int len;
1508
1509 if (*vec != NULL)
1510 return;
1511 *vec = reg_costs = ira_allocate_cost_vector (aclass);
1512 len = ira_class_hard_regs_num[aclass];
1513 if (src != NULL)
1514 memcpy (reg_costs, src, sizeof (int) * len);
1515 else
1516 {
1517 for (i = 0; i < len; i++)
1518 reg_costs[i] = val;
1519 }
1520 }
1521
1522 extern rtx ira_create_new_reg (rtx);
1523 extern int first_moveable_pseudo, last_moveable_pseudo;
1524
1525 #endif /* GCC_IRA_INT_H */
1526