ira-lives.cc revision 1.1 1 1.1 mrg /* IRA processing allocno lives to build allocno live ranges.
2 1.1 mrg Copyright (C) 2006-2022 Free Software Foundation, Inc.
3 1.1 mrg Contributed by Vladimir Makarov <vmakarov (at) redhat.com>.
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it under
8 1.1 mrg the terms of the GNU General Public License as published by the Free
9 1.1 mrg Software Foundation; either version 3, or (at your option) any later
10 1.1 mrg version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 1.1 mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 1.1 mrg for more details.
16 1.1 mrg
17 1.1 mrg You should have received a copy of the GNU General Public License
18 1.1 mrg along with GCC; see the file COPYING3. If not see
19 1.1 mrg <http://www.gnu.org/licenses/>. */
20 1.1 mrg
21 1.1 mrg #include "config.h"
22 1.1 mrg #include "system.h"
23 1.1 mrg #include "coretypes.h"
24 1.1 mrg #include "backend.h"
25 1.1 mrg #include "target.h"
26 1.1 mrg #include "rtl.h"
27 1.1 mrg #include "predict.h"
28 1.1 mrg #include "df.h"
29 1.1 mrg #include "memmodel.h"
30 1.1 mrg #include "tm_p.h"
31 1.1 mrg #include "insn-config.h"
32 1.1 mrg #include "regs.h"
33 1.1 mrg #include "ira.h"
34 1.1 mrg #include "ira-int.h"
35 1.1 mrg #include "sparseset.h"
36 1.1 mrg #include "function-abi.h"
37 1.1 mrg
38 1.1 mrg /* The code in this file is similar to one in global but the code
39 1.1 mrg works on the allocno basis and creates live ranges instead of
40 1.1 mrg pseudo-register conflicts. */
41 1.1 mrg
/* Program points are enumerated by numbers from range
   0..IRA_MAX_POINT-1.  There are approximately two times more program
   points than insns.  Program points are places in the program where
   liveness info can be changed.  In most general case (there are more
   complicated cases too) some program points correspond to places
   where input operand dies and other ones correspond to places where
   output operands are born.  */
int ira_max_point;

/* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
   live ranges with given start/finish point.  */
live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;

/* Number of the current program point.  */
static int curr_point;

/* Point where register pressure excess started or -1 if there is no
   register pressure excess.  Excess pressure for a register class at
   some point means that there are more allocnos of given register
   class living at the point than number of hard-registers of the
   class available for the allocation.  It is defined only for
   pressure classes.  */
static int high_pressure_start_point[N_REG_CLASSES];

/* Objects live at current point in the scan.  Indexed by
   OBJECT_CONFLICT_ID.  */
static sparseset objects_live;

/* A temporary bitmap used in functions that wish to avoid visiting an allocno
   multiple times.  */
static sparseset allocnos_processed;

/* Set of hard regs (except eliminable ones) currently live.  */
static HARD_REG_SET hard_regs_live;

/* The loop tree node corresponding to the current basic block.  */
static ira_loop_tree_node_t curr_bb_node;

/* The number of the last processed call.  */
static int last_call_num;
/* The number of last call at which given allocno was saved.  Indexed
   by ALLOCNO_NUM; reset to 0 whenever the allocno is referenced.  */
static int *allocno_saved_at_call;

/* The value returned by ira_setup_alts for the current instruction;
   i.e. the set of alternatives that we should consider to be likely
   candidates during reloading.  */
static alternative_mask preferred_alternatives;

/* If non-NULL, the source operand of a register to register copy for which
   we should not add a conflict with the copy's destination operand.  */
static rtx ignore_reg_for_conflicts;
92 1.1 mrg
/* Record hard register REGNO as now being live.  This only updates
   HARD_REGS_LIVE; conflicts with currently live objects are recorded
   later, when the register dies (see make_hard_regno_dead).  */
static void
make_hard_regno_live (int regno)
{
  SET_HARD_REG_BIT (hard_regs_live, regno);
}
99 1.1 mrg
100 1.1 mrg /* Process the definition of hard register REGNO. This updates
101 1.1 mrg hard_regs_live and hard reg conflict information for living allocnos. */
102 1.1 mrg static void
103 1.1 mrg make_hard_regno_dead (int regno)
104 1.1 mrg {
105 1.1 mrg unsigned int i;
106 1.1 mrg EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
107 1.1 mrg {
108 1.1 mrg ira_object_t obj = ira_object_id_map[i];
109 1.1 mrg
110 1.1 mrg if (ignore_reg_for_conflicts != NULL_RTX
111 1.1 mrg && REGNO (ignore_reg_for_conflicts)
112 1.1 mrg == (unsigned int) ALLOCNO_REGNO (OBJECT_ALLOCNO (obj)))
113 1.1 mrg continue;
114 1.1 mrg
115 1.1 mrg SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
116 1.1 mrg SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);
117 1.1 mrg }
118 1.1 mrg CLEAR_HARD_REG_BIT (hard_regs_live, regno);
119 1.1 mrg }
120 1.1 mrg
121 1.1 mrg /* Record object OBJ as now being live. Set a bit for it in objects_live,
122 1.1 mrg and start a new live range for it if necessary. */
123 1.1 mrg static void
124 1.1 mrg make_object_live (ira_object_t obj)
125 1.1 mrg {
126 1.1 mrg sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
127 1.1 mrg
128 1.1 mrg live_range_t lr = OBJECT_LIVE_RANGES (obj);
129 1.1 mrg if (lr == NULL
130 1.1 mrg || (lr->finish != curr_point && lr->finish + 1 != curr_point))
131 1.1 mrg ira_add_live_range_to_object (obj, curr_point, -1);
132 1.1 mrg }
133 1.1 mrg
134 1.1 mrg /* Update ALLOCNO_EXCESS_PRESSURE_POINTS_NUM for the allocno
135 1.1 mrg associated with object OBJ. */
136 1.1 mrg static void
137 1.1 mrg update_allocno_pressure_excess_length (ira_object_t obj)
138 1.1 mrg {
139 1.1 mrg ira_allocno_t a = OBJECT_ALLOCNO (obj);
140 1.1 mrg int start, i;
141 1.1 mrg enum reg_class aclass, pclass, cl;
142 1.1 mrg live_range_t p;
143 1.1 mrg
144 1.1 mrg aclass = ALLOCNO_CLASS (a);
145 1.1 mrg pclass = ira_pressure_class_translate[aclass];
146 1.1 mrg for (i = 0;
147 1.1 mrg (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
148 1.1 mrg i++)
149 1.1 mrg {
150 1.1 mrg if (! ira_reg_pressure_class_p[cl])
151 1.1 mrg continue;
152 1.1 mrg if (high_pressure_start_point[cl] < 0)
153 1.1 mrg continue;
154 1.1 mrg p = OBJECT_LIVE_RANGES (obj);
155 1.1 mrg ira_assert (p != NULL);
156 1.1 mrg start = (high_pressure_start_point[cl] > p->start
157 1.1 mrg ? high_pressure_start_point[cl] : p->start);
158 1.1 mrg ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) += curr_point - start + 1;
159 1.1 mrg }
160 1.1 mrg }
161 1.1 mrg
/* Process the definition of object OBJ, which is associated with allocno A.
   This finishes the current live range for it.  Also records conflicts
   between OBJ and all hard registers currently live, while preserving
   the "no conflict" status of IGNORE_REG_FOR_CONFLICTS' hard registers
   if they did not conflict with OBJ before.  */
static void
make_object_dead (ira_object_t obj)
{
  live_range_t lr;
  int regno;
  int ignore_regno = -1;
  int ignore_total_regno = -1;
  int end_regno = -1;

  sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (obj));

  /* Check whether any part of IGNORE_REG_FOR_CONFLICTS already conflicts
     with OBJ.  */
  if (ignore_reg_for_conflicts != NULL_RTX
      && REGNO (ignore_reg_for_conflicts) < FIRST_PSEUDO_REGISTER)
    {
      end_regno = END_REGNO (ignore_reg_for_conflicts);
      ignore_regno = ignore_total_regno = REGNO (ignore_reg_for_conflicts);

      for (regno = ignore_regno; regno < end_regno; regno++)
	{
	  /* A pre-existing conflict means we must not clear it below;
	     setting the loop start to END_REGNO makes the corresponding
	     clearing loop empty.  */
	  if (TEST_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno))
	    ignore_regno = end_regno;
	  if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
	    ignore_total_regno = end_regno;
	}
    }

  /* OBJ now conflicts with every hard register live at this point.  */
  OBJECT_CONFLICT_HARD_REGS (obj) |= hard_regs_live;
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= hard_regs_live;

  /* If IGNORE_REG_FOR_CONFLICTS did not already conflict with OBJ, make
     sure it still doesn't.  */
  for (regno = ignore_regno; regno < end_regno; regno++)
    CLEAR_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
  for (regno = ignore_total_regno; regno < end_regno; regno++)
    CLEAR_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);

  /* Close the open live range (opened by make_object_live).  */
  lr = OBJECT_LIVE_RANGES (obj);
  ira_assert (lr != NULL);
  lr->finish = curr_point;
  update_allocno_pressure_excess_length (obj);
}
207 1.1 mrg
/* The current register pressures for each pressure class for the current
   basic block.  Indexed by reg_class; meaningful only for pressure
   classes.  */
static int curr_reg_pressure[N_REG_CLASSES];
211 1.1 mrg
212 1.1 mrg /* Record that register pressure for PCLASS increased by N registers.
213 1.1 mrg Update the current register pressure, maximal register pressure for
214 1.1 mrg the current BB and the start point of the register pressure
215 1.1 mrg excess. */
216 1.1 mrg static void
217 1.1 mrg inc_register_pressure (enum reg_class pclass, int n)
218 1.1 mrg {
219 1.1 mrg int i;
220 1.1 mrg enum reg_class cl;
221 1.1 mrg
222 1.1 mrg for (i = 0;
223 1.1 mrg (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
224 1.1 mrg i++)
225 1.1 mrg {
226 1.1 mrg if (! ira_reg_pressure_class_p[cl])
227 1.1 mrg continue;
228 1.1 mrg curr_reg_pressure[cl] += n;
229 1.1 mrg if (high_pressure_start_point[cl] < 0
230 1.1 mrg && (curr_reg_pressure[cl] > ira_class_hard_regs_num[cl]))
231 1.1 mrg high_pressure_start_point[cl] = curr_point;
232 1.1 mrg if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
233 1.1 mrg curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
234 1.1 mrg }
235 1.1 mrg }
236 1.1 mrg
237 1.1 mrg /* Record that register pressure for PCLASS has decreased by NREGS
238 1.1 mrg registers; update current register pressure, start point of the
239 1.1 mrg register pressure excess, and register pressure excess length for
240 1.1 mrg living allocnos. */
241 1.1 mrg
242 1.1 mrg static void
243 1.1 mrg dec_register_pressure (enum reg_class pclass, int nregs)
244 1.1 mrg {
245 1.1 mrg int i;
246 1.1 mrg unsigned int j;
247 1.1 mrg enum reg_class cl;
248 1.1 mrg bool set_p = false;
249 1.1 mrg
250 1.1 mrg for (i = 0;
251 1.1 mrg (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
252 1.1 mrg i++)
253 1.1 mrg {
254 1.1 mrg if (! ira_reg_pressure_class_p[cl])
255 1.1 mrg continue;
256 1.1 mrg curr_reg_pressure[cl] -= nregs;
257 1.1 mrg ira_assert (curr_reg_pressure[cl] >= 0);
258 1.1 mrg if (high_pressure_start_point[cl] >= 0
259 1.1 mrg && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
260 1.1 mrg set_p = true;
261 1.1 mrg }
262 1.1 mrg if (set_p)
263 1.1 mrg {
264 1.1 mrg EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
265 1.1 mrg update_allocno_pressure_excess_length (ira_object_id_map[j]);
266 1.1 mrg for (i = 0;
267 1.1 mrg (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
268 1.1 mrg i++)
269 1.1 mrg {
270 1.1 mrg if (! ira_reg_pressure_class_p[cl])
271 1.1 mrg continue;
272 1.1 mrg if (high_pressure_start_point[cl] >= 0
273 1.1 mrg && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
274 1.1 mrg high_pressure_start_point[cl] = -1;
275 1.1 mrg }
276 1.1 mrg }
277 1.1 mrg }
278 1.1 mrg
279 1.1 mrg /* Determine from the objects_live bitmap whether REGNO is currently live,
280 1.1 mrg and occupies only one object. Return false if we have no information. */
281 1.1 mrg static bool
282 1.1 mrg pseudo_regno_single_word_and_live_p (int regno)
283 1.1 mrg {
284 1.1 mrg ira_allocno_t a = ira_curr_regno_allocno_map[regno];
285 1.1 mrg ira_object_t obj;
286 1.1 mrg
287 1.1 mrg if (a == NULL)
288 1.1 mrg return false;
289 1.1 mrg if (ALLOCNO_NUM_OBJECTS (a) > 1)
290 1.1 mrg return false;
291 1.1 mrg
292 1.1 mrg obj = ALLOCNO_OBJECT (a, 0);
293 1.1 mrg
294 1.1 mrg return sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj));
295 1.1 mrg }
296 1.1 mrg
297 1.1 mrg /* Mark the pseudo register REGNO as live. Update all information about
298 1.1 mrg live ranges and register pressure. */
299 1.1 mrg static void
300 1.1 mrg mark_pseudo_regno_live (int regno)
301 1.1 mrg {
302 1.1 mrg ira_allocno_t a = ira_curr_regno_allocno_map[regno];
303 1.1 mrg enum reg_class pclass;
304 1.1 mrg int i, n, nregs;
305 1.1 mrg
306 1.1 mrg if (a == NULL)
307 1.1 mrg return;
308 1.1 mrg
309 1.1 mrg /* Invalidate because it is referenced. */
310 1.1 mrg allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
311 1.1 mrg
312 1.1 mrg n = ALLOCNO_NUM_OBJECTS (a);
313 1.1 mrg pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
314 1.1 mrg nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
315 1.1 mrg if (n > 1)
316 1.1 mrg {
317 1.1 mrg /* We track every subobject separately. */
318 1.1 mrg gcc_assert (nregs == n);
319 1.1 mrg nregs = 1;
320 1.1 mrg }
321 1.1 mrg
322 1.1 mrg for (i = 0; i < n; i++)
323 1.1 mrg {
324 1.1 mrg ira_object_t obj = ALLOCNO_OBJECT (a, i);
325 1.1 mrg
326 1.1 mrg if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
327 1.1 mrg continue;
328 1.1 mrg
329 1.1 mrg inc_register_pressure (pclass, nregs);
330 1.1 mrg make_object_live (obj);
331 1.1 mrg }
332 1.1 mrg }
333 1.1 mrg
334 1.1 mrg /* Like mark_pseudo_regno_live, but try to only mark one subword of
335 1.1 mrg the pseudo as live. SUBWORD indicates which; a value of 0
336 1.1 mrg indicates the low part. */
337 1.1 mrg static void
338 1.1 mrg mark_pseudo_regno_subword_live (int regno, int subword)
339 1.1 mrg {
340 1.1 mrg ira_allocno_t a = ira_curr_regno_allocno_map[regno];
341 1.1 mrg int n;
342 1.1 mrg enum reg_class pclass;
343 1.1 mrg ira_object_t obj;
344 1.1 mrg
345 1.1 mrg if (a == NULL)
346 1.1 mrg return;
347 1.1 mrg
348 1.1 mrg /* Invalidate because it is referenced. */
349 1.1 mrg allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
350 1.1 mrg
351 1.1 mrg n = ALLOCNO_NUM_OBJECTS (a);
352 1.1 mrg if (n == 1)
353 1.1 mrg {
354 1.1 mrg mark_pseudo_regno_live (regno);
355 1.1 mrg return;
356 1.1 mrg }
357 1.1 mrg
358 1.1 mrg pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
359 1.1 mrg gcc_assert
360 1.1 mrg (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
361 1.1 mrg obj = ALLOCNO_OBJECT (a, subword);
362 1.1 mrg
363 1.1 mrg if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
364 1.1 mrg return;
365 1.1 mrg
366 1.1 mrg inc_register_pressure (pclass, 1);
367 1.1 mrg make_object_live (obj);
368 1.1 mrg }
369 1.1 mrg
370 1.1 mrg /* Mark the register REG as live. Store a 1 in hard_regs_live for
371 1.1 mrg this register, record how many consecutive hardware registers it
372 1.1 mrg actually needs. */
373 1.1 mrg static void
374 1.1 mrg mark_hard_reg_live (rtx reg)
375 1.1 mrg {
376 1.1 mrg int regno = REGNO (reg);
377 1.1 mrg
378 1.1 mrg if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
379 1.1 mrg {
380 1.1 mrg int last = END_REGNO (reg);
381 1.1 mrg enum reg_class aclass, pclass;
382 1.1 mrg
383 1.1 mrg while (regno < last)
384 1.1 mrg {
385 1.1 mrg if (! TEST_HARD_REG_BIT (hard_regs_live, regno)
386 1.1 mrg && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
387 1.1 mrg {
388 1.1 mrg aclass = ira_hard_regno_allocno_class[regno];
389 1.1 mrg pclass = ira_pressure_class_translate[aclass];
390 1.1 mrg inc_register_pressure (pclass, 1);
391 1.1 mrg make_hard_regno_live (regno);
392 1.1 mrg }
393 1.1 mrg regno++;
394 1.1 mrg }
395 1.1 mrg }
396 1.1 mrg }
397 1.1 mrg
398 1.1 mrg /* Mark a pseudo, or one of its subwords, as live. REGNO is the pseudo's
399 1.1 mrg register number; ORIG_REG is the access in the insn, which may be a
400 1.1 mrg subreg. */
401 1.1 mrg static void
402 1.1 mrg mark_pseudo_reg_live (rtx orig_reg, unsigned regno)
403 1.1 mrg {
404 1.1 mrg if (read_modify_subreg_p (orig_reg))
405 1.1 mrg {
406 1.1 mrg mark_pseudo_regno_subword_live (regno,
407 1.1 mrg subreg_lowpart_p (orig_reg) ? 0 : 1);
408 1.1 mrg }
409 1.1 mrg else
410 1.1 mrg mark_pseudo_regno_live (regno);
411 1.1 mrg }
412 1.1 mrg
413 1.1 mrg /* Mark the register referenced by use or def REF as live. */
414 1.1 mrg static void
415 1.1 mrg mark_ref_live (df_ref ref)
416 1.1 mrg {
417 1.1 mrg rtx reg = DF_REF_REG (ref);
418 1.1 mrg rtx orig_reg = reg;
419 1.1 mrg
420 1.1 mrg if (GET_CODE (reg) == SUBREG)
421 1.1 mrg reg = SUBREG_REG (reg);
422 1.1 mrg
423 1.1 mrg if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
424 1.1 mrg mark_pseudo_reg_live (orig_reg, REGNO (reg));
425 1.1 mrg else
426 1.1 mrg mark_hard_reg_live (reg);
427 1.1 mrg }
428 1.1 mrg
429 1.1 mrg /* Mark the pseudo register REGNO as dead. Update all information about
430 1.1 mrg live ranges and register pressure. */
431 1.1 mrg static void
432 1.1 mrg mark_pseudo_regno_dead (int regno)
433 1.1 mrg {
434 1.1 mrg ira_allocno_t a = ira_curr_regno_allocno_map[regno];
435 1.1 mrg int n, i, nregs;
436 1.1 mrg enum reg_class cl;
437 1.1 mrg
438 1.1 mrg if (a == NULL)
439 1.1 mrg return;
440 1.1 mrg
441 1.1 mrg /* Invalidate because it is referenced. */
442 1.1 mrg allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
443 1.1 mrg
444 1.1 mrg n = ALLOCNO_NUM_OBJECTS (a);
445 1.1 mrg cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
446 1.1 mrg nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
447 1.1 mrg if (n > 1)
448 1.1 mrg {
449 1.1 mrg /* We track every subobject separately. */
450 1.1 mrg gcc_assert (nregs == n);
451 1.1 mrg nregs = 1;
452 1.1 mrg }
453 1.1 mrg for (i = 0; i < n; i++)
454 1.1 mrg {
455 1.1 mrg ira_object_t obj = ALLOCNO_OBJECT (a, i);
456 1.1 mrg if (!sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
457 1.1 mrg continue;
458 1.1 mrg
459 1.1 mrg dec_register_pressure (cl, nregs);
460 1.1 mrg make_object_dead (obj);
461 1.1 mrg }
462 1.1 mrg }
463 1.1 mrg
464 1.1 mrg /* Like mark_pseudo_regno_dead, but called when we know that only part of the
465 1.1 mrg register dies. SUBWORD indicates which; a value of 0 indicates the low part. */
466 1.1 mrg static void
467 1.1 mrg mark_pseudo_regno_subword_dead (int regno, int subword)
468 1.1 mrg {
469 1.1 mrg ira_allocno_t a = ira_curr_regno_allocno_map[regno];
470 1.1 mrg int n;
471 1.1 mrg enum reg_class cl;
472 1.1 mrg ira_object_t obj;
473 1.1 mrg
474 1.1 mrg if (a == NULL)
475 1.1 mrg return;
476 1.1 mrg
477 1.1 mrg /* Invalidate because it is referenced. */
478 1.1 mrg allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
479 1.1 mrg
480 1.1 mrg n = ALLOCNO_NUM_OBJECTS (a);
481 1.1 mrg if (n == 1)
482 1.1 mrg /* The allocno as a whole doesn't die in this case. */
483 1.1 mrg return;
484 1.1 mrg
485 1.1 mrg cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
486 1.1 mrg gcc_assert
487 1.1 mrg (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
488 1.1 mrg
489 1.1 mrg obj = ALLOCNO_OBJECT (a, subword);
490 1.1 mrg if (!sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
491 1.1 mrg return;
492 1.1 mrg
493 1.1 mrg dec_register_pressure (cl, 1);
494 1.1 mrg make_object_dead (obj);
495 1.1 mrg }
496 1.1 mrg
497 1.1 mrg /* Process the definition of hard register REG. This updates hard_regs_live
498 1.1 mrg and hard reg conflict information for living allocnos. */
499 1.1 mrg static void
500 1.1 mrg mark_hard_reg_dead (rtx reg)
501 1.1 mrg {
502 1.1 mrg int regno = REGNO (reg);
503 1.1 mrg
504 1.1 mrg if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
505 1.1 mrg {
506 1.1 mrg int last = END_REGNO (reg);
507 1.1 mrg enum reg_class aclass, pclass;
508 1.1 mrg
509 1.1 mrg while (regno < last)
510 1.1 mrg {
511 1.1 mrg if (TEST_HARD_REG_BIT (hard_regs_live, regno))
512 1.1 mrg {
513 1.1 mrg aclass = ira_hard_regno_allocno_class[regno];
514 1.1 mrg pclass = ira_pressure_class_translate[aclass];
515 1.1 mrg dec_register_pressure (pclass, 1);
516 1.1 mrg make_hard_regno_dead (regno);
517 1.1 mrg }
518 1.1 mrg regno++;
519 1.1 mrg }
520 1.1 mrg }
521 1.1 mrg }
522 1.1 mrg
523 1.1 mrg /* Mark a pseudo, or one of its subwords, as dead. REGNO is the pseudo's
524 1.1 mrg register number; ORIG_REG is the access in the insn, which may be a
525 1.1 mrg subreg. */
526 1.1 mrg static void
527 1.1 mrg mark_pseudo_reg_dead (rtx orig_reg, unsigned regno)
528 1.1 mrg {
529 1.1 mrg if (read_modify_subreg_p (orig_reg))
530 1.1 mrg {
531 1.1 mrg mark_pseudo_regno_subword_dead (regno,
532 1.1 mrg subreg_lowpart_p (orig_reg) ? 0 : 1);
533 1.1 mrg }
534 1.1 mrg else
535 1.1 mrg mark_pseudo_regno_dead (regno);
536 1.1 mrg }
537 1.1 mrg
538 1.1 mrg /* Mark the register referenced by definition DEF as dead, if the
539 1.1 mrg definition is a total one. */
540 1.1 mrg static void
541 1.1 mrg mark_ref_dead (df_ref def)
542 1.1 mrg {
543 1.1 mrg rtx reg = DF_REF_REG (def);
544 1.1 mrg rtx orig_reg = reg;
545 1.1 mrg
546 1.1 mrg if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL))
547 1.1 mrg return;
548 1.1 mrg
549 1.1 mrg if (GET_CODE (reg) == SUBREG)
550 1.1 mrg reg = SUBREG_REG (reg);
551 1.1 mrg
552 1.1 mrg if (DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL)
553 1.1 mrg && (GET_CODE (orig_reg) != SUBREG
554 1.1 mrg || REGNO (reg) < FIRST_PSEUDO_REGISTER
555 1.1 mrg || !read_modify_subreg_p (orig_reg)))
556 1.1 mrg return;
557 1.1 mrg
558 1.1 mrg if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
559 1.1 mrg mark_pseudo_reg_dead (orig_reg, REGNO (reg));
560 1.1 mrg else
561 1.1 mrg mark_hard_reg_dead (reg);
562 1.1 mrg }
563 1.1 mrg
564 1.1 mrg /* If REG is a pseudo or a subreg of it, and the class of its allocno
565 1.1 mrg intersects CL, make a conflict with pseudo DREG. ORIG_DREG is the
566 1.1 mrg rtx actually accessed, it may be identical to DREG or a subreg of it.
567 1.1 mrg Advance the current program point before making the conflict if
568 1.1 mrg ADVANCE_P. Return TRUE if we will need to advance the current
569 1.1 mrg program point. */
570 1.1 mrg static bool
571 1.1 mrg make_pseudo_conflict (rtx reg, enum reg_class cl, rtx dreg, rtx orig_dreg,
572 1.1 mrg bool advance_p)
573 1.1 mrg {
574 1.1 mrg rtx orig_reg = reg;
575 1.1 mrg ira_allocno_t a;
576 1.1 mrg
577 1.1 mrg if (GET_CODE (reg) == SUBREG)
578 1.1 mrg reg = SUBREG_REG (reg);
579 1.1 mrg
580 1.1 mrg if (! REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
581 1.1 mrg return advance_p;
582 1.1 mrg
583 1.1 mrg a = ira_curr_regno_allocno_map[REGNO (reg)];
584 1.1 mrg if (! reg_classes_intersect_p (cl, ALLOCNO_CLASS (a)))
585 1.1 mrg return advance_p;
586 1.1 mrg
587 1.1 mrg if (advance_p)
588 1.1 mrg curr_point++;
589 1.1 mrg
590 1.1 mrg mark_pseudo_reg_live (orig_reg, REGNO (reg));
591 1.1 mrg mark_pseudo_reg_live (orig_dreg, REGNO (dreg));
592 1.1 mrg mark_pseudo_reg_dead (orig_reg, REGNO (reg));
593 1.1 mrg mark_pseudo_reg_dead (orig_dreg, REGNO (dreg));
594 1.1 mrg
595 1.1 mrg return false;
596 1.1 mrg }
597 1.1 mrg
598 1.1 mrg /* Check and make if necessary conflicts for pseudo DREG of class
599 1.1 mrg DEF_CL of the current insn with input operand USE of class USE_CL.
600 1.1 mrg ORIG_DREG is the rtx actually accessed, it may be identical to
601 1.1 mrg DREG or a subreg of it. Advance the current program point before
602 1.1 mrg making the conflict if ADVANCE_P. Return TRUE if we will need to
603 1.1 mrg advance the current program point. */
604 1.1 mrg static bool
605 1.1 mrg check_and_make_def_use_conflict (rtx dreg, rtx orig_dreg,
606 1.1 mrg enum reg_class def_cl, int use,
607 1.1 mrg enum reg_class use_cl, bool advance_p)
608 1.1 mrg {
609 1.1 mrg if (! reg_classes_intersect_p (def_cl, use_cl))
610 1.1 mrg return advance_p;
611 1.1 mrg
612 1.1 mrg advance_p = make_pseudo_conflict (recog_data.operand[use],
613 1.1 mrg use_cl, dreg, orig_dreg, advance_p);
614 1.1 mrg
615 1.1 mrg /* Reload may end up swapping commutative operands, so you
616 1.1 mrg have to take both orderings into account. The
617 1.1 mrg constraints for the two operands can be completely
618 1.1 mrg different. (Indeed, if the constraints for the two
619 1.1 mrg operands are the same for all alternatives, there's no
620 1.1 mrg point marking them as commutative.) */
621 1.1 mrg if (use < recog_data.n_operands - 1
622 1.1 mrg && recog_data.constraints[use][0] == '%')
623 1.1 mrg advance_p
624 1.1 mrg = make_pseudo_conflict (recog_data.operand[use + 1],
625 1.1 mrg use_cl, dreg, orig_dreg, advance_p);
626 1.1 mrg if (use >= 1
627 1.1 mrg && recog_data.constraints[use - 1][0] == '%')
628 1.1 mrg advance_p
629 1.1 mrg = make_pseudo_conflict (recog_data.operand[use - 1],
630 1.1 mrg use_cl, dreg, orig_dreg, advance_p);
631 1.1 mrg return advance_p;
632 1.1 mrg }
633 1.1 mrg
/* Check and make if necessary conflicts for definition DEF of class
   DEF_CL of the current insn with input operands.  Process only
   constraints of alternative ALT.

   One of three things is true when this function is called:

   (1) DEF is an earlyclobber for alternative ALT.  Input operands then
       conflict with DEF in ALT unless they explicitly match DEF via 0-9
       constraints.

   (2) DEF matches (via 0-9 constraints) an operand that is an
       earlyclobber for alternative ALT.  Other input operands then
       conflict with DEF in ALT.

   (3) [FOR_TIE_P] Some input operand X matches DEF for alternative ALT.
       Input operands with a different value from X then conflict with
       DEF in ALT.

   However, there's still a judgement call to make when deciding
   whether a conflict in ALT is important enough to be reflected
   in the pan-alternative allocno conflict set.  */
static void
check_and_make_def_conflict (int alt, int def, enum reg_class def_cl,
			     bool for_tie_p)
{
  int use, use_match;
  ira_allocno_t a;
  enum reg_class use_cl, acl;
  bool advance_p;
  rtx dreg = recog_data.operand[def];
  rtx orig_dreg = dreg;

  if (def_cl == NO_REGS)
    return;

  if (GET_CODE (dreg) == SUBREG)
    dreg = SUBREG_REG (dreg);

  /* Only pseudo defs can get allocno conflicts.  */
  if (! REG_P (dreg) || REGNO (dreg) < FIRST_PSEUDO_REGISTER)
    return;

  a = ira_curr_regno_allocno_map[REGNO (dreg)];
  acl = ALLOCNO_CLASS (a);
  if (! reg_classes_intersect_p (acl, def_cl))
    return;

  advance_p = true;

  int n_operands = recog_data.n_operands;
  const operand_alternative *op_alt = &recog_op_alt[alt * n_operands];
  for (use = 0; use < n_operands; use++)
    {
      int alt1;

      if (use == def || recog_data.operand_type[use] == OP_OUT)
	continue;

      /* An earlyclobber on DEF doesn't apply to an input operand X if X
	 explicitly matches DEF, but it applies to other input operands
	 even if they happen to be the same value as X.

	 In contrast, if an input operand X is tied to a non-earlyclobber
	 DEF, there's no conflict with other input operands that have the
	 same value as X.  */
      if (op_alt[use].matches == def
	  || (for_tie_p
	      && rtx_equal_p (recog_data.operand[use],
			      recog_data.operand[op_alt[def].matched])))
	continue;

      if (op_alt[use].anything_ok)
	use_cl = ALL_REGS;
      else
	use_cl = op_alt[use].cl;
      if (use_cl == NO_REGS)
	continue;

      /* If DEF is simply a tied operand, ignore cases in which this
	 alternative requires USE to have a likely-spilled class.
	 Adding a conflict would just constrain USE further if DEF
	 happens to be allocated first.  */
      if (for_tie_p && targetm.class_likely_spilled_p (use_cl))
	continue;

      /* If there's any alternative that allows USE to match DEF, do not
	 record a conflict.  If that causes us to create an invalid
	 instruction due to the earlyclobber, reload must fix it up.

	 Likewise, if we're treating a tied DEF like a partial earlyclobber,
	 do not record a conflict if there's another alternative in which
	 DEF is neither tied nor earlyclobber.  */
      for (alt1 = 0; alt1 < recog_data.n_alternatives; alt1++)
	{
	  if (!TEST_BIT (preferred_alternatives, alt1))
	    continue;
	  const operand_alternative *op_alt1
	    = &recog_op_alt[alt1 * n_operands];
	  /* Check USE matching DEF directly, or via either commutative
	     partner of USE (reload may swap them).  */
	  if (op_alt1[use].matches == def
	      || (use < n_operands - 1
		  && recog_data.constraints[use][0] == '%'
		  && op_alt1[use + 1].matches == def)
	      || (use >= 1
		  && recog_data.constraints[use - 1][0] == '%'
		  && op_alt1[use - 1].matches == def))
	    break;
	  if (for_tie_p
	      && !op_alt1[def].earlyclobber
	      && op_alt1[def].matched < 0
	      && alternative_class (op_alt1, def) != NO_REGS
	      && alternative_class (op_alt1, use) != NO_REGS)
	    break;
	}

      /* The inner loop broke out early iff some alternative excuses
	 the conflict.  */
      if (alt1 < recog_data.n_alternatives)
	continue;

      advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
						   use, use_cl, advance_p);

      /* If USE matches another operand, DEF must also conflict with
	 that operand's class.  */
      if ((use_match = op_alt[use].matches) >= 0)
	{
	  gcc_checking_assert (use_match != def);

	  if (op_alt[use_match].anything_ok)
	    use_cl = ALL_REGS;
	  else
	    use_cl = op_alt[use_match].cl;
	  advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
						       use, use_cl, advance_p);
	}
    }
}
766 1.1 mrg
/* Make conflicts of early clobber pseudo registers of the current
   insn with its inputs.  Avoid introducing unnecessary conflicts by
   checking classes of the constraints and pseudos because otherwise
   significant code degradation is possible for some targets.

   For these purposes, tying an input to an output makes that output act
   like an earlyclobber for inputs with a different value, since the output
   register then has a predetermined purpose on input to the instruction.  */
static void
make_early_clobber_and_input_conflicts (void)
{
  int alt;
  int def, def_match;
  enum reg_class def_cl;

  int n_alternatives = recog_data.n_alternatives;
  int n_operands = recog_data.n_operands;
  /* OP_ALT walks recog_op_alt one row (of N_OPERANDS entries) per
     alternative.  */
  const operand_alternative *op_alt = recog_op_alt;
  for (alt = 0; alt < n_alternatives; alt++, op_alt += n_operands)
    if (TEST_BIT (preferred_alternatives, alt))
      for (def = 0; def < n_operands; def++)
	{
	  if (op_alt[def].anything_ok)
	    def_cl = ALL_REGS;
	  else
	    def_cl = op_alt[def].cl;
	  if (def_cl != NO_REGS)
	    {
	      /* Case (1)/(3) of check_and_make_def_conflict: DEF is
		 itself an earlyclobber, or a non-likely-spilled tied
		 output treated as a partial earlyclobber.  */
	      if (op_alt[def].earlyclobber)
		check_and_make_def_conflict (alt, def, def_cl, false);
	      else if (op_alt[def].matched >= 0
		       && !targetm.class_likely_spilled_p (def_cl))
		check_and_make_def_conflict (alt, def, def_cl, true);
	    }

	  /* Case (2): DEF matches an operand that is an earlyclobber;
	     use the matched operand's class.  */
	  if ((def_match = op_alt[def].matches) >= 0
	      && (op_alt[def_match].earlyclobber
		  || op_alt[def].earlyclobber))
	    {
	      if (op_alt[def_match].anything_ok)
		def_cl = ALL_REGS;
	      else
		def_cl = op_alt[def_match].cl;
	      check_and_make_def_conflict (alt, def, def_cl, false);
	    }
	}
}
814 1.1 mrg
815 1.1 mrg /* Mark early clobber hard registers of the current INSN as live (if
816 1.1 mrg LIVE_P) or dead. Return true if there are such registers. */
817 1.1 mrg static bool
818 1.1 mrg mark_hard_reg_early_clobbers (rtx_insn *insn, bool live_p)
819 1.1 mrg {
820 1.1 mrg df_ref def;
821 1.1 mrg bool set_p = false;
822 1.1 mrg
823 1.1 mrg FOR_EACH_INSN_DEF (def, insn)
824 1.1 mrg if (DF_REF_FLAGS_IS_SET (def, DF_REF_MUST_CLOBBER))
825 1.1 mrg {
826 1.1 mrg rtx dreg = DF_REF_REG (def);
827 1.1 mrg
828 1.1 mrg if (GET_CODE (dreg) == SUBREG)
829 1.1 mrg dreg = SUBREG_REG (dreg);
830 1.1 mrg if (! REG_P (dreg) || REGNO (dreg) >= FIRST_PSEUDO_REGISTER)
831 1.1 mrg continue;
832 1.1 mrg
833 1.1 mrg /* Hard register clobbers are believed to be early clobber
834 1.1 mrg because there is no way to say that non-operand hard
835 1.1 mrg register clobbers are not early ones. */
836 1.1 mrg if (live_p)
837 1.1 mrg mark_ref_live (def);
838 1.1 mrg else
839 1.1 mrg mark_ref_dead (def);
840 1.1 mrg set_p = true;
841 1.1 mrg }
842 1.1 mrg
843 1.1 mrg return set_p;
844 1.1 mrg }
845 1.1 mrg
846 1.1 mrg /* Checks that CONSTRAINTS permits to use only one hard register. If
847 1.1 mrg it is so, the function returns the class of the hard register.
848 1.1 mrg Otherwise it returns NO_REGS. */
849 1.1 mrg static enum reg_class
850 1.1 mrg single_reg_class (const char *constraints, rtx op, rtx equiv_const)
851 1.1 mrg {
852 1.1 mrg int c;
853 1.1 mrg enum reg_class cl, next_cl;
854 1.1 mrg enum constraint_num cn;
855 1.1 mrg
856 1.1 mrg cl = NO_REGS;
857 1.1 mrg alternative_mask preferred = preferred_alternatives;
858 1.1 mrg while ((c = *constraints))
859 1.1 mrg {
860 1.1 mrg if (c == '#')
861 1.1 mrg preferred &= ~ALTERNATIVE_BIT (0);
862 1.1 mrg else if (c == ',')
863 1.1 mrg preferred >>= 1;
864 1.1 mrg else if (preferred & 1)
865 1.1 mrg switch (c)
866 1.1 mrg {
867 1.1 mrg case 'g':
868 1.1 mrg return NO_REGS;
869 1.1 mrg
870 1.1 mrg default:
871 1.1 mrg /* ??? Is this the best way to handle memory constraints? */
872 1.1 mrg cn = lookup_constraint (constraints);
873 1.1 mrg if (insn_extra_memory_constraint (cn)
874 1.1 mrg || insn_extra_special_memory_constraint (cn)
875 1.1 mrg || insn_extra_relaxed_memory_constraint (cn)
876 1.1 mrg || insn_extra_address_constraint (cn))
877 1.1 mrg return NO_REGS;
878 1.1 mrg if (constraint_satisfied_p (op, cn)
879 1.1 mrg || (equiv_const != NULL_RTX
880 1.1 mrg && CONSTANT_P (equiv_const)
881 1.1 mrg && constraint_satisfied_p (equiv_const, cn)))
882 1.1 mrg return NO_REGS;
883 1.1 mrg next_cl = reg_class_for_constraint (cn);
884 1.1 mrg if (next_cl == NO_REGS)
885 1.1 mrg break;
886 1.1 mrg if (cl == NO_REGS
887 1.1 mrg ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
888 1.1 mrg : (ira_class_singleton[cl][GET_MODE (op)]
889 1.1 mrg != ira_class_singleton[next_cl][GET_MODE (op)]))
890 1.1 mrg return NO_REGS;
891 1.1 mrg cl = next_cl;
892 1.1 mrg break;
893 1.1 mrg
894 1.1 mrg case '0': case '1': case '2': case '3': case '4':
895 1.1 mrg case '5': case '6': case '7': case '8': case '9':
896 1.1 mrg {
897 1.1 mrg char *end;
898 1.1 mrg unsigned long dup = strtoul (constraints, &end, 10);
899 1.1 mrg constraints = end;
900 1.1 mrg next_cl
901 1.1 mrg = single_reg_class (recog_data.constraints[dup],
902 1.1 mrg recog_data.operand[dup], NULL_RTX);
903 1.1 mrg if (cl == NO_REGS
904 1.1 mrg ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
905 1.1 mrg : (ira_class_singleton[cl][GET_MODE (op)]
906 1.1 mrg != ira_class_singleton[next_cl][GET_MODE (op)]))
907 1.1 mrg return NO_REGS;
908 1.1 mrg cl = next_cl;
909 1.1 mrg continue;
910 1.1 mrg }
911 1.1 mrg }
912 1.1 mrg constraints += CONSTRAINT_LEN (c, constraints);
913 1.1 mrg }
914 1.1 mrg return cl;
915 1.1 mrg }
916 1.1 mrg
917 1.1 mrg /* The function checks that operand OP_NUM of the current insn can use
918 1.1 mrg only one hard register. If it is so, the function returns the
919 1.1 mrg class of the hard register. Otherwise it returns NO_REGS. */
920 1.1 mrg static enum reg_class
921 1.1 mrg single_reg_operand_class (int op_num)
922 1.1 mrg {
923 1.1 mrg if (op_num < 0 || recog_data.n_alternatives == 0)
924 1.1 mrg return NO_REGS;
925 1.1 mrg return single_reg_class (recog_data.constraints[op_num],
926 1.1 mrg recog_data.operand[op_num], NULL_RTX);
927 1.1 mrg }
928 1.1 mrg
929 1.1 mrg /* The function sets up hard register set *SET to hard registers which
930 1.1 mrg might be used by insn reloads because the constraints are too
931 1.1 mrg strict. */
932 1.1 mrg void
933 1.1 mrg ira_implicitly_set_insn_hard_regs (HARD_REG_SET *set,
934 1.1 mrg alternative_mask preferred)
935 1.1 mrg {
936 1.1 mrg int i, c, regno = 0;
937 1.1 mrg enum reg_class cl;
938 1.1 mrg rtx op;
939 1.1 mrg machine_mode mode;
940 1.1 mrg
941 1.1 mrg CLEAR_HARD_REG_SET (*set);
942 1.1 mrg for (i = 0; i < recog_data.n_operands; i++)
943 1.1 mrg {
944 1.1 mrg op = recog_data.operand[i];
945 1.1 mrg
946 1.1 mrg if (GET_CODE (op) == SUBREG)
947 1.1 mrg op = SUBREG_REG (op);
948 1.1 mrg
949 1.1 mrg if (GET_CODE (op) == SCRATCH
950 1.1 mrg || (REG_P (op) && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER))
951 1.1 mrg {
952 1.1 mrg const char *p = recog_data.constraints[i];
953 1.1 mrg
954 1.1 mrg mode = (GET_CODE (op) == SCRATCH
955 1.1 mrg ? GET_MODE (op) : PSEUDO_REGNO_MODE (regno));
956 1.1 mrg cl = NO_REGS;
957 1.1 mrg for (; (c = *p); p += CONSTRAINT_LEN (c, p))
958 1.1 mrg if (c == '#')
959 1.1 mrg preferred &= ~ALTERNATIVE_BIT (0);
960 1.1 mrg else if (c == ',')
961 1.1 mrg preferred >>= 1;
962 1.1 mrg else if (preferred & 1)
963 1.1 mrg {
964 1.1 mrg cl = reg_class_for_constraint (lookup_constraint (p));
965 1.1 mrg if (cl != NO_REGS)
966 1.1 mrg {
967 1.1 mrg /* There is no register pressure problem if all of the
968 1.1 mrg regs in this class are fixed. */
969 1.1 mrg int regno = ira_class_singleton[cl][mode];
970 1.1 mrg if (regno >= 0)
971 1.1 mrg add_to_hard_reg_set (set, mode, regno);
972 1.1 mrg }
973 1.1 mrg }
974 1.1 mrg }
975 1.1 mrg }
976 1.1 mrg }
977 1.1 mrg /* Processes input operands, if IN_P, or output operands otherwise of
978 1.1 mrg the current insn with FREQ to find allocno which can use only one
979 1.1 mrg hard register and makes other currently living allocnos conflicting
980 1.1 mrg with the hard register. */
981 1.1 mrg static void
982 1.1 mrg process_single_reg_class_operands (bool in_p, int freq)
983 1.1 mrg {
984 1.1 mrg int i, regno;
985 1.1 mrg unsigned int px;
986 1.1 mrg enum reg_class cl;
987 1.1 mrg rtx operand;
988 1.1 mrg ira_allocno_t operand_a, a;
989 1.1 mrg
990 1.1 mrg for (i = 0; i < recog_data.n_operands; i++)
991 1.1 mrg {
992 1.1 mrg operand = recog_data.operand[i];
993 1.1 mrg if (in_p && recog_data.operand_type[i] != OP_IN
994 1.1 mrg && recog_data.operand_type[i] != OP_INOUT)
995 1.1 mrg continue;
996 1.1 mrg if (! in_p && recog_data.operand_type[i] != OP_OUT
997 1.1 mrg && recog_data.operand_type[i] != OP_INOUT)
998 1.1 mrg continue;
999 1.1 mrg cl = single_reg_operand_class (i);
1000 1.1 mrg if (cl == NO_REGS)
1001 1.1 mrg continue;
1002 1.1 mrg
1003 1.1 mrg operand_a = NULL;
1004 1.1 mrg
1005 1.1 mrg if (GET_CODE (operand) == SUBREG)
1006 1.1 mrg operand = SUBREG_REG (operand);
1007 1.1 mrg
1008 1.1 mrg if (REG_P (operand)
1009 1.1 mrg && (regno = REGNO (operand)) >= FIRST_PSEUDO_REGISTER)
1010 1.1 mrg {
1011 1.1 mrg enum reg_class aclass;
1012 1.1 mrg
1013 1.1 mrg operand_a = ira_curr_regno_allocno_map[regno];
1014 1.1 mrg aclass = ALLOCNO_CLASS (operand_a);
1015 1.1 mrg if (ira_class_subset_p[cl][aclass])
1016 1.1 mrg {
1017 1.1 mrg /* View the desired allocation of OPERAND as:
1018 1.1 mrg
1019 1.1 mrg (REG:YMODE YREGNO),
1020 1.1 mrg
1021 1.1 mrg a simplification of:
1022 1.1 mrg
1023 1.1 mrg (subreg:YMODE (reg:XMODE XREGNO) OFFSET). */
1024 1.1 mrg machine_mode ymode, xmode;
1025 1.1 mrg int xregno, yregno;
1026 1.1 mrg poly_int64 offset;
1027 1.1 mrg
1028 1.1 mrg xmode = recog_data.operand_mode[i];
1029 1.1 mrg xregno = ira_class_singleton[cl][xmode];
1030 1.1 mrg gcc_assert (xregno >= 0);
1031 1.1 mrg ymode = ALLOCNO_MODE (operand_a);
1032 1.1 mrg offset = subreg_lowpart_offset (ymode, xmode);
1033 1.1 mrg yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
1034 1.1 mrg if (yregno >= 0
1035 1.1 mrg && ira_class_hard_reg_index[aclass][yregno] >= 0)
1036 1.1 mrg {
1037 1.1 mrg int cost;
1038 1.1 mrg
1039 1.1 mrg ira_allocate_and_set_costs
1040 1.1 mrg (&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a),
1041 1.1 mrg aclass, 0);
1042 1.1 mrg ira_init_register_move_cost_if_necessary (xmode);
1043 1.1 mrg cost = freq * (in_p
1044 1.1 mrg ? ira_register_move_cost[xmode][aclass][cl]
1045 1.1 mrg : ira_register_move_cost[xmode][cl][aclass]);
1046 1.1 mrg ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a)
1047 1.1 mrg [ira_class_hard_reg_index[aclass][yregno]] -= cost;
1048 1.1 mrg }
1049 1.1 mrg }
1050 1.1 mrg }
1051 1.1 mrg
1052 1.1 mrg EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
1053 1.1 mrg {
1054 1.1 mrg ira_object_t obj = ira_object_id_map[px];
1055 1.1 mrg a = OBJECT_ALLOCNO (obj);
1056 1.1 mrg if (a != operand_a)
1057 1.1 mrg {
1058 1.1 mrg /* We could increase costs of A instead of making it
1059 1.1 mrg conflicting with the hard register. But it works worse
1060 1.1 mrg because it will be spilled in reload in anyway. */
1061 1.1 mrg OBJECT_CONFLICT_HARD_REGS (obj) |= reg_class_contents[cl];
1062 1.1 mrg OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= reg_class_contents[cl];
1063 1.1 mrg }
1064 1.1 mrg }
1065 1.1 mrg }
1066 1.1 mrg }
1067 1.1 mrg
1068 1.1 mrg /* Look through the CALL_INSN_FUNCTION_USAGE of a call insn INSN, and see if
1069 1.1 mrg we find a SET rtx that we can use to deduce that a register can be cheaply
1070 1.1 mrg caller-saved. Return such a register, or NULL_RTX if none is found. */
1071 1.1 mrg static rtx
1072 1.1 mrg find_call_crossed_cheap_reg (rtx_insn *insn)
1073 1.1 mrg {
1074 1.1 mrg rtx cheap_reg = NULL_RTX;
1075 1.1 mrg rtx exp = CALL_INSN_FUNCTION_USAGE (insn);
1076 1.1 mrg
1077 1.1 mrg while (exp != NULL)
1078 1.1 mrg {
1079 1.1 mrg rtx x = XEXP (exp, 0);
1080 1.1 mrg if (GET_CODE (x) == SET)
1081 1.1 mrg {
1082 1.1 mrg exp = x;
1083 1.1 mrg break;
1084 1.1 mrg }
1085 1.1 mrg exp = XEXP (exp, 1);
1086 1.1 mrg }
1087 1.1 mrg if (exp != NULL)
1088 1.1 mrg {
1089 1.1 mrg basic_block bb = BLOCK_FOR_INSN (insn);
1090 1.1 mrg rtx reg = SET_SRC (exp);
1091 1.1 mrg rtx_insn *prev = PREV_INSN (insn);
1092 1.1 mrg while (prev && !(INSN_P (prev)
1093 1.1 mrg && BLOCK_FOR_INSN (prev) != bb))
1094 1.1 mrg {
1095 1.1 mrg if (NONDEBUG_INSN_P (prev))
1096 1.1 mrg {
1097 1.1 mrg rtx set = single_set (prev);
1098 1.1 mrg
1099 1.1 mrg if (set && rtx_equal_p (SET_DEST (set), reg))
1100 1.1 mrg {
1101 1.1 mrg rtx src = SET_SRC (set);
1102 1.1 mrg if (!REG_P (src) || HARD_REGISTER_P (src)
1103 1.1 mrg || !pseudo_regno_single_word_and_live_p (REGNO (src)))
1104 1.1 mrg break;
1105 1.1 mrg if (!modified_between_p (src, prev, insn))
1106 1.1 mrg cheap_reg = src;
1107 1.1 mrg break;
1108 1.1 mrg }
1109 1.1 mrg if (set && rtx_equal_p (SET_SRC (set), reg))
1110 1.1 mrg {
1111 1.1 mrg rtx dest = SET_DEST (set);
1112 1.1 mrg if (!REG_P (dest) || HARD_REGISTER_P (dest)
1113 1.1 mrg || !pseudo_regno_single_word_and_live_p (REGNO (dest)))
1114 1.1 mrg break;
1115 1.1 mrg if (!modified_between_p (dest, prev, insn))
1116 1.1 mrg cheap_reg = dest;
1117 1.1 mrg break;
1118 1.1 mrg }
1119 1.1 mrg
1120 1.1 mrg if (reg_set_p (reg, prev))
1121 1.1 mrg break;
1122 1.1 mrg }
1123 1.1 mrg prev = PREV_INSN (prev);
1124 1.1 mrg }
1125 1.1 mrg }
1126 1.1 mrg return cheap_reg;
1127 1.1 mrg }
1128 1.1 mrg
1129 1.1 mrg /* Determine whether INSN is a register to register copy of the type where
1130 1.1 mrg we do not need to make the source and destiniation registers conflict.
1131 1.1 mrg If this is a copy instruction, then return the source reg. Otherwise,
1132 1.1 mrg return NULL_RTX. */
1133 1.1 mrg rtx
1134 1.1 mrg non_conflicting_reg_copy_p (rtx_insn *insn)
1135 1.1 mrg {
1136 1.1 mrg /* Reload has issues with overlapping pseudos being assigned to the
1137 1.1 mrg same hard register, so don't allow it. See PR87600 for details. */
1138 1.1 mrg if (!targetm.lra_p ())
1139 1.1 mrg return NULL_RTX;
1140 1.1 mrg
1141 1.1 mrg rtx set = single_set (insn);
1142 1.1 mrg
1143 1.1 mrg /* Disallow anything other than a simple register to register copy
1144 1.1 mrg that has no side effects. */
1145 1.1 mrg if (set == NULL_RTX
1146 1.1 mrg || !REG_P (SET_DEST (set))
1147 1.1 mrg || !REG_P (SET_SRC (set))
1148 1.1 mrg || side_effects_p (set))
1149 1.1 mrg return NULL_RTX;
1150 1.1 mrg
1151 1.1 mrg int dst_regno = REGNO (SET_DEST (set));
1152 1.1 mrg int src_regno = REGNO (SET_SRC (set));
1153 1.1 mrg machine_mode mode = GET_MODE (SET_DEST (set));
1154 1.1 mrg
1155 1.1 mrg /* By definition, a register does not conflict with itself, therefore we
1156 1.1 mrg do not have to handle it specially. Returning NULL_RTX now, helps
1157 1.1 mrg simplify the callers of this function. */
1158 1.1 mrg if (dst_regno == src_regno)
1159 1.1 mrg return NULL_RTX;
1160 1.1 mrg
1161 1.1 mrg /* Computing conflicts for register pairs is difficult to get right, so
1162 1.1 mrg for now, disallow it. */
1163 1.1 mrg if ((HARD_REGISTER_NUM_P (dst_regno)
1164 1.1 mrg && hard_regno_nregs (dst_regno, mode) != 1)
1165 1.1 mrg || (HARD_REGISTER_NUM_P (src_regno)
1166 1.1 mrg && hard_regno_nregs (src_regno, mode) != 1))
1167 1.1 mrg return NULL_RTX;
1168 1.1 mrg
1169 1.1 mrg return SET_SRC (set);
1170 1.1 mrg }
1171 1.1 mrg
1172 1.1 mrg #ifdef EH_RETURN_DATA_REGNO
1173 1.1 mrg
1174 1.1 mrg /* Add EH return hard registers as conflict hard registers to allocnos
1175 1.1 mrg living at end of BB. For most allocnos it is already done in
1176 1.1 mrg process_bb_node_lives when we processing input edges but it does
1177 1.1 mrg not work when and EH edge is edge out of the current region. This
1178 1.1 mrg function covers such out of region edges. */
1179 1.1 mrg static void
1180 1.1 mrg process_out_of_region_eh_regs (basic_block bb)
1181 1.1 mrg {
1182 1.1 mrg edge e;
1183 1.1 mrg edge_iterator ei;
1184 1.1 mrg unsigned int i;
1185 1.1 mrg bitmap_iterator bi;
1186 1.1 mrg bool eh_p = false;
1187 1.1 mrg
1188 1.1 mrg FOR_EACH_EDGE (e, ei, bb->succs)
1189 1.1 mrg if ((e->flags & EDGE_EH)
1190 1.1 mrg && IRA_BB_NODE (e->dest)->parent != IRA_BB_NODE (bb)->parent)
1191 1.1 mrg eh_p = true;
1192 1.1 mrg
1193 1.1 mrg if (! eh_p)
1194 1.1 mrg return;
1195 1.1 mrg
1196 1.1 mrg EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), FIRST_PSEUDO_REGISTER, i, bi)
1197 1.1 mrg {
1198 1.1 mrg ira_allocno_t a = ira_curr_regno_allocno_map[i];
1199 1.1 mrg for (int n = ALLOCNO_NUM_OBJECTS (a) - 1; n >= 0; n--)
1200 1.1 mrg {
1201 1.1 mrg ira_object_t obj = ALLOCNO_OBJECT (a, n);
1202 1.1 mrg for (int k = 0; ; k++)
1203 1.1 mrg {
1204 1.1 mrg unsigned int regno = EH_RETURN_DATA_REGNO (k);
1205 1.1 mrg if (regno == INVALID_REGNUM)
1206 1.1 mrg break;
1207 1.1 mrg SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
1208 1.1 mrg SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);
1209 1.1 mrg }
1210 1.1 mrg }
1211 1.1 mrg }
1212 1.1 mrg }
1213 1.1 mrg
1214 1.1 mrg #endif
1215 1.1 mrg
1216 1.1 mrg /* Process insns of the basic block given by its LOOP_TREE_NODE to
1217 1.1 mrg update allocno live ranges, allocno hard register conflicts,
1218 1.1 mrg intersected calls, and register pressure info for allocnos for the
1219 1.1 mrg basic block for and regions containing the basic block. */
1220 1.1 mrg static void
1221 1.1 mrg process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
1222 1.1 mrg {
1223 1.1 mrg int i, freq;
1224 1.1 mrg unsigned int j;
1225 1.1 mrg basic_block bb;
1226 1.1 mrg rtx_insn *insn;
1227 1.1 mrg bitmap_iterator bi;
1228 1.1 mrg bitmap reg_live_out;
1229 1.1 mrg unsigned int px;
1230 1.1 mrg bool set_p;
1231 1.1 mrg
1232 1.1 mrg bb = loop_tree_node->bb;
1233 1.1 mrg if (bb != NULL)
1234 1.1 mrg {
1235 1.1 mrg for (i = 0; i < ira_pressure_classes_num; i++)
1236 1.1 mrg {
1237 1.1 mrg curr_reg_pressure[ira_pressure_classes[i]] = 0;
1238 1.1 mrg high_pressure_start_point[ira_pressure_classes[i]] = -1;
1239 1.1 mrg }
1240 1.1 mrg curr_bb_node = loop_tree_node;
1241 1.1 mrg reg_live_out = df_get_live_out (bb);
1242 1.1 mrg sparseset_clear (objects_live);
1243 1.1 mrg REG_SET_TO_HARD_REG_SET (hard_regs_live, reg_live_out);
1244 1.1 mrg hard_regs_live &= ~(eliminable_regset | ira_no_alloc_regs);
1245 1.1 mrg for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1246 1.1 mrg if (TEST_HARD_REG_BIT (hard_regs_live, i))
1247 1.1 mrg {
1248 1.1 mrg enum reg_class aclass, pclass, cl;
1249 1.1 mrg
1250 1.1 mrg aclass = ira_allocno_class_translate[REGNO_REG_CLASS (i)];
1251 1.1 mrg pclass = ira_pressure_class_translate[aclass];
1252 1.1 mrg for (j = 0;
1253 1.1 mrg (cl = ira_reg_class_super_classes[pclass][j])
1254 1.1 mrg != LIM_REG_CLASSES;
1255 1.1 mrg j++)
1256 1.1 mrg {
1257 1.1 mrg if (! ira_reg_pressure_class_p[cl])
1258 1.1 mrg continue;
1259 1.1 mrg curr_reg_pressure[cl]++;
1260 1.1 mrg if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
1261 1.1 mrg curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
1262 1.1 mrg ira_assert (curr_reg_pressure[cl]
1263 1.1 mrg <= ira_class_hard_regs_num[cl]);
1264 1.1 mrg }
1265 1.1 mrg }
1266 1.1 mrg EXECUTE_IF_SET_IN_BITMAP (reg_live_out, FIRST_PSEUDO_REGISTER, j, bi)
1267 1.1 mrg mark_pseudo_regno_live (j);
1268 1.1 mrg
1269 1.1 mrg #ifdef EH_RETURN_DATA_REGNO
1270 1.1 mrg process_out_of_region_eh_regs (bb);
1271 1.1 mrg #endif
1272 1.1 mrg
1273 1.1 mrg freq = REG_FREQ_FROM_BB (bb);
1274 1.1 mrg if (freq == 0)
1275 1.1 mrg freq = 1;
1276 1.1 mrg
1277 1.1 mrg /* Invalidate all allocno_saved_at_call entries. */
1278 1.1 mrg last_call_num++;
1279 1.1 mrg
1280 1.1 mrg /* Scan the code of this basic block, noting which allocnos and
1281 1.1 mrg hard regs are born or die.
1282 1.1 mrg
1283 1.1 mrg Note that this loop treats uninitialized values as live until
1284 1.1 mrg the beginning of the block. For example, if an instruction
1285 1.1 mrg uses (reg:DI foo), and only (subreg:SI (reg:DI foo) 0) is ever
1286 1.1 mrg set, FOO will remain live until the beginning of the block.
1287 1.1 mrg Likewise if FOO is not set at all. This is unnecessarily
1288 1.1 mrg pessimistic, but it probably doesn't matter much in practice. */
1289 1.1 mrg FOR_BB_INSNS_REVERSE (bb, insn)
1290 1.1 mrg {
1291 1.1 mrg ira_allocno_t a;
1292 1.1 mrg df_ref def, use;
1293 1.1 mrg bool call_p;
1294 1.1 mrg
1295 1.1 mrg if (!NONDEBUG_INSN_P (insn))
1296 1.1 mrg continue;
1297 1.1 mrg
1298 1.1 mrg if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1299 1.1 mrg fprintf (ira_dump_file, " Insn %u(l%d): point = %d\n",
1300 1.1 mrg INSN_UID (insn), loop_tree_node->parent->loop_num,
1301 1.1 mrg curr_point);
1302 1.1 mrg
1303 1.1 mrg call_p = CALL_P (insn);
1304 1.1 mrg ignore_reg_for_conflicts = non_conflicting_reg_copy_p (insn);
1305 1.1 mrg
1306 1.1 mrg /* Mark each defined value as live. We need to do this for
1307 1.1 mrg unused values because they still conflict with quantities
1308 1.1 mrg that are live at the time of the definition.
1309 1.1 mrg
1310 1.1 mrg Ignore DF_REF_MAY_CLOBBERs on a call instruction. Such
1311 1.1 mrg references represent the effect of the called function
1312 1.1 mrg on a call-clobbered register. Marking the register as
1313 1.1 mrg live would stop us from allocating it to a call-crossing
1314 1.1 mrg allocno. */
1315 1.1 mrg FOR_EACH_INSN_DEF (def, insn)
1316 1.1 mrg if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
1317 1.1 mrg mark_ref_live (def);
1318 1.1 mrg
1319 1.1 mrg /* If INSN has multiple outputs, then any value used in one
1320 1.1 mrg of the outputs conflicts with the other outputs. Model this
1321 1.1 mrg by making the used value live during the output phase.
1322 1.1 mrg
1323 1.1 mrg It is unsafe to use !single_set here since it will ignore
1324 1.1 mrg an unused output. Just because an output is unused does
1325 1.1 mrg not mean the compiler can assume the side effect will not
1326 1.1 mrg occur. Consider if ALLOCNO appears in the address of an
1327 1.1 mrg output and we reload the output. If we allocate ALLOCNO
1328 1.1 mrg to the same hard register as an unused output we could
1329 1.1 mrg set the hard register before the output reload insn. */
1330 1.1 mrg if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1331 1.1 mrg FOR_EACH_INSN_USE (use, insn)
1332 1.1 mrg {
1333 1.1 mrg int i;
1334 1.1 mrg rtx reg;
1335 1.1 mrg
1336 1.1 mrg reg = DF_REF_REG (use);
1337 1.1 mrg for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1338 1.1 mrg {
1339 1.1 mrg rtx set;
1340 1.1 mrg
1341 1.1 mrg set = XVECEXP (PATTERN (insn), 0, i);
1342 1.1 mrg if (GET_CODE (set) == SET
1343 1.1 mrg && reg_overlap_mentioned_p (reg, SET_DEST (set)))
1344 1.1 mrg {
1345 1.1 mrg /* After the previous loop, this is a no-op if
1346 1.1 mrg REG is contained within SET_DEST (SET). */
1347 1.1 mrg mark_ref_live (use);
1348 1.1 mrg break;
1349 1.1 mrg }
1350 1.1 mrg }
1351 1.1 mrg }
1352 1.1 mrg
1353 1.1 mrg preferred_alternatives = ira_setup_alts (insn);
1354 1.1 mrg process_single_reg_class_operands (false, freq);
1355 1.1 mrg
1356 1.1 mrg if (call_p)
1357 1.1 mrg {
1358 1.1 mrg /* Try to find a SET in the CALL_INSN_FUNCTION_USAGE, and from
1359 1.1 mrg there, try to find a pseudo that is live across the call but
1360 1.1 mrg can be cheaply reconstructed from the return value. */
1361 1.1 mrg rtx cheap_reg = find_call_crossed_cheap_reg (insn);
1362 1.1 mrg if (cheap_reg != NULL_RTX)
1363 1.1 mrg add_reg_note (insn, REG_RETURNED, cheap_reg);
1364 1.1 mrg
1365 1.1 mrg last_call_num++;
1366 1.1 mrg sparseset_clear (allocnos_processed);
1367 1.1 mrg /* The current set of live allocnos are live across the call. */
1368 1.1 mrg EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
1369 1.1 mrg {
1370 1.1 mrg ira_object_t obj = ira_object_id_map[i];
1371 1.1 mrg a = OBJECT_ALLOCNO (obj);
1372 1.1 mrg int num = ALLOCNO_NUM (a);
1373 1.1 mrg function_abi callee_abi = insn_callee_abi (insn);
1374 1.1 mrg
1375 1.1 mrg /* Don't allocate allocnos that cross setjmps or any
1376 1.1 mrg call, if this function receives a nonlocal
1377 1.1 mrg goto. */
1378 1.1 mrg if (cfun->has_nonlocal_label
1379 1.1 mrg || (!targetm.setjmp_preserves_nonvolatile_regs_p ()
1380 1.1 mrg && (find_reg_note (insn, REG_SETJMP, NULL_RTX)
1381 1.1 mrg != NULL_RTX)))
1382 1.1 mrg {
1383 1.1 mrg SET_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj));
1384 1.1 mrg SET_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1385 1.1 mrg }
1386 1.1 mrg if (can_throw_internal (insn))
1387 1.1 mrg {
1388 1.1 mrg OBJECT_CONFLICT_HARD_REGS (obj)
1389 1.1 mrg |= callee_abi.mode_clobbers (ALLOCNO_MODE (a));
1390 1.1 mrg OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
1391 1.1 mrg |= callee_abi.mode_clobbers (ALLOCNO_MODE (a));
1392 1.1 mrg }
1393 1.1 mrg
1394 1.1 mrg if (sparseset_bit_p (allocnos_processed, num))
1395 1.1 mrg continue;
1396 1.1 mrg sparseset_set_bit (allocnos_processed, num);
1397 1.1 mrg
1398 1.1 mrg if (allocno_saved_at_call[num] != last_call_num)
1399 1.1 mrg /* Here we are mimicking caller-save.cc behavior
1400 1.1 mrg which does not save hard register at a call if
1401 1.1 mrg it was saved on previous call in the same basic
1402 1.1 mrg block and the hard register was not mentioned
1403 1.1 mrg between the two calls. */
1404 1.1 mrg ALLOCNO_CALL_FREQ (a) += freq;
1405 1.1 mrg /* Mark it as saved at the next call. */
1406 1.1 mrg allocno_saved_at_call[num] = last_call_num + 1;
1407 1.1 mrg ALLOCNO_CALLS_CROSSED_NUM (a)++;
1408 1.1 mrg ALLOCNO_CROSSED_CALLS_ABIS (a) |= 1 << callee_abi.id ();
1409 1.1 mrg ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
1410 1.1 mrg |= callee_abi.full_and_partial_reg_clobbers ();
1411 1.1 mrg if (cheap_reg != NULL_RTX
1412 1.1 mrg && ALLOCNO_REGNO (a) == (int) REGNO (cheap_reg))
1413 1.1 mrg ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)++;
1414 1.1 mrg }
1415 1.1 mrg }
1416 1.1 mrg
1417 1.1 mrg /* See which defined values die here. Note that we include
1418 1.1 mrg the call insn in the lifetimes of these values, so we don't
1419 1.1 mrg mistakenly consider, for e.g. an addressing mode with a
1420 1.1 mrg side-effect like a post-increment fetching the address,
1421 1.1 mrg that the use happens before the call, and the def to happen
1422 1.1 mrg after the call: we believe both to happen before the actual
1423 1.1 mrg call. (We don't handle return-values here.) */
1424 1.1 mrg FOR_EACH_INSN_DEF (def, insn)
1425 1.1 mrg if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
1426 1.1 mrg mark_ref_dead (def);
1427 1.1 mrg
1428 1.1 mrg make_early_clobber_and_input_conflicts ();
1429 1.1 mrg
1430 1.1 mrg curr_point++;
1431 1.1 mrg
1432 1.1 mrg /* Mark each used value as live. */
1433 1.1 mrg FOR_EACH_INSN_USE (use, insn)
1434 1.1 mrg mark_ref_live (use);
1435 1.1 mrg
1436 1.1 mrg process_single_reg_class_operands (true, freq);
1437 1.1 mrg
1438 1.1 mrg set_p = mark_hard_reg_early_clobbers (insn, true);
1439 1.1 mrg
1440 1.1 mrg if (set_p)
1441 1.1 mrg {
1442 1.1 mrg mark_hard_reg_early_clobbers (insn, false);
1443 1.1 mrg
1444 1.1 mrg /* Mark each hard reg as live again. For example, a
1445 1.1 mrg hard register can be in clobber and in an insn
1446 1.1 mrg input. */
1447 1.1 mrg FOR_EACH_INSN_USE (use, insn)
1448 1.1 mrg {
1449 1.1 mrg rtx ureg = DF_REF_REG (use);
1450 1.1 mrg
1451 1.1 mrg if (GET_CODE (ureg) == SUBREG)
1452 1.1 mrg ureg = SUBREG_REG (ureg);
1453 1.1 mrg if (! REG_P (ureg) || REGNO (ureg) >= FIRST_PSEUDO_REGISTER)
1454 1.1 mrg continue;
1455 1.1 mrg
1456 1.1 mrg mark_ref_live (use);
1457 1.1 mrg }
1458 1.1 mrg }
1459 1.1 mrg
1460 1.1 mrg curr_point++;
1461 1.1 mrg }
1462 1.1 mrg ignore_reg_for_conflicts = NULL_RTX;
1463 1.1 mrg
1464 1.1 mrg if (bb_has_eh_pred (bb))
1465 1.1 mrg for (j = 0; ; ++j)
1466 1.1 mrg {
1467 1.1 mrg unsigned int regno = EH_RETURN_DATA_REGNO (j);
1468 1.1 mrg if (regno == INVALID_REGNUM)
1469 1.1 mrg break;
1470 1.1 mrg make_hard_regno_live (regno);
1471 1.1 mrg }
1472 1.1 mrg
1473 1.1 mrg /* Allocnos can't go in stack regs at the start of a basic block
1474 1.1 mrg that is reached by an abnormal edge. Likewise for registers
1475 1.1 mrg that are at least partly call clobbered, because caller-save,
1476 1.1 mrg fixup_abnormal_edges and possibly the table driven EH machinery
1477 1.1 mrg are not quite ready to handle such allocnos live across such
1478 1.1 mrg edges. */
1479 1.1 mrg if (bb_has_abnormal_pred (bb))
1480 1.1 mrg {
1481 1.1 mrg #ifdef STACK_REGS
1482 1.1 mrg EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
1483 1.1 mrg {
1484 1.1 mrg ira_allocno_t a = OBJECT_ALLOCNO (ira_object_id_map[px]);
1485 1.1 mrg
1486 1.1 mrg ALLOCNO_NO_STACK_REG_P (a) = true;
1487 1.1 mrg ALLOCNO_TOTAL_NO_STACK_REG_P (a) = true;
1488 1.1 mrg }
1489 1.1 mrg for (px = FIRST_STACK_REG; px <= LAST_STACK_REG; px++)
1490 1.1 mrg make_hard_regno_live (px);
1491 1.1 mrg #endif
1492 1.1 mrg /* No need to record conflicts for call clobbered regs if we
1493 1.1 mrg have nonlocal labels around, as we don't ever try to
1494 1.1 mrg allocate such regs in this case. */
1495 1.1 mrg if (!cfun->has_nonlocal_label
1496 1.1 mrg && has_abnormal_call_or_eh_pred_edge_p (bb))
1497 1.1 mrg for (px = 0; px < FIRST_PSEUDO_REGISTER; px++)
1498 1.1 mrg if (eh_edge_abi.clobbers_at_least_part_of_reg_p (px)
1499 1.1 mrg #ifdef REAL_PIC_OFFSET_TABLE_REGNUM
1500 1.1 mrg /* We should create a conflict of PIC pseudo with
1501 1.1 mrg PIC hard reg as PIC hard reg can have a wrong
1502 1.1 mrg value after jump described by the abnormal edge.
1503 1.1 mrg In this case we cannot allocate PIC hard reg to
1504 1.1 mrg PIC pseudo as PIC pseudo will also have a wrong
1505 1.1 mrg value. This code is not critical as LRA can fix
1506 1.1 mrg it but it is better to have the right allocation
1507 1.1 mrg earlier. */
1508 1.1 mrg || (px == REAL_PIC_OFFSET_TABLE_REGNUM
1509 1.1 mrg && pic_offset_table_rtx != NULL_RTX
1510 1.1 mrg && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
1511 1.1 mrg #endif
1512 1.1 mrg )
1513 1.1 mrg make_hard_regno_live (px);
1514 1.1 mrg }
1515 1.1 mrg
1516 1.1 mrg EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
1517 1.1 mrg make_object_dead (ira_object_id_map[i]);
1518 1.1 mrg
1519 1.1 mrg curr_point++;
1520 1.1 mrg
1521 1.1 mrg }
1522 1.1 mrg /* Propagate register pressure to upper loop tree nodes. */
1523 1.1 mrg if (loop_tree_node != ira_loop_tree_root)
1524 1.1 mrg for (i = 0; i < ira_pressure_classes_num; i++)
1525 1.1 mrg {
1526 1.1 mrg enum reg_class pclass;
1527 1.1 mrg
1528 1.1 mrg pclass = ira_pressure_classes[i];
1529 1.1 mrg if (loop_tree_node->reg_pressure[pclass]
1530 1.1 mrg > loop_tree_node->parent->reg_pressure[pclass])
1531 1.1 mrg loop_tree_node->parent->reg_pressure[pclass]
1532 1.1 mrg = loop_tree_node->reg_pressure[pclass];
1533 1.1 mrg }
1534 1.1 mrg }
1535 1.1 mrg
1536 1.1 mrg /* Create and set up IRA_START_POINT_RANGES and
1537 1.1 mrg IRA_FINISH_POINT_RANGES. */
1538 1.1 mrg static void
1539 1.1 mrg create_start_finish_chains (void)
1540 1.1 mrg {
1541 1.1 mrg ira_object_t obj;
1542 1.1 mrg ira_object_iterator oi;
1543 1.1 mrg live_range_t r;
1544 1.1 mrg
1545 1.1 mrg ira_start_point_ranges
1546 1.1 mrg = (live_range_t *) ira_allocate (ira_max_point * sizeof (live_range_t));
1547 1.1 mrg memset (ira_start_point_ranges, 0, ira_max_point * sizeof (live_range_t));
1548 1.1 mrg ira_finish_point_ranges
1549 1.1 mrg = (live_range_t *) ira_allocate (ira_max_point * sizeof (live_range_t));
1550 1.1 mrg memset (ira_finish_point_ranges, 0, ira_max_point * sizeof (live_range_t));
1551 1.1 mrg FOR_EACH_OBJECT (obj, oi)
1552 1.1 mrg for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
1553 1.1 mrg {
1554 1.1 mrg r->start_next = ira_start_point_ranges[r->start];
1555 1.1 mrg ira_start_point_ranges[r->start] = r;
1556 1.1 mrg r->finish_next = ira_finish_point_ranges[r->finish];
1557 1.1 mrg ira_finish_point_ranges[r->finish] = r;
1558 1.1 mrg }
1559 1.1 mrg }
1560 1.1 mrg
1561 1.1 mrg /* Rebuild IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES after
1562 1.1 mrg new live ranges and program points were added as a result if new
1563 1.1 mrg insn generation. */
1564 1.1 mrg void
1565 1.1 mrg ira_rebuild_start_finish_chains (void)
1566 1.1 mrg {
1567 1.1 mrg ira_free (ira_finish_point_ranges);
1568 1.1 mrg ira_free (ira_start_point_ranges);
1569 1.1 mrg create_start_finish_chains ();
1570 1.1 mrg }
1571 1.1 mrg
1572 1.1 mrg /* Compress allocno live ranges by removing program points where
1573 1.1 mrg nothing happens. */
1574 1.1 mrg static void
1575 1.1 mrg remove_some_program_points_and_update_live_ranges (void)
1576 1.1 mrg {
1577 1.1 mrg unsigned i;
1578 1.1 mrg int n;
1579 1.1 mrg int *map;
1580 1.1 mrg ira_object_t obj;
1581 1.1 mrg ira_object_iterator oi;
1582 1.1 mrg live_range_t r, prev_r, next_r;
1583 1.1 mrg sbitmap_iterator sbi;
1584 1.1 mrg bool born_p, dead_p, prev_born_p, prev_dead_p;
1585 1.1 mrg
1586 1.1 mrg auto_sbitmap born (ira_max_point);
1587 1.1 mrg auto_sbitmap dead (ira_max_point);
1588 1.1 mrg bitmap_clear (born);
1589 1.1 mrg bitmap_clear (dead);
1590 1.1 mrg FOR_EACH_OBJECT (obj, oi)
1591 1.1 mrg for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
1592 1.1 mrg {
1593 1.1 mrg ira_assert (r->start <= r->finish);
1594 1.1 mrg bitmap_set_bit (born, r->start);
1595 1.1 mrg bitmap_set_bit (dead, r->finish);
1596 1.1 mrg }
1597 1.1 mrg
1598 1.1 mrg auto_sbitmap born_or_dead (ira_max_point);
1599 1.1 mrg bitmap_ior (born_or_dead, born, dead);
1600 1.1 mrg map = (int *) ira_allocate (sizeof (int) * ira_max_point);
1601 1.1 mrg n = -1;
1602 1.1 mrg prev_born_p = prev_dead_p = false;
1603 1.1 mrg EXECUTE_IF_SET_IN_BITMAP (born_or_dead, 0, i, sbi)
1604 1.1 mrg {
1605 1.1 mrg born_p = bitmap_bit_p (born, i);
1606 1.1 mrg dead_p = bitmap_bit_p (dead, i);
1607 1.1 mrg if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
1608 1.1 mrg || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
1609 1.1 mrg map[i] = n;
1610 1.1 mrg else
1611 1.1 mrg map[i] = ++n;
1612 1.1 mrg prev_born_p = born_p;
1613 1.1 mrg prev_dead_p = dead_p;
1614 1.1 mrg }
1615 1.1 mrg
1616 1.1 mrg n++;
1617 1.1 mrg if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1618 1.1 mrg fprintf (ira_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
1619 1.1 mrg ira_max_point, n, 100 * n / ira_max_point);
1620 1.1 mrg ira_max_point = n;
1621 1.1 mrg
1622 1.1 mrg FOR_EACH_OBJECT (obj, oi)
1623 1.1 mrg for (r = OBJECT_LIVE_RANGES (obj), prev_r = NULL; r != NULL; r = next_r)
1624 1.1 mrg {
1625 1.1 mrg next_r = r->next;
1626 1.1 mrg r->start = map[r->start];
1627 1.1 mrg r->finish = map[r->finish];
1628 1.1 mrg if (prev_r == NULL || prev_r->start > r->finish + 1)
1629 1.1 mrg {
1630 1.1 mrg prev_r = r;
1631 1.1 mrg continue;
1632 1.1 mrg }
1633 1.1 mrg prev_r->start = r->start;
1634 1.1 mrg prev_r->next = next_r;
1635 1.1 mrg ira_finish_live_range (r);
1636 1.1 mrg }
1637 1.1 mrg
1638 1.1 mrg ira_free (map);
1639 1.1 mrg }
1640 1.1 mrg
1641 1.1 mrg /* Print live ranges R to file F. */
1642 1.1 mrg void
1643 1.1 mrg ira_print_live_range_list (FILE *f, live_range_t r)
1644 1.1 mrg {
1645 1.1 mrg for (; r != NULL; r = r->next)
1646 1.1 mrg fprintf (f, " [%d..%d]", r->start, r->finish);
1647 1.1 mrg fprintf (f, "\n");
1648 1.1 mrg }
1649 1.1 mrg
1650 1.1 mrg DEBUG_FUNCTION void
1651 1.1 mrg debug (live_range &ref)
1652 1.1 mrg {
1653 1.1 mrg ira_print_live_range_list (stderr, &ref);
1654 1.1 mrg }
1655 1.1 mrg
1656 1.1 mrg DEBUG_FUNCTION void
1657 1.1 mrg debug (live_range *ptr)
1658 1.1 mrg {
1659 1.1 mrg if (ptr)
1660 1.1 mrg debug (*ptr);
1661 1.1 mrg else
1662 1.1 mrg fprintf (stderr, "<nil>\n");
1663 1.1 mrg }
1664 1.1 mrg
/* Print live ranges R to stderr.  Convenience wrapper intended for
   use from the debugger.  */
void
ira_debug_live_range_list (live_range_t r)
{
  ira_print_live_range_list (stderr, r);
}
1671 1.1 mrg
1672 1.1 mrg /* Print live ranges of object OBJ to file F. */
1673 1.1 mrg static void
1674 1.1 mrg print_object_live_ranges (FILE *f, ira_object_t obj)
1675 1.1 mrg {
1676 1.1 mrg ira_print_live_range_list (f, OBJECT_LIVE_RANGES (obj));
1677 1.1 mrg }
1678 1.1 mrg
1679 1.1 mrg /* Print live ranges of allocno A to file F. */
1680 1.1 mrg static void
1681 1.1 mrg print_allocno_live_ranges (FILE *f, ira_allocno_t a)
1682 1.1 mrg {
1683 1.1 mrg int n = ALLOCNO_NUM_OBJECTS (a);
1684 1.1 mrg int i;
1685 1.1 mrg
1686 1.1 mrg for (i = 0; i < n; i++)
1687 1.1 mrg {
1688 1.1 mrg fprintf (f, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1689 1.1 mrg if (n > 1)
1690 1.1 mrg fprintf (f, " [%d]", i);
1691 1.1 mrg fprintf (f, "):");
1692 1.1 mrg print_object_live_ranges (f, ALLOCNO_OBJECT (a, i));
1693 1.1 mrg }
1694 1.1 mrg }
1695 1.1 mrg
/* Print live ranges of allocno A to stderr.  Convenience wrapper
   intended for use from the debugger.  */
void
ira_debug_allocno_live_ranges (ira_allocno_t a)
{
  print_allocno_live_ranges (stderr, a);
}
1702 1.1 mrg
/* Print live ranges of all allocnos to file F.  */
static void
print_live_ranges (FILE *f)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    print_allocno_live_ranges (f, a);
}
1713 1.1 mrg
/* Print live ranges of all allocnos to stderr.  Convenience wrapper
   intended for use from the debugger.  */
void
ira_debug_live_ranges (void)
{
  print_live_ranges (stderr);
}
1720 1.1 mrg
/* The main entry function creates live ranges, sets up
   CONFLICT_HARD_REGS and TOTAL_CONFLICT_HARD_REGS for objects, and
   calculates register pressure info.  */
void
ira_create_allocno_live_ranges (void)
{
  /* Working sets used during the loop-tree walk.  */
  objects_live = sparseset_alloc (ira_objects_num);
  allocnos_processed = sparseset_alloc (ira_allocnos_num);
  curr_point = 0;
  last_call_num = 0;
  /* Scratch array indexed by allocno number; zero-initialized before
     the walk.  */
  allocno_saved_at_call
    = (int *) ira_allocate (ira_allocnos_num * sizeof (int));
  memset (allocno_saved_at_call, 0, ira_allocnos_num * sizeof (int));
  /* Walk the whole loop tree; process_bb_node_lives builds the live
     ranges, leaving CURR_POINT at the total number of program
     points.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root, NULL,
			  process_bb_node_lives);
  ira_max_point = curr_point;
  /* Chains must be built only after IRA_MAX_POINT is final, since the
     chain arrays are sized by it.  */
  create_start_finish_chains ();
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_live_ranges (ira_dump_file);
  /* Clean up.  */
  ira_free (allocno_saved_at_call);
  sparseset_free (objects_live);
  sparseset_free (allocnos_processed);
}
1745 1.1 mrg
/* Compress allocno live ranges.  */
void
ira_compress_allocno_live_ranges (void)
{
  /* Renumber program points to drop those where nothing is born or
     dies; the start/finish chains are indexed by program point, so
     they must be rebuilt afterwards.  */
  remove_some_program_points_and_update_live_ranges ();
  ira_rebuild_start_finish_chains ();
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "Ranges after the compression:\n");
      print_live_ranges (ira_dump_file);
    }
}
1758 1.1 mrg
/* Free arrays IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES.
   Counterpart of the allocation done in create_start_finish_chains.  */
void
ira_finish_allocno_live_ranges (void)
{
  ira_free (ira_finish_point_ranges);
  ira_free (ira_start_point_ranges);
}
1766