lra.cc revision 1.1 1 1.1 mrg /* LRA (local register allocator) driver and LRA utilities.
2 1.1 mrg Copyright (C) 2010-2022 Free Software Foundation, Inc.
3 1.1 mrg Contributed by Vladimir Makarov <vmakarov (at) redhat.com>.
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it under
8 1.1 mrg the terms of the GNU General Public License as published by the Free
9 1.1 mrg Software Foundation; either version 3, or (at your option) any later
10 1.1 mrg version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 1.1 mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 1.1 mrg for more details.
16 1.1 mrg
17 1.1 mrg You should have received a copy of the GNU General Public License
18 1.1 mrg along with GCC; see the file COPYING3. If not see
19 1.1 mrg <http://www.gnu.org/licenses/>. */
20 1.1 mrg
21 1.1 mrg
22 1.1 mrg /* The Local Register Allocator (LRA) is a replacement of former
23 1.1 mrg reload pass. It is focused to simplify code solving the reload
24 1.1 mrg pass tasks, to make the code maintenance easier, and to implement new
25 1.1 mrg perspective optimizations.
26 1.1 mrg
27 1.1 mrg The major LRA design solutions are:
     o division into small, manageable, separate sub-tasks
29 1.1 mrg o reflection of all transformations and decisions in RTL as more
30 1.1 mrg as possible
31 1.1 mrg o insn constraints as a primary source of the info (minimizing
32 1.1 mrg number of target-depended macros/hooks)
33 1.1 mrg
34 1.1 mrg In brief LRA works by iterative insn process with the final goal is
35 1.1 mrg to satisfy all insn and address constraints:
36 1.1 mrg o New reload insns (in brief reloads) and reload pseudos might be
37 1.1 mrg generated;
38 1.1 mrg o Some pseudos might be spilled to assign hard registers to
39 1.1 mrg new reload pseudos;
40 1.1 mrg o Recalculating spilled pseudo values (rematerialization);
41 1.1 mrg o Changing spilled pseudos to stack memory or their equivalences;
42 1.1 mrg o Allocation stack memory changes the address displacement and
43 1.1 mrg new iteration is needed.
44 1.1 mrg
45 1.1 mrg Here is block diagram of LRA passes:
46 1.1 mrg
47 1.1 mrg ------------------------
48 1.1 mrg --------------- | Undo inheritance for | ---------------
49 1.1 mrg | Memory-memory | | spilled pseudos, | | New (and old) |
50 1.1 mrg | move coalesce |<---| splits for pseudos got |<-- | pseudos |
51 1.1 mrg --------------- | the same hard regs, | | assignment |
52 1.1 mrg Start | | and optional reloads | ---------------
53 1.1 mrg | | ------------------------ ^
54 1.1 mrg V | ---------------- |
55 1.1 mrg ----------- V | Update virtual | |
56 1.1 mrg | Remove |----> ------------>| register | |
57 1.1 mrg | scratches | ^ | displacements | |
58 1.1 mrg ----------- | ---------------- |
59 1.1 mrg | | |
60 1.1 mrg | V New |
61 1.1 mrg | ------------ pseudos -------------------
62 1.1 mrg | |Constraints:| or insns | Inheritance/split |
63 1.1 mrg | | RTL |--------->| transformations |
64 1.1 mrg | | transfor- | | in EBB scope |
65 1.1 mrg | substi- | mations | -------------------
66 1.1 mrg | tutions ------------
67 1.1 mrg | | No change
68 1.1 mrg ---------------- V
69 1.1 mrg | Spilled pseudo | -------------------
70 1.1 mrg | to memory |<----| Rematerialization |
71 1.1 mrg | substitution | -------------------
72 1.1 mrg ----------------
            | No substitutions
74 1.1 mrg V
75 1.1 mrg -------------------------
76 1.1 mrg | Hard regs substitution, |
77 1.1 mrg | devirtalization, and |------> Finish
78 1.1 mrg | restoring scratches got |
79 1.1 mrg | memory |
80 1.1 mrg -------------------------
81 1.1 mrg
82 1.1 mrg To speed up the process:
83 1.1 mrg o We process only insns affected by changes on previous
84 1.1 mrg iterations;
85 1.1 mrg o We don't use DFA-infrastructure because it results in much slower
86 1.1 mrg compiler speed than a special IR described below does;
87 1.1 mrg o We use a special insn representation for quick access to insn
88 1.1 mrg info which is always *synchronized* with the current RTL;
89 1.1 mrg o Insn IR is minimized by memory. It is divided on three parts:
90 1.1 mrg o one specific for each insn in RTL (only operand locations);
91 1.1 mrg o one common for all insns in RTL with the same insn code
92 1.1 mrg (different operand attributes from machine descriptions);
93 1.1 mrg o one oriented for maintenance of live info (list of pseudos).
94 1.1 mrg o Pseudo data:
95 1.1 mrg o all insns where the pseudo is referenced;
96 1.1 mrg o live info (conflicting hard regs, live ranges, # of
97 1.1 mrg references etc);
98 1.1 mrg o data used for assigning (preferred hard regs, costs etc).
99 1.1 mrg
100 1.1 mrg This file contains LRA driver, LRA utility functions and data, and
101 1.1 mrg code for dealing with scratches. */
102 1.1 mrg
103 1.1 mrg #include "config.h"
104 1.1 mrg #include "system.h"
105 1.1 mrg #include "coretypes.h"
106 1.1 mrg #include "backend.h"
107 1.1 mrg #include "target.h"
108 1.1 mrg #include "rtl.h"
109 1.1 mrg #include "tree.h"
110 1.1 mrg #include "predict.h"
111 1.1 mrg #include "df.h"
112 1.1 mrg #include "memmodel.h"
113 1.1 mrg #include "tm_p.h"
114 1.1 mrg #include "optabs.h"
115 1.1 mrg #include "regs.h"
116 1.1 mrg #include "ira.h"
117 1.1 mrg #include "recog.h"
118 1.1 mrg #include "expr.h"
119 1.1 mrg #include "cfgrtl.h"
120 1.1 mrg #include "cfgbuild.h"
121 1.1 mrg #include "lra.h"
122 1.1 mrg #include "lra-int.h"
123 1.1 mrg #include "print-rtl.h"
124 1.1 mrg #include "function-abi.h"
125 1.1 mrg
126 1.1 mrg /* Dump bitmap SET with TITLE and BB INDEX. */
127 1.1 mrg void
128 1.1 mrg lra_dump_bitmap_with_title (const char *title, bitmap set, int index)
129 1.1 mrg {
130 1.1 mrg unsigned int i;
131 1.1 mrg int count;
132 1.1 mrg bitmap_iterator bi;
133 1.1 mrg static const int max_nums_on_line = 10;
134 1.1 mrg
135 1.1 mrg if (bitmap_empty_p (set))
136 1.1 mrg return;
137 1.1 mrg fprintf (lra_dump_file, " %s %d:", title, index);
138 1.1 mrg fprintf (lra_dump_file, "\n");
139 1.1 mrg count = max_nums_on_line + 1;
140 1.1 mrg EXECUTE_IF_SET_IN_BITMAP (set, 0, i, bi)
141 1.1 mrg {
142 1.1 mrg if (count > max_nums_on_line)
143 1.1 mrg {
144 1.1 mrg fprintf (lra_dump_file, "\n ");
145 1.1 mrg count = 0;
146 1.1 mrg }
147 1.1 mrg fprintf (lra_dump_file, " %4u", i);
148 1.1 mrg count++;
149 1.1 mrg }
150 1.1 mrg fprintf (lra_dump_file, "\n");
151 1.1 mrg }
152 1.1 mrg
/* Hard registers currently not available for allocation.  It can be
   changed after some hard registers become not eliminable.  */
155 1.1 mrg HARD_REG_SET lra_no_alloc_regs;
156 1.1 mrg
157 1.1 mrg static int get_new_reg_value (void);
158 1.1 mrg static void expand_reg_info (void);
159 1.1 mrg static void invalidate_insn_recog_data (int);
160 1.1 mrg static int get_insn_freq (rtx_insn *);
161 1.1 mrg static void invalidate_insn_data_regno_info (lra_insn_recog_data_t,
162 1.1 mrg rtx_insn *, int);
/* Expand all regno related info needed for LRA.  OLD is the value of
   max_reg_num () before the pseudos being covered were created; every
   pseudo with number >= OLD gets ALL_REGS as its initial class.  */
static void
expand_reg_data (int old)
{
  resize_reg_info ();
  expand_reg_info ();
  ira_expand_reg_equiv ();
  /* New pseudos have no allocno class yet; start them in ALL_REGS so
     the constraints pass can narrow the class later.  */
  for (int i = (int) max_reg_num () - 1; i >= old; i--)
    lra_change_class (i, ALL_REGS, "      Set", true);
}
173 1.1 mrg
/* Create and return a new reg of ORIGINAL mode.  If ORIGINAL is NULL
   or of VOIDmode, use MD_MODE for the new reg.  Initialize its
   register class to RCLASS.  Print message about assigning class
   RCLASS containing new register name TITLE unless it is NULL.  Use
   attributes of ORIGINAL if it is a register.  The created register
   will have unique held value.  EXCLUDE_START_HARD_REGS, if non-NULL,
   is recorded as the set of hard regs the new pseudo may not start
   in.  */
rtx
lra_create_new_reg_with_unique_value (machine_mode md_mode, rtx original,
				      enum reg_class rclass,
				      HARD_REG_SET *exclude_start_hard_regs,
				      const char *title)
{
  machine_mode mode;
  rtx new_reg;

  if (original == NULL_RTX || (mode = GET_MODE (original)) == VOIDmode)
    mode = md_mode;
  lra_assert (mode != VOIDmode);
  new_reg = gen_reg_rtx (mode);
  if (original == NULL_RTX || ! REG_P (original))
    {
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "      Creating newreg=%i", REGNO (new_reg));
    }
  else
    {
      /* Inherit the user-visible attributes of the original reg so
	 debug info and pointer-ness are preserved.  */
      if (ORIGINAL_REGNO (original) >= FIRST_PSEUDO_REGISTER)
	ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original);
      REG_USERVAR_P (new_reg) = REG_USERVAR_P (original);
      REG_POINTER (new_reg) = REG_POINTER (original);
      REG_ATTRS (new_reg) = REG_ATTRS (original);
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "      Creating newreg=%i from oldreg=%i",
		 REGNO (new_reg), REGNO (original));
    }
  if (lra_dump_file != NULL)
    {
      if (title != NULL)
	fprintf (lra_dump_file, ", assigning class %s to%s%s r%d",
		 reg_class_names[rclass], *title == '\0' ? "" : " ",
		 title, REGNO (new_reg));
      fprintf (lra_dump_file, "\n");
    }
  /* Resize per-pseudo data to include the new pseudo.  Passing the
     current max_reg_num () makes expand_reg_data's class-setting loop
     a no-op here: the class is set explicitly just below.  */
  expand_reg_data (max_reg_num ());
  setup_reg_classes (REGNO (new_reg), rclass, NO_REGS, rclass);
  if (exclude_start_hard_regs != NULL)
    lra_reg_info[REGNO (new_reg)].exclude_start_hard_regs
      = *exclude_start_hard_regs;
  return new_reg;
}
224 1.1 mrg
225 1.1 mrg /* Analogous to the previous function but also inherits value of
226 1.1 mrg ORIGINAL. */
227 1.1 mrg rtx
228 1.1 mrg lra_create_new_reg (machine_mode md_mode, rtx original, enum reg_class rclass,
229 1.1 mrg HARD_REG_SET *exclude_start_hard_regs, const char *title)
230 1.1 mrg {
231 1.1 mrg rtx new_reg;
232 1.1 mrg
233 1.1 mrg new_reg
234 1.1 mrg = lra_create_new_reg_with_unique_value (md_mode, original, rclass,
235 1.1 mrg exclude_start_hard_regs, title);
236 1.1 mrg if (original != NULL_RTX && REG_P (original))
237 1.1 mrg lra_assign_reg_val (REGNO (original), REGNO (new_reg));
238 1.1 mrg return new_reg;
239 1.1 mrg }
240 1.1 mrg
/* Give REGNO a fresh, unique held value, dissociating it from any
   value it previously shared with other pseudos.  */
void
lra_set_regno_unique_value (int regno)
{
  lra_reg_info[regno].val = get_new_reg_value ();
}
247 1.1 mrg
/* Invalidate INSN related info used by LRA: both the regno info and
   the cached recog data keyed by INSN's uid.  The info should never
   be used after that.  */
void
lra_invalidate_insn_data (rtx_insn *insn)
{
  lra_invalidate_insn_regno_info (insn);
  invalidate_insn_recog_data (INSN_UID (insn));
}
256 1.1 mrg
/* Mark INSN deleted and invalidate the insn related info used by
   LRA.  */
void
lra_set_insn_deleted (rtx_insn *insn)
{
  lra_invalidate_insn_data (insn);
  SET_INSN_DELETED (insn);
}
265 1.1 mrg
/* Delete an unneeded INSN and any previous insns whose sole purpose
   is loading data that is dead in INSN.  */
void
lra_delete_dead_insn (rtx_insn *insn)
{
  rtx_insn *prev = prev_real_insn (insn);
  rtx prev_dest;

  /* If the previous insn sets a register that dies in our insn,
     delete it too.  The recursion walks back through a whole chain
     of now-dead loads.  The side_effects_p check keeps us from
     deleting sets whose source does more than compute a value.  */
  if (prev && GET_CODE (PATTERN (prev)) == SET
      && (prev_dest = SET_DEST (PATTERN (prev)), REG_P (prev_dest))
      && reg_mentioned_p (prev_dest, PATTERN (insn))
      && find_regno_note (insn, REG_DEAD, REGNO (prev_dest))
      && ! side_effects_p (SET_SRC (PATTERN (prev))))
    lra_delete_dead_insn (prev);

  lra_set_insn_deleted (insn);
}
285 1.1 mrg
/* Emit insn x = y + z.  Return NULL if we failed to do it.
   Otherwise, return the insn.  We don't use gen_add3_insn as it might
   clobber CC.  */
static rtx_insn *
emit_add3_insn (rtx x, rtx y, rtx z)
{
  rtx_insn *last;

  last = get_last_insn ();

  /* Prefer the target's dedicated pointer-add pattern when one
     exists; it is guaranteed not to clobber CC.  */
  if (have_addptr3_insn (x, y, z))
    {
      rtx_insn *insn = gen_addptr3_insn (x, y, z);

      /* If the target provides an "addptr" pattern it hopefully does
	 for a reason.  So falling back to the normal add would be
	 a bug.  */
      lra_assert (insn != NULL_RTX);
      emit_insn (insn);
      return insn;
    }

  rtx_insn *insn = emit_insn (gen_rtx_SET (x, gen_rtx_PLUS (GET_MODE (y),
							    y, z)));
  /* Roll the stream back if the target does not recognize the plain
     add we just emitted.  */
  if (recog_memoized (insn) < 0)
    {
      delete_insns_since (last);
      insn = NULL;
    }
  return insn;
}
317 1.1 mrg
318 1.1 mrg /* Emit insn x = x + y. Return the insn. We use gen_add2_insn as the
319 1.1 mrg last resort. */
320 1.1 mrg static rtx_insn *
321 1.1 mrg emit_add2_insn (rtx x, rtx y)
322 1.1 mrg {
323 1.1 mrg rtx_insn *insn = emit_add3_insn (x, x, y);
324 1.1 mrg if (insn == NULL_RTX)
325 1.1 mrg {
326 1.1 mrg insn = gen_add2_insn (x, y);
327 1.1 mrg if (insn != NULL_RTX)
328 1.1 mrg emit_insn (insn);
329 1.1 mrg }
330 1.1 mrg return insn;
331 1.1 mrg }
332 1.1 mrg
/* Target checks operands through operand predicates to recognize an
   insn.  We should have a special precaution to generate add insns
   which are frequent results of elimination.

   Emit insns for x = y + z.  X can be used to store intermediate
   values and should be not in Y and Z when we use X to store an
   intermediate value.  Y + Z should form [base] [+ index[ * scale]] [
   + disp] where base and index are registers, disp and scale are
   constants.  Y should contain base if it is present, Z should
   contain disp if any.  index[*scale] can be part of Y or Z.  */
void
lra_emit_add (rtx x, rtx y, rtx z)
{
  int old;
  rtx_insn *last;
  rtx a1, a2, base, index, disp, scale, index_scale;
  bool ok_p;

  /* First try the straightforward three-operand add.  */
  rtx_insn *add3_insn = emit_add3_insn (x, y, z);
  old = max_reg_num ();
  if (add3_insn != NULL)
    ;
  else
    {
      /* Decompose Y + Z into base, index*scale and displacement parts
	 so the sum can be rebuilt from insns the target accepts.  */
      disp = a2 = NULL_RTX;
      if (GET_CODE (y) == PLUS)
	{
	  a1 = XEXP (y, 0);
	  a2 = XEXP (y, 1);
	  disp = z;
	}
      else
	{
	  a1 = y;
	  if (CONSTANT_P (z))
	    disp = z;
	  else
	    a2 = z;
	}
      index_scale = scale = NULL_RTX;
      if (GET_CODE (a1) == MULT)
	{
	  index_scale = a1;
	  index = XEXP (a1, 0);
	  scale = XEXP (a1, 1);
	  base = a2;
	}
      else if (a2 != NULL_RTX && GET_CODE (a2) == MULT)
	{
	  index_scale = a2;
	  index = XEXP (a2, 0);
	  scale = XEXP (a2, 1);
	  base = a1;
	}
      else
	{
	  base = a1;
	  index = a2;
	}
      /* If the pieces do not look like a well-formed address, give up
	 on the decomposition and use a move plus a 2-operand add.  */
      if ((base != NULL_RTX && ! (REG_P (base) || GET_CODE (base) == SUBREG))
	  || (index != NULL_RTX
	      && ! (REG_P (index) || GET_CODE (index) == SUBREG))
	  || (disp != NULL_RTX && ! CONSTANT_P (disp))
	  || (scale != NULL_RTX && ! CONSTANT_P (scale)))
	{
	  /* Probably we have no 3 op add.  Last chance is to use 2-op
	     add insn.  To succeed, don't move Z to X as an address
	     segment always comes in Y.  Otherwise, we might fail when
	     adding the address segment to register.  */
	  lra_assert (x != y && x != z);
	  emit_move_insn (x, y);
	  rtx_insn *insn = emit_add2_insn (x, z);
	  lra_assert (insn != NULL_RTX);
	}
      else
	{
	  if (index_scale == NULL_RTX)
	    index_scale = index;
	  if (disp == NULL_RTX)
	    {
	      /* Generate x = index_scale; x = x + base.  */
	      lra_assert (index_scale != NULL_RTX && base != NULL_RTX);
	      emit_move_insn (x, index_scale);
	      rtx_insn *insn = emit_add2_insn (x, base);
	      lra_assert (insn != NULL_RTX);
	    }
	  else if (scale == NULL_RTX)
	    {
	      /* Try x = base + disp.  */
	      lra_assert (base != NULL_RTX);
	      last = get_last_insn ();
	      rtx_insn *move_insn =
		emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base), base, disp));
	      if (recog_memoized (move_insn) < 0)
		{
		  delete_insns_since (last);
		  /* Generate x = disp; x = x + base.  */
		  emit_move_insn (x, disp);
		  rtx_insn *add2_insn = emit_add2_insn (x, base);
		  lra_assert (add2_insn != NULL_RTX);
		}
	      /* Generate x = x + index.  */
	      if (index != NULL_RTX)
		{
		  rtx_insn *insn = emit_add2_insn (x, index);
		  lra_assert (insn != NULL_RTX);
		}
	    }
	  else
	    {
	      /* Try x = index_scale; x = x + disp; x = x + base.  */
	      last = get_last_insn ();
	      rtx_insn *move_insn = emit_move_insn (x, index_scale);
	      ok_p = false;
	      if (recog_memoized (move_insn) >= 0)
		{
		  rtx_insn *insn = emit_add2_insn (x, disp);
		  if (insn != NULL_RTX)
		    {
		      if (base == NULL_RTX)
			ok_p = true;
		      else
			{
			  insn = emit_add2_insn (x, base);
			  if (insn != NULL_RTX)
			    ok_p = true;
			}
		    }
		}
	      if (! ok_p)
		{
		  rtx_insn *insn;

		  delete_insns_since (last);
		  /* Generate x = disp; x = x + base; x = x + index_scale.  */
		  emit_move_insn (x, disp);
		  if (base != NULL_RTX)
		    {
		      insn = emit_add2_insn (x, base);
		      lra_assert (insn != NULL_RTX);
		    }
		  insn = emit_add2_insn (x, index_scale);
		  lra_assert (insn != NULL_RTX);
		}
	    }
	}
    }
  /* Functions emit_... can create pseudos -- so expand the pseudo
     data.  */
  if (old != max_reg_num ())
    expand_reg_data (old);
}
485 1.1 mrg
486 1.1 mrg /* The number of emitted reload insns so far. */
487 1.1 mrg int lra_curr_reload_num;
488 1.1 mrg
489 1.1 mrg static void remove_insn_scratches (rtx_insn *insn);
490 1.1 mrg
/* Emit x := y, processing special case when y = u + v or y = u + v *
   scale + w through emit_add (Y can be an address which is base +
   index reg * scale + displacement in general case).  X may be used
   as intermediate result therefore it should be not in Y.  */
void
lra_emit_move (rtx x, rtx y)
{
  int old;
  rtx_insn *insn;

  if (GET_CODE (y) != PLUS)
    {
      if (rtx_equal_p (x, y))
	return;
      old = max_reg_num ();

      /* A STRICT_LOW_PART destination needs a raw SET;
	 emit_move_insn would not accept it.  */
      insn = (GET_CODE (x) != STRICT_LOW_PART
	      ? emit_move_insn (x, y) : emit_insn (gen_rtx_SET (x, y)));
      /* The move pattern may require scratch registers, so convert them
	 into real registers now.  */
      if (insn != NULL_RTX)
	remove_insn_scratches (insn);
      if (REG_P (x))
	lra_reg_info[ORIGINAL_REGNO (x)].last_reload = ++lra_curr_reload_num;
      /* Function emit_move can create pseudos -- so expand the pseudo
	 data.  */
      if (old != max_reg_num ())
	expand_reg_data (old);
      return;
    }
  lra_emit_add (x, XEXP (y, 0), XEXP (y, 1));
}
523 1.1 mrg
524 1.1 mrg /* Update insn operands which are duplication of operands whose
525 1.1 mrg numbers are in array of NOPS (with end marker -1). The insn is
526 1.1 mrg represented by its LRA internal representation ID. */
527 1.1 mrg void
528 1.1 mrg lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
529 1.1 mrg {
530 1.1 mrg int i, j, nop;
531 1.1 mrg struct lra_static_insn_data *static_id = id->insn_static_data;
532 1.1 mrg
533 1.1 mrg for (i = 0; i < static_id->n_dups; i++)
534 1.1 mrg for (j = 0; (nop = nops[j]) >= 0; j++)
535 1.1 mrg if (static_id->dup_num[i] == nop)
536 1.1 mrg *id->dup_loc[i] = *id->operand_loc[nop];
537 1.1 mrg }
538 1.1 mrg
539 1.1 mrg
540 1.1 mrg
542 1.1 mrg /* This page contains code dealing with info about registers in the
543 1.1 mrg insns. */
544 1.1 mrg
545 1.1 mrg /* Pools for insn reg info. */
546 1.1 mrg object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");
547 1.1 mrg
/* Create LRA insn related info about a reference to REGNO in INSN
   with TYPE (in/out/inout), biggest reference mode MODE, flag that it
   is reference through subreg (SUBREG_P), and reference to the next
   insn reg info (NEXT).  If REGNO can be early clobbered,
   alternatives in which it can be early clobbered are given by
   EARLY_CLOBBER_ALTS.  */
static struct lra_insn_reg *
new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
	      machine_mode mode, bool subreg_p,
	      alternative_mask early_clobber_alts,
	      struct lra_insn_reg *next)
{
  lra_insn_reg *ir = lra_insn_reg_pool.allocate ();
  ir->type = type;
  ir->biggest_mode = mode;
  /* Widen the recorded biggest mode for the pseudo, but only for real
     insns -- debug insns must not influence code generation.  */
  if (NONDEBUG_INSN_P (insn)
      && partial_subreg_p (lra_reg_info[regno].biggest_mode, mode))
    lra_reg_info[regno].biggest_mode = mode;
  ir->subreg_p = subreg_p;
  ir->early_clobber_alts = early_clobber_alts;
  ir->regno = regno;
  ir->next = next;
  return ir;
}
572 1.1 mrg
573 1.1 mrg /* Free insn reg info list IR. */
574 1.1 mrg static void
575 1.1 mrg free_insn_regs (struct lra_insn_reg *ir)
576 1.1 mrg {
577 1.1 mrg struct lra_insn_reg *next_ir;
578 1.1 mrg
579 1.1 mrg for (; ir != NULL; ir = next_ir)
580 1.1 mrg {
581 1.1 mrg next_ir = ir->next;
582 1.1 mrg lra_insn_reg_pool.remove (ir);
583 1.1 mrg }
584 1.1 mrg }
585 1.1 mrg
/* Release the pool backing all insn reg info; every outstanding
   lra_insn_reg becomes invalid.  */
static void
finish_insn_regs (void)
{
  lra_insn_reg_pool.release ();
}
592 1.1 mrg
593 1.1 mrg
594 1.1 mrg
596 1.1 mrg /* This page contains code dealing LRA insn info (or in other words
597 1.1 mrg LRA internal insn representation). */
598 1.1 mrg
599 1.1 mrg /* Map INSN_CODE -> the static insn data. This info is valid during
600 1.1 mrg all translation unit. */
601 1.1 mrg struct lra_static_insn_data *insn_code_data[NUM_INSN_CODES];
602 1.1 mrg
603 1.1 mrg /* Debug insns are represented as a special insn with one input
604 1.1 mrg operand which is RTL expression in var_location. */
605 1.1 mrg
/* The following data are used as static insn operand data for all
   debug insns.  If structure lra_operand_data is changed, the
   initializer should be changed too.  */
static struct lra_operand_data debug_operand_data =
  {
    NULL, /* alternative  */
    0, /* early_clobber_alts */
    E_VOIDmode, /* We are not interested in the operand mode.  */
    OP_IN,
    0, 0, 0 /* strict_low, is_operator, is_address.  */
  };
617 1.1 mrg
/* The following data are used as static insn data for all debug
   bind insns.  If structure lra_static_insn_data is changed, the
   initializer should be changed too.  */
static struct lra_static_insn_data debug_bind_static_data =
  {
    &debug_operand_data,
    0,	/* Duplication operands #.  */
    -1, /* Commutative operand #.  */
    1,	/* Operands #.  There is only one operand which is debug RTL
	   expression.  */
    0,	/* Duplications #.  */
    0,	/* Alternatives #.  We are not interested in alternatives
	   because we do not process debug insns for reloads.  */
    NULL, /* Hard registers referenced in machine description.	*/
    NULL  /* Descriptions of operands in alternatives.	*/
  };
634 1.1 mrg
/* The following data are used as static insn data for all debug
   marker insns.  If structure lra_static_insn_data is changed, the
   initializer should be changed too.  */
static struct lra_static_insn_data debug_marker_static_data =
  {
    &debug_operand_data,
    0,	/* Duplication operands #.  */
    -1, /* Commutative operand #.  */
    0,	/* Operands #.  There isn't any operand.  */
    0,	/* Duplications #.  */
    0,	/* Alternatives #.  We are not interested in alternatives
	   because we do not process debug insns for reloads.  */
    NULL, /* Hard registers referenced in machine description.	*/
    NULL  /* Descriptions of operands in alternatives.	*/
  };
650 1.1 mrg
/* Called once per compiler run to initialize the per-insn-code static
   data cache to the empty state.  */
static void
init_insn_code_data_once (void)
{
  memset (insn_code_data, 0, sizeof (insn_code_data));
}
658 1.1 mrg
659 1.1 mrg /* Called once per compiler work to finalize some LRA data related to
660 1.1 mrg insns. */
661 1.1 mrg static void
662 1.1 mrg finish_insn_code_data_once (void)
663 1.1 mrg {
664 1.1 mrg for (unsigned int i = 0; i < NUM_INSN_CODES; i++)
665 1.1 mrg {
666 1.1 mrg if (insn_code_data[i] != NULL)
667 1.1 mrg {
668 1.1 mrg free (insn_code_data[i]);
669 1.1 mrg insn_code_data[i] = NULL;
670 1.1 mrg }
671 1.1 mrg }
672 1.1 mrg }
673 1.1 mrg
/* Return static insn data, allocate and setup if necessary.  Although
   dup_num is static data (it depends only on icode), to set it up we
   need to extract insn first.  So recog_data should be valid for
   normal insn (ICODE >= 0) before the call.  NOP, NDUP and NALT give
   the operand, duplicate and alternative counts for a fresh entry.  */
static struct lra_static_insn_data *
get_static_insn_data (int icode, int nop, int ndup, int nalt)
{
  struct lra_static_insn_data *data;
  size_t n_bytes;

  lra_assert (icode < (int) NUM_INSN_CODES);
  if (icode >= 0 && (data = insn_code_data[icode]) != NULL)
    return data;
  lra_assert (nop >= 0 && ndup >= 0 && nalt >= 0);
  /* Allocate the header, the operand array, and the dup-number array
     as one contiguous chunk; the pointers below carve it up.  */
  n_bytes = sizeof (struct lra_static_insn_data)
    + sizeof (struct lra_operand_data) * nop
    + sizeof (int) * ndup;
  data = XNEWVAR (struct lra_static_insn_data, n_bytes);
  data->operand_alternative = NULL;
  data->n_operands = nop;
  data->n_dups = ndup;
  data->n_alternatives = nalt;
  data->operand = ((struct lra_operand_data *)
		   ((char *) data + sizeof (struct lra_static_insn_data)));
  data->dup_num = ((int *) ((char *) data->operand
			    + sizeof (struct lra_operand_data) * nop));
  if (icode >= 0)
    {
      int i;

      /* Cache the entry so every insn with this code shares it.  */
      insn_code_data[icode] = data;
      for (i = 0; i < nop; i++)
	{
	  data->operand[i].constraint
	    = insn_data[icode].operand[i].constraint;
	  data->operand[i].mode = insn_data[icode].operand[i].mode;
	  data->operand[i].strict_low = insn_data[icode].operand[i].strict_low;
	  data->operand[i].is_operator
	    = insn_data[icode].operand[i].is_operator;
	  /* Derive in/out/inout from the leading constraint char.  */
	  data->operand[i].type
	    = (data->operand[i].constraint[0] == '=' ? OP_OUT
	       : data->operand[i].constraint[0] == '+' ? OP_INOUT
	       : OP_IN);
	  data->operand[i].is_address = false;
	}
      for (i = 0; i < ndup; i++)
	data->dup_num[i] = recog_data.dup_num[i];
    }
  return data;
}
724 1.1 mrg
725 1.1 mrg /* The current length of the following array. */
726 1.1 mrg int lra_insn_recog_data_len;
727 1.1 mrg
728 1.1 mrg /* Map INSN_UID -> the insn recog data (NULL if unknown). */
729 1.1 mrg lra_insn_recog_data_t *lra_insn_recog_data;
730 1.1 mrg
731 1.1 mrg /* Alloc pool we allocate entries for lra_insn_recog_data from. */
732 1.1 mrg static object_allocator<class lra_insn_recog_data>
733 1.1 mrg lra_insn_recog_data_pool ("insn recog data pool");
734 1.1 mrg
/* Initialize the INSN_UID -> recog data map to the empty state.  */
static void
init_insn_recog_data (void)
{
  lra_insn_recog_data_len = 0;
  lra_insn_recog_data = NULL;
}
742 1.1 mrg
743 1.1 mrg /* Expand, if necessary, LRA data about insns. */
744 1.1 mrg static void
745 1.1 mrg check_and_expand_insn_recog_data (int index)
746 1.1 mrg {
747 1.1 mrg int i, old;
748 1.1 mrg
749 1.1 mrg if (lra_insn_recog_data_len > index)
750 1.1 mrg return;
751 1.1 mrg old = lra_insn_recog_data_len;
752 1.1 mrg lra_insn_recog_data_len = index * 3 / 2 + 1;
753 1.1 mrg lra_insn_recog_data = XRESIZEVEC (lra_insn_recog_data_t,
754 1.1 mrg lra_insn_recog_data,
755 1.1 mrg lra_insn_recog_data_len);
756 1.1 mrg for (i = old; i < lra_insn_recog_data_len; i++)
757 1.1 mrg lra_insn_recog_data[i] = NULL;
758 1.1 mrg }
759 1.1 mrg
760 1.1 mrg /* Finish LRA DATA about insn. */
761 1.1 mrg static void
762 1.1 mrg free_insn_recog_data (lra_insn_recog_data_t data)
763 1.1 mrg {
764 1.1 mrg if (data->operand_loc != NULL)
765 1.1 mrg free (data->operand_loc);
766 1.1 mrg if (data->dup_loc != NULL)
767 1.1 mrg free (data->dup_loc);
768 1.1 mrg if (data->arg_hard_regs != NULL)
769 1.1 mrg free (data->arg_hard_regs);
770 1.1 mrg if (data->icode < 0 && NONDEBUG_INSN_P (data->insn))
771 1.1 mrg {
772 1.1 mrg if (data->insn_static_data->operand_alternative != NULL)
773 1.1 mrg free (const_cast <operand_alternative *>
774 1.1 mrg (data->insn_static_data->operand_alternative));
775 1.1 mrg free_insn_regs (data->insn_static_data->hard_regs);
776 1.1 mrg free (data->insn_static_data);
777 1.1 mrg }
778 1.1 mrg free_insn_regs (data->regs);
779 1.1 mrg data->regs = NULL;
780 1.1 mrg lra_insn_recog_data_pool.remove (data);
781 1.1 mrg }
782 1.1 mrg
783 1.1 mrg /* Pools for copies. */
784 1.1 mrg static object_allocator<lra_copy> lra_copy_pool ("lra copies");
785 1.1 mrg
/* Finish LRA data about all insns: free every per-insn recog data
   entry, release the shared pools, and free the map itself.  */
static void
finish_insn_recog_data (void)
{
  int i;
  lra_insn_recog_data_t data;

  for (i = 0; i < lra_insn_recog_data_len; i++)
    if ((data = lra_insn_recog_data[i]) != NULL)
      free_insn_recog_data (data);
  finish_insn_regs ();
  lra_copy_pool.release ();
  lra_insn_reg_pool.release ();
  lra_insn_recog_data_pool.release ();
  free (lra_insn_recog_data);
}
802 1.1 mrg
/* Setup info about operands in alternatives of LRA DATA of insn.
   OP_ALT points at n_alternatives * n_operands operand_alternative
   descriptors laid out alternative-major.  */
static void
setup_operand_alternative (lra_insn_recog_data_t data,
			   const operand_alternative *op_alt)
{
  int i, j, nop, nalt;
  int icode = data->icode;
  struct lra_static_insn_data *static_data = data->insn_static_data;

  static_data->commutative = -1;
  nop = static_data->n_operands;
  nalt = static_data->n_alternatives;
  static_data->operand_alternative = op_alt;
  for (i = 0; i < nop; i++)
    {
      static_data->operand[i].early_clobber_alts = 0;
      static_data->operand[i].is_address = false;
      if (static_data->operand[i].constraint[0] == '%')
	{
	  /* We currently only support one commutative pair of operands.  */
	  if (static_data->commutative < 0)
	    static_data->commutative = i;
	  else
	    lra_assert (icode < 0); /* Asm  */
	  /* The last operand should not be marked commutative.  */
	  lra_assert (i != nop - 1);
	}
    }
  /* Accumulate per-operand flags across all alternatives.  */
  for (j = 0; j < nalt; j++)
    for (i = 0; i < nop; i++, op_alt++)
      {
	if (op_alt->earlyclobber)
	  static_data->operand[i].early_clobber_alts |= (alternative_mask) 1 << j;
	static_data->operand[i].is_address |= op_alt->is_address;
      }
}
839 1.1 mrg
/* Recursively process X and collect info about registers, which are
   not the insn operands, in X with TYPE (in/out/inout) and flag that
   it is early clobbered in the insn (EARLY_CLOBBER) and add the info
   to LIST.  X is a part of insn given by DATA.  Return the result
   list.  */
static struct lra_insn_reg *
collect_non_operand_hard_regs (rtx_insn *insn, rtx *x,
			       lra_insn_recog_data_t data,
			       struct lra_insn_reg *list,
			       enum op_type type, bool early_clobber)
{
  int i, j, regno, last;
  bool subreg_p;
  machine_mode mode;
  struct lra_insn_reg *curr;
  rtx op = *x;
  enum rtx_code code = GET_CODE (op);
  const char *fmt = GET_RTX_FORMAT (code);

  /* Operand and dup locations are handled by the operand machinery;
     skip them so only the non-operand registers are collected.  */
  for (i = 0; i < data->insn_static_data->n_operands; i++)
    if (! data->insn_static_data->operand[i].is_operator
	&& x == data->operand_loc[i])
      /* It is an operand loc.  Stop here.  */
      return list;
  for (i = 0; i < data->insn_static_data->n_dups; i++)
    if (x == data->dup_loc[i])
      /* It is a dup loc.  Stop here.  */
      return list;
  mode = GET_MODE (op);
  subreg_p = false;
  if (code == SUBREG)
    {
      /* Look through the SUBREG but remember the widest mode involved
	 and whether the access is a read-modify-write of part of the
	 inner register.  */
      mode = wider_subreg_mode (op);
      if (read_modify_subreg_p (op))
	subreg_p = true;
      op = SUBREG_REG (op);
      code = GET_CODE (op);
    }
  if (REG_P (op))
    {
      /* Only hard registers are collected here; pseudos are not
	 "non-operand hard regs".  */
      if ((regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER)
	return list;
      /* Process all regs even unallocatable ones as we need info
	 about all regs for rematerialization pass.  */
      for (last = end_hard_regno (mode, regno); regno < last; regno++)
	{
	  /* Try to merge into an existing entry for the same hard reg
	     with compatible subreg/mode info.  */
	  for (curr = list; curr != NULL; curr = curr->next)
	    if (curr->regno == regno && curr->subreg_p == subreg_p
		&& curr->biggest_mode == mode)
	      {
		/* Seen as both input and output -> inout.  */
		if (curr->type != type)
		  curr->type = OP_INOUT;
		if (early_clobber)
		  curr->early_clobber_alts = ALL_ALTERNATIVES;
		break;
	      }
	  if (curr == NULL)
	    {
	      /* This is a new hard regno or the info cannot be
		 integrated into the found structure.  */
#ifdef STACK_REGS
	      early_clobber
		= (early_clobber
		   /* This clobber is to inform popping floating
		      point stack only.  */
		   && ! (FIRST_STACK_REG <= regno
			 && regno <= LAST_STACK_REG));
#endif
	      list = new_insn_reg (data->insn, regno, type, mode, subreg_p,
				   early_clobber ? ALL_ALTERNATIVES : 0, list);
	    }
	}
      return list;
    }
  /* Not a register: recurse into sub-expressions, choosing the operand
     type (in/out/inout) implied by the enclosing RTL code.  */
  switch (code)
    {
    case SET:
      list = collect_non_operand_hard_regs (insn, &SET_DEST (op), data,
					    list, OP_OUT, false);
      list = collect_non_operand_hard_regs (insn, &SET_SRC (op), data,
					    list, OP_IN, false);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early clobber.  */
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_OUT, true);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_INOUT, false);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_INOUT, false);
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 1), data,
					    list, OP_IN, false);
      break;
    default:
      /* Generic walk over all 'e' (expression) and 'E' (vector)
	 sub-rtxes, treating them as inputs.  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    list = collect_non_operand_hard_regs (insn, &XEXP (op, i), data,
						  list, OP_IN, false);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	      list = collect_non_operand_hard_regs (insn, &XVECEXP (op, i, j),
						    data, list, OP_IN, false);
	}
    }
  return list;
}
952 1.1 mrg
/* Set up and return info about INSN.  Set up the info if it is not set up
   yet.  */
lra_insn_recog_data_t
lra_set_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int i, n, icode;
  rtx **locs;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;

  check_and_expand_insn_recog_data (uid);
  if (DEBUG_INSN_P (insn))
    icode = -1;
  else
    {
      icode = INSN_CODE (insn);
      if (icode < 0)
	/* It might be a new simple insn which is not recognized yet.  */
	INSN_CODE (insn) = icode = recog_memoized (insn);
    }
  data = lra_insn_recog_data_pool.allocate ();
  lra_insn_recog_data[uid] = data;
  data->insn = insn;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  data->icode = icode;
  data->regs = NULL;
  /* Debug insns get minimal data sharing the static descriptors for
     debug bind/marker insns; nothing more to compute for them.  */
  if (DEBUG_INSN_P (insn))
    {
      data->dup_loc = NULL;
      data->arg_hard_regs = NULL;
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (DEBUG_BIND_INSN_P (insn))
	{
	  data->insn_static_data = &debug_bind_static_data;
	  data->operand_loc = XNEWVEC (rtx *, 1);
	  data->operand_loc[0] = &INSN_VAR_LOCATION_LOC (insn);
	}
      else if (DEBUG_MARKER_INSN_P (insn))
	{
	  data->insn_static_data = &debug_marker_static_data;
	  data->operand_loc = NULL;
	}
      return data;
    }
  if (icode < 0)
    {
      /* Unrecognized insn: either an asm (nop >= 0 below) or a special
	 insn like USE/CLOBBER/ASM_INPUT.  */
      int nop, nalt;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      data->operand_loc = data->dup_loc = NULL;
      nalt = 1;
      if (nop < 0)
	{
	  /* It is a special insn like USE or CLOBBER.  We should
	     recognize any regular insn otherwise LRA can do nothing
	     with this insn.  */
	  gcc_assert (GET_CODE (PATTERN (insn)) == USE
		      || GET_CODE (PATTERN (insn)) == CLOBBER
		      || GET_CODE (PATTERN (insn)) == ASM_INPUT);
	  data->insn_static_data = insn_static_data
	    = get_static_insn_data (-1, 0, 0, nalt);
	}
      else
	{
	  /* expand_asm_operands makes sure there aren't too many
	     operands.  */
	  lra_assert (nop <= MAX_RECOG_OPERANDS);
	  if (nop != 0)
	    data->operand_loc = XNEWVEC (rtx *, nop);
	  /* Now get the operand values and constraints out of the
	     insn.  */
	  decode_asm_operands (PATTERN (insn), NULL,
			       data->operand_loc,
			       constraints, operand_mode, NULL);
	  /* The number of alternatives is one more than the number of
	     commas in the first operand's constraint string.  */
	  if (nop > 0)
	    for (const char *p =constraints[0]; *p; p++)
	      nalt += *p == ',';
	  data->insn_static_data = insn_static_data
	    = get_static_insn_data (-1, nop, 0, nalt);
	  for (i = 0; i < nop; i++)
	    {
	      insn_static_data->operand[i].mode = operand_mode[i];
	      insn_static_data->operand[i].constraint = constraints[i];
	      insn_static_data->operand[i].strict_low = false;
	      insn_static_data->operand[i].is_operator = false;
	      insn_static_data->operand[i].is_address = false;
	    }
	}
      /* Derive operand direction from the leading constraint char:
	 '=' -> output, '+' -> in/out, otherwise input.  */
      for (i = 0; i < insn_static_data->n_operands; i++)
	insn_static_data->operand[i].type
	  = (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
	     : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
	     : OP_IN);
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (nop > 0)
	{
	  operand_alternative *op_alt = XCNEWVEC (operand_alternative,
						  nalt * nop);
	  preprocess_constraints (nop, nalt, constraints, op_alt,
				  data->operand_loc);
	  setup_operand_alternative (data, op_alt);
	}
    }
  else
    {
      /* Recognized insn: pull operand/dup locations from the global
	 recog_data filled in by insn_extract, and share the static
	 data cached per icode.  */
      insn_extract (insn);
      data->insn_static_data = insn_static_data
	= get_static_insn_data (icode, insn_data[icode].n_operands,
				insn_data[icode].n_dups,
				insn_data[icode].n_alternatives);
      n = insn_static_data->n_operands;
      if (n == 0)
	locs = NULL;
      else
	{
	  locs = XNEWVEC (rtx *, n);
	  memcpy (locs, recog_data.operand_loc, n * sizeof (rtx *));
	}
      data->operand_loc = locs;
      n = insn_static_data->n_dups;
      if (n == 0)
	locs = NULL;
      else
	{
	  locs = XNEWVEC (rtx *, n);
	  memcpy (locs, recog_data.dup_loc, n * sizeof (rtx *));
	}
      data->dup_loc = locs;
      data->preferred_alternatives = get_preferred_alternatives (insn);
      const operand_alternative *op_alt = preprocess_insn_constraints (icode);
      if (!insn_static_data->operand_alternative)
	setup_operand_alternative (data, op_alt);
      else if (op_alt != insn_static_data->operand_alternative)
	insn_static_data->operand_alternative = op_alt;
    }
  if (GET_CODE (PATTERN (insn)) == CLOBBER || GET_CODE (PATTERN (insn)) == USE)
    insn_static_data->hard_regs = NULL;
  else
    insn_static_data->hard_regs
      = collect_non_operand_hard_regs (insn, &PATTERN (insn), data,
				       NULL, OP_IN, false);
  data->arg_hard_regs = NULL;
  if (CALL_P (insn))
    {
      bool use_p;
      rtx link;
      int n_hard_regs, regno, arg_hard_regs[FIRST_PSEUDO_REGISTER];

      n_hard_regs = 0;
      /* Finding implicit hard register usage.	We believe it will be
	 not changed whatever transformations are used.  Call insns
	 are such example.  */
      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link != NULL_RTX;
	   link = XEXP (link, 1))
	if (((use_p = GET_CODE (XEXP (link, 0)) == USE)
	     || GET_CODE (XEXP (link, 0)) == CLOBBER)
	    && REG_P (XEXP (XEXP (link, 0), 0)))
	  {
	    regno = REGNO (XEXP (XEXP (link, 0), 0));
	    lra_assert (regno < FIRST_PSEUDO_REGISTER);
	    /* It is an argument register.  Clobbered regs are encoded
	       by adding FIRST_PSEUDO_REGISTER to the regno.  */
	    for (i = REG_NREGS (XEXP (XEXP (link, 0), 0)) - 1; i >= 0; i--)
	      arg_hard_regs[n_hard_regs++]
		= regno + i + (use_p ? 0 : FIRST_PSEUDO_REGISTER);
	  }

      if (n_hard_regs != 0)
	{
	  /* The array is terminated by -1.  */
	  arg_hard_regs[n_hard_regs++] = -1;
	  data->arg_hard_regs = XNEWVEC (int, n_hard_regs);
	  memcpy (data->arg_hard_regs, arg_hard_regs,
		  sizeof (int) * n_hard_regs);
	}
    }
  /* Some output operand can be recognized only from the context not
     from the constraints which are empty in this case.  Call insn may
     contain a hard register in set destination with empty constraint
     and extract_insn treats them as an input.  */
  for (i = 0; i < insn_static_data->n_operands; i++)
    {
      int j;
      rtx pat, set;
      struct lra_operand_data *operand = &insn_static_data->operand[i];

      /* ??? Should we treat 'X' the same way.  It looks to me that
	 'X' means anything and empty constraint means we do not
	 care.  */
      if (operand->type != OP_IN || *operand->constraint != '\0'
	  || operand->is_operator)
	continue;
      pat = PATTERN (insn);
      if (GET_CODE (pat) == SET)
	{
	  if (data->operand_loc[i] != &SET_DEST (pat))
	    continue;
	}
      else if (GET_CODE (pat) == PARALLEL)
	{
	  /* Look for a SET in the PARALLEL whose destination is this
	     operand's location.  */
	  for (j = XVECLEN (pat, 0) - 1; j >= 0; j--)
	    {
	      set = XVECEXP (PATTERN (insn), 0, j);
	      if (GET_CODE (set) == SET
		  && &SET_DEST (set) == data->operand_loc[i])
		break;
	    }
	  if (j < 0)
	    continue;
	}
      else
	continue;
      operand->type = OP_OUT;
    }
  return data;
}
1171 1.1 mrg
1172 1.1 mrg /* Return info about insn give by UID. The info should be already set
1173 1.1 mrg up. */
1174 1.1 mrg static lra_insn_recog_data_t
1175 1.1 mrg get_insn_recog_data_by_uid (int uid)
1176 1.1 mrg {
1177 1.1 mrg lra_insn_recog_data_t data;
1178 1.1 mrg
1179 1.1 mrg data = lra_insn_recog_data[uid];
1180 1.1 mrg lra_assert (data != NULL);
1181 1.1 mrg return data;
1182 1.1 mrg }
1183 1.1 mrg
1184 1.1 mrg /* Invalidate all info about insn given by its UID. */
1185 1.1 mrg static void
1186 1.1 mrg invalidate_insn_recog_data (int uid)
1187 1.1 mrg {
1188 1.1 mrg lra_insn_recog_data_t data;
1189 1.1 mrg
1190 1.1 mrg data = lra_insn_recog_data[uid];
1191 1.1 mrg lra_assert (data != NULL);
1192 1.1 mrg free_insn_recog_data (data);
1193 1.1 mrg lra_insn_recog_data[uid] = NULL;
1194 1.1 mrg }
1195 1.1 mrg
/* Update all the insn info about INSN.  It is usually called when
   something in the insn was changed.  Return the updated info.  */
lra_insn_recog_data_t
lra_update_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int n;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;
  poly_int64 sp_offset = 0;

  check_and_expand_insn_recog_data (uid);
  /* If the cached data was built for a different insn code, it is
     stale: save the SP offset, drop the old data, and rebuild.  */
  if ((data = lra_insn_recog_data[uid]) != NULL
      && data->icode != INSN_CODE (insn))
    {
      sp_offset = data->sp_offset;
      invalidate_insn_data_regno_info (data, insn, get_insn_freq (insn));
      invalidate_insn_recog_data (uid);
      data = NULL;
    }
  if (data == NULL)
    {
      data = lra_get_insn_recog_data (insn);
      /* Initiate or restore SP offset.  */
      data->sp_offset = sp_offset;
      return data;
    }
  /* Same insn code: refresh only the parts that can change.  */
  insn_static_data = data->insn_static_data;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  if (DEBUG_INSN_P (insn))
    return data;
  if (data->icode < 0)
    {
      int nop;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      if (nop >= 0)
	{
	  lra_assert (nop == data->insn_static_data->n_operands);
	  /* Now get the operand values and constraints out of the
	     insn.  */
	  decode_asm_operands (PATTERN (insn), NULL,
			       data->operand_loc,
			       constraints, operand_mode, NULL);

	  /* Sanity check: the static data must still describe the asm.  */
	  if (flag_checking)
	    for (int i = 0; i < nop; i++)
	      lra_assert
		(insn_static_data->operand[i].mode == operand_mode[i]
		 && insn_static_data->operand[i].constraint == constraints[i]
		 && ! insn_static_data->operand[i].is_operator);
	}

      /* Sanity check: operand types must still match the constraints.  */
      if (flag_checking)
	for (int i = 0; i < insn_static_data->n_operands; i++)
	  lra_assert
	    (insn_static_data->operand[i].type
	     == (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
		 : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
		 : OP_IN));
    }
  else
    {
      /* Recognized insn: re-extract to refresh the cached operand and
	 dup locations from recog_data.  */
      insn_extract (insn);
      n = insn_static_data->n_operands;
      if (n != 0)
	memcpy (data->operand_loc, recog_data.operand_loc, n * sizeof (rtx *));
      n = insn_static_data->n_dups;
      if (n != 0)
	memcpy (data->dup_loc, recog_data.dup_loc, n * sizeof (rtx *));
      lra_assert (check_bool_attrs (insn));
    }
  return data;
}
1272 1.1 mrg
1273 1.1 mrg /* Set up that INSN is using alternative ALT now. */
1274 1.1 mrg void
1275 1.1 mrg lra_set_used_insn_alternative (rtx_insn *insn, int alt)
1276 1.1 mrg {
1277 1.1 mrg lra_insn_recog_data_t data;
1278 1.1 mrg
1279 1.1 mrg data = lra_get_insn_recog_data (insn);
1280 1.1 mrg data->used_insn_alternative = alt;
1281 1.1 mrg }
1282 1.1 mrg
1283 1.1 mrg /* Set up that insn with UID is using alternative ALT now. The insn
1284 1.1 mrg info should be already set up. */
1285 1.1 mrg void
1286 1.1 mrg lra_set_used_insn_alternative_by_uid (int uid, int alt)
1287 1.1 mrg {
1288 1.1 mrg lra_insn_recog_data_t data;
1289 1.1 mrg
1290 1.1 mrg check_and_expand_insn_recog_data (uid);
1291 1.1 mrg data = lra_insn_recog_data[uid];
1292 1.1 mrg lra_assert (data != NULL);
1293 1.1 mrg data->used_insn_alternative = alt;
1294 1.1 mrg }
1295 1.1 mrg
1296 1.1 mrg
1297 1.1 mrg
1299 1.1 mrg /* This page contains code dealing with common register info and
1300 1.1 mrg pseudo copies. */
1301 1.1 mrg
1302 1.1 mrg /* The size of the following array. */
1303 1.1 mrg static int reg_info_size;
1304 1.1 mrg /* Common info about each register. */
1305 1.1 mrg class lra_reg *lra_reg_info;
1306 1.1 mrg
1307 1.1 mrg HARD_REG_SET hard_regs_spilled_into;
1308 1.1 mrg
1309 1.1 mrg /* Last register value. */
1310 1.1 mrg static int last_reg_value;
1311 1.1 mrg
1312 1.1 mrg /* Return new register value. */
1313 1.1 mrg static int
1314 1.1 mrg get_new_reg_value (void)
1315 1.1 mrg {
1316 1.1 mrg return ++last_reg_value;
1317 1.1 mrg }
1318 1.1 mrg
1319 1.1 mrg /* Vec referring to pseudo copies. */
1320 1.1 mrg static vec<lra_copy_t> copy_vec;
1321 1.1 mrg
/* Initialize I-th element of lra_reg_info.  */
static inline void
initialize_lra_reg_info_element (int i)
{
  bitmap_initialize (&lra_reg_info[i].insn_bitmap, &reg_obstack);
#ifdef STACK_REGS
  lra_reg_info[i].no_stack_p = false;
#endif
  CLEAR_HARD_REG_SET (lra_reg_info[i].conflict_hard_regs);
  CLEAR_HARD_REG_SET (lra_reg_info[i].exclude_start_hard_regs);
  /* No preferred hard registers known yet.  */
  lra_reg_info[i].preferred_hard_regno1 = -1;
  lra_reg_info[i].preferred_hard_regno2 = -1;
  lra_reg_info[i].preferred_hard_regno_profit1 = 0;
  lra_reg_info[i].preferred_hard_regno_profit2 = 0;
  lra_reg_info[i].biggest_mode = VOIDmode;
  lra_reg_info[i].live_ranges = NULL;
  lra_reg_info[i].nrefs = lra_reg_info[i].freq = 0;
  lra_reg_info[i].last_reload = 0;
  lra_reg_info[i].restore_rtx = NULL_RTX;
  /* Each new element gets a fresh, unique value number.  */
  lra_reg_info[i].val = get_new_reg_value ();
  lra_reg_info[i].offset = 0;
  lra_reg_info[i].copies = NULL;
}
1345 1.1 mrg
1346 1.1 mrg /* Initialize common reg info and copies. */
1347 1.1 mrg static void
1348 1.1 mrg init_reg_info (void)
1349 1.1 mrg {
1350 1.1 mrg int i;
1351 1.1 mrg
1352 1.1 mrg last_reg_value = 0;
1353 1.1 mrg reg_info_size = max_reg_num () * 3 / 2 + 1;
1354 1.1 mrg lra_reg_info = XNEWVEC (class lra_reg, reg_info_size);
1355 1.1 mrg for (i = 0; i < reg_info_size; i++)
1356 1.1 mrg initialize_lra_reg_info_element (i);
1357 1.1 mrg copy_vec.truncate (0);
1358 1.1 mrg CLEAR_HARD_REG_SET (hard_regs_spilled_into);
1359 1.1 mrg }
1360 1.1 mrg
1361 1.1 mrg
1362 1.1 mrg /* Finish common reg info and copies. */
1363 1.1 mrg static void
1364 1.1 mrg finish_reg_info (void)
1365 1.1 mrg {
1366 1.1 mrg int i;
1367 1.1 mrg
1368 1.1 mrg for (i = 0; i < reg_info_size; i++)
1369 1.1 mrg bitmap_clear (&lra_reg_info[i].insn_bitmap);
1370 1.1 mrg free (lra_reg_info);
1371 1.1 mrg reg_info_size = 0;
1372 1.1 mrg }
1373 1.1 mrg
1374 1.1 mrg /* Expand common reg info if it is necessary. */
1375 1.1 mrg static void
1376 1.1 mrg expand_reg_info (void)
1377 1.1 mrg {
1378 1.1 mrg int i, old = reg_info_size;
1379 1.1 mrg
1380 1.1 mrg if (reg_info_size > max_reg_num ())
1381 1.1 mrg return;
1382 1.1 mrg reg_info_size = max_reg_num () * 3 / 2 + 1;
1383 1.1 mrg lra_reg_info = XRESIZEVEC (class lra_reg, lra_reg_info, reg_info_size);
1384 1.1 mrg for (i = old; i < reg_info_size; i++)
1385 1.1 mrg initialize_lra_reg_info_element (i);
1386 1.1 mrg }
1387 1.1 mrg
1388 1.1 mrg /* Free all copies. */
1389 1.1 mrg void
1390 1.1 mrg lra_free_copies (void)
1391 1.1 mrg {
1392 1.1 mrg lra_copy_t cp;
1393 1.1 mrg
1394 1.1 mrg while (copy_vec.length () != 0)
1395 1.1 mrg {
1396 1.1 mrg cp = copy_vec.pop ();
1397 1.1 mrg lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
1398 1.1 mrg lra_copy_pool.remove (cp);
1399 1.1 mrg }
1400 1.1 mrg }
1401 1.1 mrg
1402 1.1 mrg /* Create copy of two pseudos REGNO1 and REGNO2. The copy execution
1403 1.1 mrg frequency is FREQ. */
1404 1.1 mrg void
1405 1.1 mrg lra_create_copy (int regno1, int regno2, int freq)
1406 1.1 mrg {
1407 1.1 mrg bool regno1_dest_p;
1408 1.1 mrg lra_copy_t cp;
1409 1.1 mrg
1410 1.1 mrg lra_assert (regno1 != regno2);
1411 1.1 mrg regno1_dest_p = true;
1412 1.1 mrg if (regno1 > regno2)
1413 1.1 mrg {
1414 1.1 mrg std::swap (regno1, regno2);
1415 1.1 mrg regno1_dest_p = false;
1416 1.1 mrg }
1417 1.1 mrg cp = lra_copy_pool.allocate ();
1418 1.1 mrg copy_vec.safe_push (cp);
1419 1.1 mrg cp->regno1_dest_p = regno1_dest_p;
1420 1.1 mrg cp->freq = freq;
1421 1.1 mrg cp->regno1 = regno1;
1422 1.1 mrg cp->regno2 = regno2;
1423 1.1 mrg cp->regno1_next = lra_reg_info[regno1].copies;
1424 1.1 mrg lra_reg_info[regno1].copies = cp;
1425 1.1 mrg cp->regno2_next = lra_reg_info[regno2].copies;
1426 1.1 mrg lra_reg_info[regno2].copies = cp;
1427 1.1 mrg if (lra_dump_file != NULL)
1428 1.1 mrg fprintf (lra_dump_file, " Creating copy r%d%sr%d@%d\n",
1429 1.1 mrg regno1, regno1_dest_p ? "<-" : "->", regno2, freq);
1430 1.1 mrg }
1431 1.1 mrg
1432 1.1 mrg /* Return N-th (0, 1, ...) copy. If there is no copy, return
1433 1.1 mrg NULL. */
1434 1.1 mrg lra_copy_t
1435 1.1 mrg lra_get_copy (int n)
1436 1.1 mrg {
1437 1.1 mrg if (n >= (int) copy_vec.length ())
1438 1.1 mrg return NULL;
1439 1.1 mrg return copy_vec[n];
1440 1.1 mrg }
1441 1.1 mrg
1442 1.1 mrg
1443 1.1 mrg
/* This page contains code dealing with info about registers in
   insns.  */

/* Process X of INSN recursively and add info (operand type is given
   by TYPE) about registers in X to the insn DATA.  If X can be early
   clobbered, alternatives in which it can be early clobbered are given
   by EARLY_CLOBBER_ALTS.  */
static void
add_regs_to_insn_regno_info (lra_insn_recog_data_t data, rtx x,
			     rtx_insn *insn, enum op_type type,
			     alternative_mask early_clobber_alts)
{
  int i, j, regno;
  bool subreg_p;
  machine_mode mode;
  const char *fmt;
  enum rtx_code code;
  struct lra_insn_reg *curr;

  code = GET_CODE (x);
  mode = GET_MODE (x);
  subreg_p = false;
  if (GET_CODE (x) == SUBREG)
    {
      /* Look through the SUBREG but keep the widest mode involved and
	 whether the access reads and modifies part of the inner reg.  */
      mode = wider_subreg_mode (x);
      if (read_modify_subreg_p (x))
	subreg_p = true;
      x = SUBREG_REG (x);
      code = GET_CODE (x);
    }
  if (REG_P (x))
    {
      regno = REGNO (x);
      /* Process all regs even unallocatable ones as we need info about
	 all regs for rematerialization pass.  */
      expand_reg_info ();
      /* bitmap_set_bit returns true iff the bit was newly set, i.e.
	 this is the first occurrence of REGNO in this insn.  */
      if (bitmap_set_bit (&lra_reg_info[regno].insn_bitmap, INSN_UID (insn)))
	{
	  data->regs = new_insn_reg (data->insn, regno, type, mode, subreg_p,
				     early_clobber_alts, data->regs);
	  return;
	}
      else
	{
	  /* REGNO already recorded for this insn: merge the new info
	     into the existing entry if compatible, otherwise chain a
	     new entry.  */
	  for (curr = data->regs; curr != NULL; curr = curr->next)
	    if (curr->regno == regno)
	      {
		if (curr->subreg_p != subreg_p || curr->biggest_mode != mode)
		  /* The info cannot be integrated into the found
		     structure.  */
		  data->regs = new_insn_reg (data->insn, regno, type, mode,
					     subreg_p, early_clobber_alts,
					     data->regs);
		else
		  {
		    if (curr->type != type)
		      curr->type = OP_INOUT;
		    curr->early_clobber_alts |= early_clobber_alts;
		  }
		return;
	      }
	  /* The insn bitmap said REGNO was seen, so an entry must
	     exist.  */
	  gcc_unreachable ();
	}
    }

  /* Not a register: recurse into sub-expressions with the operand type
     implied by the enclosing RTL code.  */
  switch (code)
    {
    case SET:
      add_regs_to_insn_regno_info (data, SET_DEST (x), insn, OP_OUT, 0);
      add_regs_to_insn_regno_info (data, SET_SRC (x), insn, OP_IN, 0);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early
	 clobber.  */
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_OUT,
				   ALL_ALTERNATIVES);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      add_regs_to_insn_regno_info (data, XEXP (x, 1), insn, OP_IN, 0);
      break;
    default:
      if ((code != PARALLEL && code != EXPR_LIST) || type != OP_OUT)
	/* Some targets place small structures in registers for return
	   values of functions, and those registers are wrapped in
	   PARALLEL that we may see as the destination of a SET.  Here
	   is an example:

	   (call_insn 13 12 14 2 (set (parallel:BLK [
		(expr_list:REG_DEP_TRUE (reg:DI 0 ax)
		    (const_int 0 [0]))
		(expr_list:REG_DEP_TRUE (reg:DI 1 dx)
		    (const_int 8 [0x8]))
	       ])
	     (call (mem:QI (symbol_ref:DI (...  */
	type = OP_IN;
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    add_regs_to_insn_regno_info (data, XEXP (x, i), insn, type, 0);
	  else if (fmt[i] == 'E')
	    {
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		add_regs_to_insn_regno_info (data, XVECEXP (x, i, j), insn,
					     type, 0);
	    }
	}
    }
}
1558 1.1 mrg
1559 1.1 mrg /* Return execution frequency of INSN. */
1560 1.1 mrg static int
1561 1.1 mrg get_insn_freq (rtx_insn *insn)
1562 1.1 mrg {
1563 1.1 mrg basic_block bb = BLOCK_FOR_INSN (insn);
1564 1.1 mrg
1565 1.1 mrg gcc_checking_assert (bb != NULL);
1566 1.1 mrg return REG_FREQ_FROM_BB (bb);
1567 1.1 mrg }
1568 1.1 mrg
/* Invalidate all reg info of INSN with DATA and execution frequency
   FREQ.  Update common info about the invalidated registers.  */
static void
invalidate_insn_data_regno_info (lra_insn_recog_data_t data, rtx_insn *insn,
				 int freq)
{
  int uid;
  bool debug_p;
  unsigned int i;
  struct lra_insn_reg *ir, *next_ir;

  uid = INSN_UID (insn);
  debug_p = DEBUG_INSN_P (insn);
  /* Each node is returned to the pool inside the loop, so NEXT_IR must
     be fetched before the removal.  */
  for (ir = data->regs; ir != NULL; ir = next_ir)
    {
      i = ir->regno;
      next_ir = ir->next;
      lra_insn_reg_pool.remove (ir);
      bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid);
      /* Reference counts and frequencies are maintained only for
	 pseudos of non-debug insns.  */
      if (i >= FIRST_PSEUDO_REGISTER && ! debug_p)
	{
	  lra_reg_info[i].nrefs--;
	  lra_reg_info[i].freq -= freq;
	  lra_assert (lra_reg_info[i].nrefs >= 0 && lra_reg_info[i].freq >= 0);
	}
    }
  data->regs = NULL;
}
1597 1.1 mrg
1598 1.1 mrg /* Invalidate all reg info of INSN. Update common info about the
1599 1.1 mrg invalidated registers. */
1600 1.1 mrg void
1601 1.1 mrg lra_invalidate_insn_regno_info (rtx_insn *insn)
1602 1.1 mrg {
1603 1.1 mrg invalidate_insn_data_regno_info (lra_get_insn_recog_data (insn), insn,
1604 1.1 mrg get_insn_freq (insn));
1605 1.1 mrg }
1606 1.1 mrg
1607 1.1 mrg /* Update common reg info from reg info of insn given by its DATA and
1608 1.1 mrg execution frequency FREQ. */
1609 1.1 mrg static void
1610 1.1 mrg setup_insn_reg_info (lra_insn_recog_data_t data, int freq)
1611 1.1 mrg {
1612 1.1 mrg unsigned int i;
1613 1.1 mrg struct lra_insn_reg *ir;
1614 1.1 mrg
1615 1.1 mrg for (ir = data->regs; ir != NULL; ir = ir->next)
1616 1.1 mrg if ((i = ir->regno) >= FIRST_PSEUDO_REGISTER)
1617 1.1 mrg {
1618 1.1 mrg lra_reg_info[i].nrefs++;
1619 1.1 mrg lra_reg_info[i].freq += freq;
1620 1.1 mrg }
1621 1.1 mrg }
1622 1.1 mrg
/* Set up insn reg info of INSN.  Update common reg info from reg info
   of INSN.  */
void
lra_update_insn_regno_info (rtx_insn *insn)
{
  int i, freq;
  lra_insn_recog_data_t data;
  struct lra_static_insn_data *static_data;
  enum rtx_code code;
  rtx link;

  if (! INSN_P (insn))
    return;
  data = lra_get_insn_recog_data (insn);
  static_data = data->insn_static_data;
  freq = NONDEBUG_INSN_P (insn) ? get_insn_freq (insn) : 0;
  /* Rebuild from scratch: drop any old per-insn reg info first.  */
  invalidate_insn_data_regno_info (data, insn, freq);
  for (i = static_data->n_operands - 1; i >= 0; i--)
    add_regs_to_insn_regno_info (data, *data->operand_loc[i], insn,
				 static_data->operand[i].type,
				 static_data->operand[i].early_clobber_alts);
  /* A bare USE/CLOBBER pattern has its register outside the operand
     list; record it explicitly.  */
  if ((code = GET_CODE (PATTERN (insn))) == CLOBBER || code == USE)
    add_regs_to_insn_regno_info (data, XEXP (PATTERN (insn), 0), insn,
				 code == USE ? OP_IN : OP_OUT, 0);
  if (CALL_P (insn))
    /* On some targets call insns can refer to pseudos in memory in
       CALL_INSN_FUNCTION_USAGE list.  Process them in order to
       consider their occurrences in calls for different
       transformations (e.g. inheritance) with given pseudos.  */
    for (link = CALL_INSN_FUNCTION_USAGE (insn);
	 link != NULL_RTX;
	 link = XEXP (link, 1))
      {
	code = GET_CODE (XEXP (link, 0));
	if ((code == USE || code == CLOBBER)
	    && MEM_P (XEXP (XEXP (link, 0), 0)))
	  add_regs_to_insn_regno_info (data, XEXP (XEXP (link, 0), 0), insn,
				       code == USE ? OP_IN : OP_OUT, 0);
      }
  /* Propagate the rebuilt per-insn info into the global reg info.  */
  if (NONDEBUG_INSN_P (insn))
    setup_insn_reg_info (data, freq);
}
1665 1.1 mrg
1666 1.1 mrg /* Return reg info of insn given by it UID. */
1667 1.1 mrg struct lra_insn_reg *
1668 1.1 mrg lra_get_insn_regs (int uid)
1669 1.1 mrg {
1670 1.1 mrg lra_insn_recog_data_t data;
1671 1.1 mrg
1672 1.1 mrg data = get_insn_recog_data_by_uid (uid);
1673 1.1 mrg return data->regs;
1674 1.1 mrg }
1675 1.1 mrg
1676 1.1 mrg
1677 1.1 mrg
/* Recursive hash function for RTL X.  */
hashval_t
lra_rtx_hash (rtx x)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  hashval_t val = 0;

  if (x == 0)
    return val;

  code = GET_CODE (x);
  /* Mix the rtx code itself into the hash.  */
  val += (int) code + 4095;

  /* Some RTL can be compared nonrecursively.  */
  switch (code)
    {
    case REG:
      return val + REGNO (x);

    case LABEL_REF:
      return iterative_hash_object (XEXP (x, 0), val);

    case SYMBOL_REF:
      return iterative_hash_object (XSTR (x, 0), val);

    case SCRATCH:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* These hash by code only; their payload is ignored, which is
	 valid (equal objects hash equal) though it increases
	 collisions among them.  */
      return val;

    case CONST_INT:
      return val + UINTVAL (x);

    default:
      break;
    }

  /* Hash the elements according to the rtx format string.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  /* HOST_WIDE_INT field.  */
	  val += XWINT (x, i);
	  break;

	case 'n':
	case 'i':
	  /* Integer fields.  */
	  val += XINT (x, i);
	  break;

	case 'V':
	case 'E':
	  /* Vector of rtx: hash the length, then each element.  */
	  val += XVECLEN (x, i);

	  for (j = 0; j < XVECLEN (x, i); j++)
	    val += lra_rtx_hash (XVECEXP (x, i, j));
	  break;

	case 'e':
	  /* Nested rtx: recurse.  */
	  val += lra_rtx_hash (XEXP (x, i));
	  break;

	case 'S':
	case 's':
	  /* String fields.  */
	  val += htab_hash_string (XSTR (x, i));
	  break;

	case 'u':
	case '0':
	case 't':
	  /* Insn references, unused slots and trees carry no data to
	     hash at this level.  */
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's, except for
	     within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return val;
}
1764 1.1 mrg
1765 1.1 mrg
1766 1.1 mrg
/* This page contains code dealing with stack of the insns which
   should be processed by the next constraint pass.  */

/* Bitmap used to put an insn on the stack only in one exemplar;
   indexed by insn UID.  */
static sbitmap lra_constraint_insn_stack_bitmap;

/* The stack itself.  */
vec<rtx_insn *> lra_constraint_insn_stack;
1776 1.1 mrg
1777 1.1 mrg /* Put INSN on the stack. If ALWAYS_UPDATE is true, always update the reg
1778 1.1 mrg info for INSN, otherwise only update it if INSN is not already on the
1779 1.1 mrg stack. */
1780 1.1 mrg static inline void
1781 1.1 mrg lra_push_insn_1 (rtx_insn *insn, bool always_update)
1782 1.1 mrg {
1783 1.1 mrg unsigned int uid = INSN_UID (insn);
1784 1.1 mrg if (always_update)
1785 1.1 mrg lra_update_insn_regno_info (insn);
1786 1.1 mrg if (uid >= SBITMAP_SIZE (lra_constraint_insn_stack_bitmap))
1787 1.1 mrg lra_constraint_insn_stack_bitmap =
1788 1.1 mrg sbitmap_resize (lra_constraint_insn_stack_bitmap, 3 * uid / 2, 0);
1789 1.1 mrg if (bitmap_bit_p (lra_constraint_insn_stack_bitmap, uid))
1790 1.1 mrg return;
1791 1.1 mrg bitmap_set_bit (lra_constraint_insn_stack_bitmap, uid);
1792 1.1 mrg if (! always_update)
1793 1.1 mrg lra_update_insn_regno_info (insn);
1794 1.1 mrg lra_constraint_insn_stack.safe_push (insn);
1795 1.1 mrg }
1796 1.1 mrg
1797 1.1 mrg /* Put INSN on the stack. */
1798 1.1 mrg void
1799 1.1 mrg lra_push_insn (rtx_insn *insn)
1800 1.1 mrg {
1801 1.1 mrg lra_push_insn_1 (insn, false);
1802 1.1 mrg }
1803 1.1 mrg
1804 1.1 mrg /* Put INSN on the stack and update its reg info. */
1805 1.1 mrg void
1806 1.1 mrg lra_push_insn_and_update_insn_regno_info (rtx_insn *insn)
1807 1.1 mrg {
1808 1.1 mrg lra_push_insn_1 (insn, true);
1809 1.1 mrg }
1810 1.1 mrg
1811 1.1 mrg /* Put insn with UID on the stack. */
1812 1.1 mrg void
1813 1.1 mrg lra_push_insn_by_uid (unsigned int uid)
1814 1.1 mrg {
1815 1.1 mrg lra_push_insn (lra_insn_recog_data[uid]->insn);
1816 1.1 mrg }
1817 1.1 mrg
1818 1.1 mrg /* Take the last-inserted insns off the stack and return it. */
1819 1.1 mrg rtx_insn *
1820 1.1 mrg lra_pop_insn (void)
1821 1.1 mrg {
1822 1.1 mrg rtx_insn *insn = lra_constraint_insn_stack.pop ();
1823 1.1 mrg bitmap_clear_bit (lra_constraint_insn_stack_bitmap, INSN_UID (insn));
1824 1.1 mrg return insn;
1825 1.1 mrg }
1826 1.1 mrg
1827 1.1 mrg /* Return the current size of the insn stack. */
1828 1.1 mrg unsigned int
1829 1.1 mrg lra_insn_stack_length (void)
1830 1.1 mrg {
1831 1.1 mrg return lra_constraint_insn_stack.length ();
1832 1.1 mrg }
1833 1.1 mrg
1834 1.1 mrg /* Push insns FROM to TO (excluding it) going in reverse order. */
1835 1.1 mrg static void
1836 1.1 mrg push_insns (rtx_insn *from, rtx_insn *to)
1837 1.1 mrg {
1838 1.1 mrg rtx_insn *insn;
1839 1.1 mrg
1840 1.1 mrg if (from == NULL_RTX)
1841 1.1 mrg return;
1842 1.1 mrg for (insn = from; insn != to; insn = PREV_INSN (insn))
1843 1.1 mrg if (INSN_P (insn))
1844 1.1 mrg lra_push_insn (insn);
1845 1.1 mrg }
1846 1.1 mrg
1847 1.1 mrg /* Set up sp offset for insn in range [FROM, LAST]. The offset is
1848 1.1 mrg taken from the next BB insn after LAST or zero if there in such
1849 1.1 mrg insn. */
1850 1.1 mrg static void
1851 1.1 mrg setup_sp_offset (rtx_insn *from, rtx_insn *last)
1852 1.1 mrg {
1853 1.1 mrg rtx_insn *before = next_nonnote_nondebug_insn_bb (last);
1854 1.1 mrg poly_int64 offset = (before == NULL_RTX || ! INSN_P (before)
1855 1.1 mrg ? 0 : lra_get_insn_recog_data (before)->sp_offset);
1856 1.1 mrg
1857 1.1 mrg for (rtx_insn *insn = from; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
1858 1.1 mrg lra_get_insn_recog_data (insn)->sp_offset = offset;
1859 1.1 mrg }
1860 1.1 mrg
/* Emit insns BEFORE before INSN and insns AFTER after INSN.  Put the
   insns onto the stack.  Print about emitting the insns with
   TITLE.  */
void
lra_process_new_insns (rtx_insn *insn, rtx_insn *before, rtx_insn *after,
		       const char *title)
{
  /* Nothing to emit.  */
  if (before == NULL_RTX && after == NULL_RTX)
    return;
  if (lra_dump_file != NULL)
    {
      dump_insn_slim (lra_dump_file, insn);
      if (before != NULL_RTX)
	{
	  fprintf (lra_dump_file," %s before:\n", title);
	  dump_rtl_slim (lra_dump_file, before, NULL, -1, 0);
	}
    }
  if (before != NULL_RTX)
    {
      /* Keep EH region notes consistent on the emitted reloads.  */
      if (cfun->can_throw_non_call_exceptions)
	copy_reg_eh_region_note_forward (insn, before, NULL);
      emit_insn_before (before, insn);
      /* Queue the new insns (now linked in just before INSN) for
	 reprocessing by the constraint pass.  */
      push_insns (PREV_INSN (insn), PREV_INSN (before));
      setup_sp_offset (before, PREV_INSN (insn));
    }
  if (after != NULL_RTX)
    {
      if (cfun->can_throw_non_call_exceptions)
	copy_reg_eh_region_note_forward (insn, after, NULL);
      if (! JUMP_P (insn))
	{
	  rtx_insn *last;

	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file, " %s after:\n", title);
	      dump_rtl_slim (lra_dump_file, after, NULL, -1, 0);
	    }
	  /* Find the last insn of the AFTER sequence.  */
	  for (last = after;
	       NEXT_INSN (last) != NULL_RTX;
	       last = NEXT_INSN (last))
	    ;
	  emit_insn_after (after, insn);
	  push_insns (last, insn);
	  setup_sp_offset (after, last);
	}
      else
	{
	  /* Put output reload insns on successor BBs: */
	  edge_iterator ei;
	  edge e;

	  FOR_EACH_EDGE (e, ei, BLOCK_FOR_INSN (insn)->succs)
	    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	      {
		/* We already made the edge no-critical in ira.cc::ira */
		lra_assert (!EDGE_CRITICAL_P (e));
		/* Skip over the destination's label and basic-block
		   note to find the insertion point.  */
		rtx_insn *curr, *tmp = BB_HEAD (e->dest);
		if (LABEL_P (tmp))
		  tmp = NEXT_INSN (tmp);
		if (NOTE_INSN_BASIC_BLOCK_P (tmp))
		  tmp = NEXT_INSN (tmp);
		/* Do not put reload insns if it is the last BB
		   without actual insns.  */
		if (tmp == NULL)
		  continue;
		/* Each successor gets its own copy of the reload
		   sequence.  */
		start_sequence ();
		for (curr = after; curr != NULL_RTX; curr = NEXT_INSN (curr))
		  emit_insn (copy_insn (PATTERN (curr)));
		rtx_insn *copy = get_insns (), *last = get_last_insn ();
		end_sequence ();
		if (lra_dump_file != NULL)
		  {
		    fprintf (lra_dump_file, " %s after in bb%d:\n", title,
			     e->dest->index);
		    dump_rtl_slim (lra_dump_file, copy, NULL, -1, 0);
		  }
		/* Use the right emit func for setting up BB_END/BB_HEAD: */
		if (BB_END (e->dest) == PREV_INSN (tmp))
		  emit_insn_after_noloc (copy, PREV_INSN (tmp), e->dest);
		else
		  emit_insn_before_noloc (copy, tmp, e->dest);
		push_insns (last, PREV_INSN (copy));
		setup_sp_offset (copy, last);
		/* We can ignore BB live info here as it and reg notes
		   will be updated before the next assignment
		   sub-pass.  */
	      }
	}
    }
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file, "\n");
  if (cfun->can_throw_non_call_exceptions)
    {
      /* Drop a stale EH note if INSN can no longer throw.  */
      rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
      if (note && !insn_could_throw_p (insn))
	remove_note (insn, note);
    }
}
1961 1.1 mrg
1962 1.1 mrg
/* Replace all references to register OLD_REGNO in *LOC with pseudo
   register NEW_REG.  Try to simplify subreg of constant if SUBREG_P.
   DEBUG_P is if LOC is within a DEBUG_INSN.  Return true if any
   change was made.  */
bool
lra_substitute_pseudo (rtx *loc, int old_regno, rtx new_reg, bool subreg_p,
		       bool debug_p)
{
  rtx x = *loc;
  bool result = false;
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (x == NULL_RTX)
    return false;

  code = GET_CODE (x);
  if (code == SUBREG && subreg_p)
    {
      rtx subst, inner = SUBREG_REG (x);
      /* Transform subreg of constant while we still have inner mode
	 of the subreg.  The subreg internal should not be an insn
	 operand.  */
      if (REG_P (inner) && (int) REGNO (inner) == old_regno
	  && CONSTANT_P (new_reg)
	  && (subst = simplify_subreg (GET_MODE (x), new_reg, GET_MODE (inner),
				       SUBREG_BYTE (x))) != NULL_RTX)
	{
	  *loc = subst;
	  return true;
	}

    }
  else if (code == REG && (int) REGNO (x) == old_regno)
    {
      machine_mode mode = GET_MODE (x);
      machine_mode inner_mode = GET_MODE (new_reg);

      /* If the modes differ, wrap NEW_REG in a subreg -- except when
	 substituting a constant int into a scalar-int context, where
	 the constant stands on its own.  */
      if (mode != inner_mode
	  && ! (CONST_SCALAR_INT_P (new_reg) && SCALAR_INT_MODE_P (mode)))
	{
	  poly_uint64 offset = 0;
	  if (partial_subreg_p (mode, inner_mode)
	      && SCALAR_INT_MODE_P (inner_mode))
	    offset = subreg_lowpart_offset (mode, inner_mode);
	  /* In debug insns use a raw subreg to avoid subreg
	     validation.  */
	  if (debug_p)
	    new_reg = gen_rtx_raw_SUBREG (mode, new_reg, offset);
	  else
	    new_reg = gen_rtx_SUBREG (mode, new_reg, offset);
	}
      *loc = new_reg;
      return true;
    }

  /* Scan all the operand sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* For debug insns, when the replaced operand of one of
	     these wrappers becomes a constant, try to fold the whole
	     expression.  */
	  if (debug_p
	      && i == 0
	      && (code == SUBREG
		  || code == ZERO_EXTEND
		  || code == SIGN_EXTEND
		  || code == FLOAT
		  || code == UNSIGNED_FLOAT))
	    {
	      rtx y = XEXP (x, 0);
	      if (lra_substitute_pseudo (&y, old_regno,
					 new_reg, subreg_p, debug_p))
		{
		  result = true;
		  if (CONST_SCALAR_INT_P (y))
		    {
		      if (code == SUBREG)
			y = simplify_subreg (GET_MODE (x), y,
					     GET_MODE (SUBREG_REG (x)),
					     SUBREG_BYTE (x));
		      else
			y = simplify_unary_operation (code, GET_MODE (x), y,
						      GET_MODE (XEXP (x, 0)));
		      if (y)
			*loc = y;
		      else
			/* Simplification failed: mark the debug value
			   as unavailable.  */
			*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
		    }
		  else
		    XEXP (x, 0) = y;
		}
	    }
	  else if (lra_substitute_pseudo (&XEXP (x, i), old_regno,
					  new_reg, subreg_p, debug_p))
	    result = true;
	}
      else if (fmt[i] == 'E')
	{
	  /* Recurse into each element of an rtx vector.  */
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (lra_substitute_pseudo (&XVECEXP (x, i, j), old_regno,
				       new_reg, subreg_p, debug_p))
	      result = true;
	}
    }
  return result;
}
2070 1.1 mrg
2071 1.1 mrg /* Call lra_substitute_pseudo within an insn. Try to simplify subreg
2072 1.1 mrg of constant if SUBREG_P. This won't update the insn ptr, just the
2073 1.1 mrg contents of the insn. */
2074 1.1 mrg bool
2075 1.1 mrg lra_substitute_pseudo_within_insn (rtx_insn *insn, int old_regno,
2076 1.1 mrg rtx new_reg, bool subreg_p)
2077 1.1 mrg {
2078 1.1 mrg rtx loc = insn;
2079 1.1 mrg return lra_substitute_pseudo (&loc, old_regno, new_reg, subreg_p,
2080 1.1 mrg DEBUG_INSN_P (insn));
2081 1.1 mrg }
2082 1.1 mrg
2083 1.1 mrg
2084 1.1 mrg
2086 1.1 mrg /* Return new register of the same mode as ORIGINAL of class ALL_REGS.
2087 1.1 mrg Used in ira_remove_scratches. */
2088 1.1 mrg static rtx
2089 1.1 mrg get_scratch_reg (rtx original)
2090 1.1 mrg {
2091 1.1 mrg return lra_create_new_reg (GET_MODE (original), original, ALL_REGS,
2092 1.1 mrg NULL, NULL);
2093 1.1 mrg }
2094 1.1 mrg
2095 1.1 mrg /* Remove all insn scratches in INSN. */
2096 1.1 mrg static void
2097 1.1 mrg remove_insn_scratches (rtx_insn *insn)
2098 1.1 mrg {
2099 1.1 mrg if (ira_remove_insn_scratches (insn, true, lra_dump_file, get_scratch_reg))
2100 1.1 mrg df_insn_rescan (insn);
2101 1.1 mrg }
2102 1.1 mrg
2103 1.1 mrg /* Remove all insn scratches in the current function. */
2104 1.1 mrg static void
2105 1.1 mrg remove_scratches (void)
2106 1.1 mrg {
2107 1.1 mrg basic_block bb;
2108 1.1 mrg rtx_insn *insn;
2109 1.1 mrg
2110 1.1 mrg FOR_EACH_BB_FN (bb, cfun)
2111 1.1 mrg FOR_BB_INSNS (bb, insn)
2112 1.1 mrg if (INSN_P (insn))
2113 1.1 mrg remove_insn_scratches (insn);
2114 1.1 mrg }
2115 1.1 mrg
2116 1.1 mrg /* Function checks RTL for correctness. If FINAL_P is true, it is
2117 1.1 mrg done at the end of LRA and the check is more rigorous. */
2118 1.1 mrg static void
2119 1.1 mrg check_rtl (bool final_p)
2120 1.1 mrg {
2121 1.1 mrg basic_block bb;
2122 1.1 mrg rtx_insn *insn;
2123 1.1 mrg
2124 1.1 mrg lra_assert (! final_p || reload_completed);
2125 1.1 mrg FOR_EACH_BB_FN (bb, cfun)
2126 1.1 mrg FOR_BB_INSNS (bb, insn)
2127 1.1 mrg if (NONDEBUG_INSN_P (insn)
2128 1.1 mrg && GET_CODE (PATTERN (insn)) != USE
2129 1.1 mrg && GET_CODE (PATTERN (insn)) != CLOBBER
2130 1.1 mrg && GET_CODE (PATTERN (insn)) != ASM_INPUT)
2131 1.1 mrg {
2132 1.1 mrg if (final_p)
2133 1.1 mrg {
2134 1.1 mrg extract_constrain_insn (insn);
2135 1.1 mrg continue;
2136 1.1 mrg }
2137 1.1 mrg /* LRA code is based on assumption that all addresses can be
2138 1.1 mrg correctly decomposed. LRA can generate reloads for
2139 1.1 mrg decomposable addresses. The decomposition code checks the
2140 1.1 mrg correctness of the addresses. So we don't need to check
2141 1.1 mrg the addresses here. Don't call insn_invalid_p here, it can
2142 1.1 mrg change the code at this stage. */
2143 1.1 mrg if (recog_memoized (insn) < 0 && asm_noperands (PATTERN (insn)) < 0)
2144 1.1 mrg fatal_insn_not_found (insn);
2145 1.1 mrg }
2146 1.1 mrg }
2147 1.1 mrg
2148 1.1 mrg /* Determine if the current function has an exception receiver block
2149 1.1 mrg that reaches the exit block via non-exceptional edges */
2150 1.1 mrg static bool
2151 1.1 mrg has_nonexceptional_receiver (void)
2152 1.1 mrg {
2153 1.1 mrg edge e;
2154 1.1 mrg edge_iterator ei;
2155 1.1 mrg basic_block *tos, *worklist, bb;
2156 1.1 mrg
2157 1.1 mrg /* If we're not optimizing, then just err on the safe side. */
2158 1.1 mrg if (!optimize)
2159 1.1 mrg return true;
2160 1.1 mrg
2161 1.1 mrg /* First determine which blocks can reach exit via normal paths. */
2162 1.1 mrg tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
2163 1.1 mrg
2164 1.1 mrg FOR_EACH_BB_FN (bb, cfun)
2165 1.1 mrg bb->flags &= ~BB_REACHABLE;
2166 1.1 mrg
2167 1.1 mrg /* Place the exit block on our worklist. */
2168 1.1 mrg EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
2169 1.1 mrg *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);
2170 1.1 mrg
2171 1.1 mrg /* Iterate: find everything reachable from what we've already seen. */
2172 1.1 mrg while (tos != worklist)
2173 1.1 mrg {
2174 1.1 mrg bb = *--tos;
2175 1.1 mrg
2176 1.1 mrg FOR_EACH_EDGE (e, ei, bb->preds)
2177 1.1 mrg if (e->flags & EDGE_ABNORMAL)
2178 1.1 mrg {
2179 1.1 mrg free (worklist);
2180 1.1 mrg return true;
2181 1.1 mrg }
2182 1.1 mrg else
2183 1.1 mrg {
2184 1.1 mrg basic_block src = e->src;
2185 1.1 mrg
2186 1.1 mrg if (!(src->flags & BB_REACHABLE))
2187 1.1 mrg {
2188 1.1 mrg src->flags |= BB_REACHABLE;
2189 1.1 mrg *tos++ = src;
2190 1.1 mrg }
2191 1.1 mrg }
2192 1.1 mrg }
2193 1.1 mrg free (worklist);
2194 1.1 mrg /* No exceptional block reached exit unexceptionally. */
2195 1.1 mrg return false;
2196 1.1 mrg }
2197 1.1 mrg
2198 1.1 mrg /* Remove all REG_DEAD and REG_UNUSED notes and regenerate REG_INC.
2199 1.1 mrg We change pseudos by hard registers without notification of DF and
2200 1.1 mrg that can make the notes obsolete. DF-infrastructure does not deal
2201 1.1 mrg with REG_INC notes -- so we should regenerate them here. */
2202 1.1 mrg static void
2203 1.1 mrg update_inc_notes (void)
2204 1.1 mrg {
2205 1.1 mrg rtx *pnote;
2206 1.1 mrg basic_block bb;
2207 1.1 mrg rtx_insn *insn;
2208 1.1 mrg
2209 1.1 mrg FOR_EACH_BB_FN (bb, cfun)
2210 1.1 mrg FOR_BB_INSNS (bb, insn)
2211 1.1 mrg if (NONDEBUG_INSN_P (insn))
2212 1.1 mrg {
2213 1.1 mrg pnote = ®_NOTES (insn);
2214 1.1 mrg while (*pnote != 0)
2215 1.1 mrg {
2216 1.1 mrg if (REG_NOTE_KIND (*pnote) == REG_DEAD
2217 1.1 mrg || REG_NOTE_KIND (*pnote) == REG_UNUSED
2218 1.1 mrg || REG_NOTE_KIND (*pnote) == REG_INC)
2219 1.1 mrg *pnote = XEXP (*pnote, 1);
2220 1.1 mrg else
2221 1.1 mrg pnote = &XEXP (*pnote, 1);
2222 1.1 mrg }
2223 1.1 mrg
2224 1.1 mrg if (AUTO_INC_DEC)
2225 1.1 mrg add_auto_inc_notes (insn, PATTERN (insn));
2226 1.1 mrg }
2227 1.1 mrg }
2228 1.1 mrg
/* Set to 1 while in lra.  */
int lra_in_progress;

/* Start of pseudo regnos before the LRA.  */
int lra_new_regno_start;

/* Start of reload pseudo regnos before the new spill pass.  */
int lra_constraint_new_regno_start;

/* Avoid spilling pseudos with regno more than the following value if
   it is possible.  */
int lra_bad_spill_regno_start;

/* A pseudo of Pmode.  */
rtx lra_pmode_pseudo;

/* Inheritance pseudo regnos before the new spill pass.  */
bitmap_head lra_inheritance_pseudos;

/* Split regnos before the new spill pass.  */
bitmap_head lra_split_regs;

/* Reload pseudo regnos before the new assignment pass which still can
   be spilled after the assignment pass as memory is also accepted in
   insns for the reload pseudos.  */
bitmap_head lra_optional_reload_pseudos;

/* Pseudo regnos used for subreg reloads before the new assignment
   pass.  Such pseudos still can be spilled after the assignment
   pass.  */
bitmap_head lra_subreg_reload_pseudos;

/* File used for output of LRA debug information.  */
FILE *lra_dump_file;

/* True if we split hard reg after the last constraint sub-pass.  */
bool lra_hard_reg_split_p;

/* True if we found an asm error.  */
bool lra_asm_error_p;

/* True if we should try spill into registers of different classes
   instead of memory.  */
bool lra_reg_spill_p;
2273 1.1 mrg
2274 1.1 mrg /* Set up value LRA_REG_SPILL_P. */
2275 1.1 mrg static void
2276 1.1 mrg setup_reg_spill_flag (void)
2277 1.1 mrg {
2278 1.1 mrg int cl, mode;
2279 1.1 mrg
2280 1.1 mrg if (targetm.spill_class != NULL)
2281 1.1 mrg for (cl = 0; cl < (int) LIM_REG_CLASSES; cl++)
2282 1.1 mrg for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
2283 1.1 mrg if (targetm.spill_class ((enum reg_class) cl,
2284 1.1 mrg (machine_mode) mode) != NO_REGS)
2285 1.1 mrg {
2286 1.1 mrg lra_reg_spill_p = true;
2287 1.1 mrg return;
2288 1.1 mrg }
2289 1.1 mrg lra_reg_spill_p = false;
2290 1.1 mrg }
2291 1.1 mrg
/* True if the current function is too big to use regular algorithms
   in LRA.  In other words, we should use simpler and faster algorithms
   in LRA.  It also means we should not worry about generating code
   for caller saves.  The value is set up in IRA.  */
bool lra_simple_p;
2297 1.1 mrg
2298 1.1 mrg /* Major LRA entry function. F is a file should be used to dump LRA
2299 1.1 mrg debug info. */
2300 1.1 mrg void
2301 1.1 mrg lra (FILE *f)
2302 1.1 mrg {
2303 1.1 mrg int i;
2304 1.1 mrg bool live_p, inserted_p;
2305 1.1 mrg
2306 1.1 mrg lra_dump_file = f;
2307 1.1 mrg lra_asm_error_p = false;
2308 1.1 mrg lra_pmode_pseudo = gen_reg_rtx (Pmode);
2309 1.1 mrg
2310 1.1 mrg timevar_push (TV_LRA);
2311 1.1 mrg
2312 1.1 mrg /* Make sure that the last insn is a note. Some subsequent passes
2313 1.1 mrg need it. */
2314 1.1 mrg emit_note (NOTE_INSN_DELETED);
2315 1.1 mrg
2316 1.1 mrg lra_no_alloc_regs = ira_no_alloc_regs;
2317 1.1 mrg
2318 1.1 mrg init_reg_info ();
2319 1.1 mrg expand_reg_info ();
2320 1.1 mrg
2321 1.1 mrg init_insn_recog_data ();
2322 1.1 mrg
2323 1.1 mrg /* Some quick check on RTL generated by previous passes. */
2324 1.1 mrg if (flag_checking)
2325 1.1 mrg check_rtl (false);
2326 1.1 mrg
2327 1.1 mrg lra_in_progress = 1;
2328 1.1 mrg
2329 1.1 mrg lra_live_range_iter = lra_coalesce_iter = lra_constraint_iter = 0;
2330 1.1 mrg lra_assignment_iter = lra_assignment_iter_after_spill = 0;
2331 1.1 mrg lra_inheritance_iter = lra_undo_inheritance_iter = 0;
2332 1.1 mrg lra_rematerialization_iter = 0;
2333 1.1 mrg
2334 1.1 mrg setup_reg_spill_flag ();
2335 1.1 mrg
2336 1.1 mrg /* Function remove_scratches can creates new pseudos for clobbers --
2337 1.1 mrg so set up lra_constraint_new_regno_start before its call to
2338 1.1 mrg permit changing reg classes for pseudos created by this
2339 1.1 mrg simplification. */
2340 1.1 mrg lra_constraint_new_regno_start = lra_new_regno_start = max_reg_num ();
2341 1.1 mrg lra_bad_spill_regno_start = INT_MAX;
2342 1.1 mrg remove_scratches ();
2343 1.1 mrg
2344 1.1 mrg /* A function that has a non-local label that can reach the exit
2345 1.1 mrg block via non-exceptional paths must save all call-saved
2346 1.1 mrg registers. */
2347 1.1 mrg if (cfun->has_nonlocal_label && has_nonexceptional_receiver ())
2348 1.1 mrg crtl->saves_all_registers = 1;
2349 1.1 mrg
2350 1.1 mrg if (crtl->saves_all_registers)
2351 1.1 mrg for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2352 1.1 mrg if (!crtl->abi->clobbers_full_reg_p (i)
2353 1.1 mrg && !fixed_regs[i]
2354 1.1 mrg && !LOCAL_REGNO (i))
2355 1.1 mrg df_set_regs_ever_live (i, true);
2356 1.1 mrg
2357 1.1 mrg /* We don't DF from now and avoid its using because it is to
2358 1.1 mrg expensive when a lot of RTL changes are made. */
2359 1.1 mrg df_set_flags (DF_NO_INSN_RESCAN);
2360 1.1 mrg lra_constraint_insn_stack.create (get_max_uid ());
2361 1.1 mrg lra_constraint_insn_stack_bitmap = sbitmap_alloc (get_max_uid ());
2362 1.1 mrg bitmap_clear (lra_constraint_insn_stack_bitmap);
2363 1.1 mrg lra_live_ranges_init ();
2364 1.1 mrg lra_constraints_init ();
2365 1.1 mrg lra_curr_reload_num = 0;
2366 1.1 mrg push_insns (get_last_insn (), NULL);
2367 1.1 mrg /* It is needed for the 1st coalescing. */
2368 1.1 mrg bitmap_initialize (&lra_inheritance_pseudos, ®_obstack);
2369 1.1 mrg bitmap_initialize (&lra_split_regs, ®_obstack);
2370 1.1 mrg bitmap_initialize (&lra_optional_reload_pseudos, ®_obstack);
2371 1.1 mrg bitmap_initialize (&lra_subreg_reload_pseudos, ®_obstack);
2372 1.1 mrg live_p = false;
2373 1.1 mrg if (maybe_ne (get_frame_size (), 0) && crtl->stack_alignment_needed)
2374 1.1 mrg /* If we have a stack frame, we must align it now. The stack size
2375 1.1 mrg may be a part of the offset computation for register
2376 1.1 mrg elimination. */
2377 1.1 mrg assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
2378 1.1 mrg lra_init_equiv ();
2379 1.1 mrg for (;;)
2380 1.1 mrg {
2381 1.1 mrg for (;;)
2382 1.1 mrg {
2383 1.1 mrg bool reloads_p = lra_constraints (lra_constraint_iter == 0);
2384 1.1 mrg /* Constraint transformations may result in that eliminable
2385 1.1 mrg hard regs become uneliminable and pseudos which use them
2386 1.1 mrg should be spilled. It is better to do it before pseudo
2387 1.1 mrg assignments.
2388 1.1 mrg
2389 1.1 mrg For example, rs6000 can make
2390 1.1 mrg RS6000_PIC_OFFSET_TABLE_REGNUM uneliminable if we started
2391 1.1 mrg to use a constant pool. */
2392 1.1 mrg lra_eliminate (false, false);
2393 1.1 mrg /* We should try to assign hard registers to scratches even
2394 1.1 mrg if there were no RTL transformations in lra_constraints.
2395 1.1 mrg Also we should check IRA assignments on the first
2396 1.1 mrg iteration as they can be wrong because of early clobbers
2397 1.1 mrg operands which are ignored in IRA. */
2398 1.1 mrg if (! reloads_p && lra_constraint_iter > 1)
2399 1.1 mrg {
2400 1.1 mrg /* Stack is not empty here only when there are changes
2401 1.1 mrg during the elimination sub-pass. */
2402 1.1 mrg if (bitmap_empty_p (lra_constraint_insn_stack_bitmap))
2403 1.1 mrg break;
2404 1.1 mrg else
2405 1.1 mrg /* If there are no reloads but changing due
2406 1.1 mrg elimination, restart the constraint sub-pass
2407 1.1 mrg first. */
2408 1.1 mrg continue;
2409 1.1 mrg }
2410 1.1 mrg /* Do inheritance only for regular algorithms. */
2411 1.1 mrg if (! lra_simple_p)
2412 1.1 mrg lra_inheritance ();
2413 1.1 mrg if (live_p)
2414 1.1 mrg lra_clear_live_ranges ();
2415 1.1 mrg bool fails_p;
2416 1.1 mrg lra_hard_reg_split_p = false;
2417 1.1 mrg do
2418 1.1 mrg {
2419 1.1 mrg /* We need live ranges for lra_assign -- so build them.
2420 1.1 mrg But don't remove dead insns or change global live
2421 1.1 mrg info as we can undo inheritance transformations after
2422 1.1 mrg inheritance pseudo assigning. */
2423 1.1 mrg lra_create_live_ranges (true, !lra_simple_p);
2424 1.1 mrg live_p = true;
2425 1.1 mrg /* If we don't spill non-reload and non-inheritance
2426 1.1 mrg pseudos, there is no sense to run memory-memory move
2427 1.1 mrg coalescing. If inheritance pseudos were spilled, the
2428 1.1 mrg memory-memory moves involving them will be removed by
2429 1.1 mrg pass undoing inheritance. */
2430 1.1 mrg if (lra_simple_p)
2431 1.1 mrg lra_assign (fails_p);
2432 1.1 mrg else
2433 1.1 mrg {
2434 1.1 mrg bool spill_p = !lra_assign (fails_p);
2435 1.1 mrg
2436 1.1 mrg if (lra_undo_inheritance ())
2437 1.1 mrg live_p = false;
2438 1.1 mrg if (spill_p && ! fails_p)
2439 1.1 mrg {
2440 1.1 mrg if (! live_p)
2441 1.1 mrg {
2442 1.1 mrg lra_create_live_ranges (true, true);
2443 1.1 mrg live_p = true;
2444 1.1 mrg }
2445 1.1 mrg if (lra_coalesce ())
2446 1.1 mrg live_p = false;
2447 1.1 mrg }
2448 1.1 mrg if (! live_p)
2449 1.1 mrg lra_clear_live_ranges ();
2450 1.1 mrg }
2451 1.1 mrg if (fails_p)
2452 1.1 mrg {
2453 1.1 mrg /* It is a very rare case. It is the last hope to
2454 1.1 mrg split a hard regno live range for a reload
2455 1.1 mrg pseudo. */
2456 1.1 mrg if (live_p)
2457 1.1 mrg lra_clear_live_ranges ();
2458 1.1 mrg live_p = false;
2459 1.1 mrg if (! lra_split_hard_reg_for ())
2460 1.1 mrg break;
2461 1.1 mrg lra_hard_reg_split_p = true;
2462 1.1 mrg }
2463 1.1 mrg }
2464 1.1 mrg while (fails_p);
2465 1.1 mrg if (! live_p) {
2466 1.1 mrg /* We need the correct reg notes for work of constraint sub-pass. */
2467 1.1 mrg lra_create_live_ranges (true, true);
2468 1.1 mrg live_p = true;
2469 1.1 mrg }
2470 1.1 mrg }
2471 1.1 mrg /* Don't clear optional reloads bitmap until all constraints are
2472 1.1 mrg satisfied as we need to differ them from regular reloads. */
2473 1.1 mrg bitmap_clear (&lra_optional_reload_pseudos);
2474 1.1 mrg bitmap_clear (&lra_subreg_reload_pseudos);
2475 1.1 mrg bitmap_clear (&lra_inheritance_pseudos);
2476 1.1 mrg bitmap_clear (&lra_split_regs);
2477 1.1 mrg if (! live_p)
2478 1.1 mrg {
2479 1.1 mrg /* We need full live info for spilling pseudos into
2480 1.1 mrg registers instead of memory. */
2481 1.1 mrg lra_create_live_ranges (lra_reg_spill_p, true);
2482 1.1 mrg live_p = true;
2483 1.1 mrg }
2484 1.1 mrg /* We should check necessity for spilling here as the above live
2485 1.1 mrg range pass can remove spilled pseudos. */
2486 1.1 mrg if (! lra_need_for_spills_p ())
2487 1.1 mrg break;
2488 1.1 mrg /* Now we know what pseudos should be spilled. Try to
2489 1.1 mrg rematerialize them first. */
2490 1.1 mrg if (lra_remat ())
2491 1.1 mrg {
2492 1.1 mrg /* We need full live info -- see the comment above. */
2493 1.1 mrg lra_create_live_ranges (lra_reg_spill_p, true);
2494 1.1 mrg live_p = true;
2495 1.1 mrg if (! lra_need_for_spills_p ())
2496 1.1 mrg {
2497 1.1 mrg if (lra_need_for_scratch_reg_p ())
2498 1.1 mrg continue;
2499 1.1 mrg break;
2500 1.1 mrg }
2501 1.1 mrg }
2502 1.1 mrg lra_spill ();
2503 1.1 mrg /* Assignment of stack slots changes elimination offsets for
2504 1.1 mrg some eliminations. So update the offsets here. */
2505 1.1 mrg lra_eliminate (false, false);
2506 1.1 mrg lra_constraint_new_regno_start = max_reg_num ();
2507 1.1 mrg if (lra_bad_spill_regno_start == INT_MAX
2508 1.1 mrg && lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES
2509 1.1 mrg && lra_rematerialization_iter > LRA_MAX_REMATERIALIZATION_PASSES)
2510 1.1 mrg /* After switching off inheritance and rematerialization
2511 1.1 mrg passes, avoid spilling reload pseudos will be created to
2512 1.1 mrg prevent LRA cycling in some complicated cases. */
2513 1.1 mrg lra_bad_spill_regno_start = lra_constraint_new_regno_start;
2514 1.1 mrg lra_assignment_iter_after_spill = 0;
2515 1.1 mrg }
2516 1.1 mrg ira_restore_scratches (lra_dump_file);
2517 1.1 mrg lra_eliminate (true, false);
2518 1.1 mrg lra_final_code_change ();
2519 1.1 mrg lra_in_progress = 0;
2520 1.1 mrg if (live_p)
2521 1.1 mrg lra_clear_live_ranges ();
2522 1.1 mrg lra_live_ranges_finish ();
2523 1.1 mrg lra_constraints_finish ();
2524 1.1 mrg finish_reg_info ();
2525 1.1 mrg sbitmap_free (lra_constraint_insn_stack_bitmap);
2526 1.1 mrg lra_constraint_insn_stack.release ();
2527 1.1 mrg finish_insn_recog_data ();
2528 1.1 mrg regstat_free_n_sets_and_refs ();
2529 1.1 mrg regstat_free_ri ();
2530 1.1 mrg reload_completed = 1;
2531 1.1 mrg update_inc_notes ();
2532 1.1 mrg
2533 1.1 mrg inserted_p = fixup_abnormal_edges ();
2534 1.1 mrg
2535 1.1 mrg /* We've possibly turned single trapping insn into multiple ones. */
2536 1.1 mrg if (cfun->can_throw_non_call_exceptions)
2537 1.1 mrg {
2538 1.1 mrg auto_sbitmap blocks (last_basic_block_for_fn (cfun));
2539 1.1 mrg bitmap_ones (blocks);
2540 1.1 mrg find_many_sub_basic_blocks (blocks);
2541 1.1 mrg }
2542 1.1 mrg
2543 1.1 mrg if (inserted_p)
2544 1.1 mrg commit_edge_insertions ();
2545 1.1 mrg
2546 1.1 mrg /* Subsequent passes expect that rtl is unshared, so unshare everything
2547 1.1 mrg here. */
2548 1.1 mrg unshare_all_rtl_again (get_insns ());
2549 1.1 mrg
2550 1.1 mrg if (flag_checking)
2551 1.1 mrg check_rtl (true);
2552 1.1 mrg
2553 1.1 mrg timevar_pop (TV_LRA);
2554 1.1 mrg }
2555 1.1 mrg
2556 1.1 mrg /* Called once per compiler to initialize LRA data once. */
2557 1.1 mrg void
2558 1.1 mrg lra_init_once (void)
2559 1.1 mrg {
2560 1.1 mrg init_insn_code_data_once ();
2561 1.1 mrg }
2562
2563 /* Called once per compiler to finish LRA data which are initialize
2564 once. */
2565 void
2566 lra_finish_once (void)
2567 {
2568 finish_insn_code_data_once ();
2569 }
2570