lra-spills.cc revision 1.1 1 1.1 mrg /* Change pseudos by memory.
2 1.1 mrg Copyright (C) 2010-2022 Free Software Foundation, Inc.
3 1.1 mrg Contributed by Vladimir Makarov <vmakarov (at) redhat.com>.
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it under
8 1.1 mrg the terms of the GNU General Public License as published by the Free
9 1.1 mrg Software Foundation; either version 3, or (at your option) any later
10 1.1 mrg version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 1.1 mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 1.1 mrg for more details.
16 1.1 mrg
17 1.1 mrg You should have received a copy of the GNU General Public License
18 1.1 mrg along with GCC; see the file COPYING3. If not see
19 1.1 mrg <http://www.gnu.org/licenses/>. */
20 1.1 mrg
21 1.1 mrg
22 1.1 mrg /* This file contains code for a pass to change spilled pseudos into
23 1.1 mrg memory.
24 1.1 mrg
25 1.1 mrg The pass creates necessary stack slots and assigns spilled pseudos
26 1.1 mrg to the stack slots in following way:
27 1.1 mrg
28 1.1 mrg for all spilled pseudos P most frequently used first do
29 1.1 mrg for all stack slots S do
30 1.1 mrg if P doesn't conflict with pseudos assigned to S then
31 1.1 mrg assign S to P and goto to the next pseudo process
32 1.1 mrg end
33 1.1 mrg end
34 1.1 mrg create new stack slot S and assign P to S
35 1.1 mrg end
36 1.1 mrg
37 1.1 mrg The actual algorithm is bit more complicated because of different
38 1.1 mrg pseudo sizes.
39 1.1 mrg
40 1.1 mrg After that the code changes spilled pseudos (except ones created
41 1.1 mrg from scratches) by corresponding stack slot memory in RTL.
42 1.1 mrg
43 1.1 mrg If at least one stack slot was created, we need to run more passes
44 1.1 mrg because we have new addresses which should be checked and because
45 1.1 mrg the old address displacements might change and address constraints
46 1.1 mrg (or insn memory constraints) might not be satisfied any more.
47 1.1 mrg
48 1.1 mrg For some targets, the pass can spill some pseudos into hard
49 1.1 mrg registers of different class (usually into vector registers)
50 1.1 mrg instead of spilling them into memory if it is possible and
51 1.1 mrg profitable. Spilling GENERAL_REGS pseudo into SSE registers for
52 1.1 mrg Intel Corei7 is an example of such optimization. And this is
53 1.1 mrg actually recommended by Intel optimization guide.
54 1.1 mrg
55 1.1 mrg The file also contains code for final change of pseudos on hard
56 1.1 mrg regs correspondingly assigned to them. */
57 1.1 mrg
58 1.1 mrg #include "config.h"
59 1.1 mrg #include "system.h"
60 1.1 mrg #include "coretypes.h"
61 1.1 mrg #include "backend.h"
62 1.1 mrg #include "target.h"
63 1.1 mrg #include "rtl.h"
64 1.1 mrg #include "df.h"
65 1.1 mrg #include "insn-config.h"
66 1.1 mrg #include "regs.h"
67 1.1 mrg #include "memmodel.h"
68 1.1 mrg #include "ira.h"
69 1.1 mrg #include "recog.h"
70 1.1 mrg #include "output.h"
71 1.1 mrg #include "cfgrtl.h"
72 1.1 mrg #include "lra.h"
73 1.1 mrg #include "lra-int.h"
74 1.1 mrg
75 1.1 mrg
76 1.1 mrg /* Max regno at the start of the pass. */
77 1.1 mrg static int regs_num;
78 1.1 mrg
79 1.1 mrg /* Map spilled regno -> hard regno used instead of memory for
80 1.1 mrg spilling. */
81 1.1 mrg static rtx *spill_hard_reg;
82 1.1 mrg
/* The structure describes the stack slot of a spilled pseudo.  There
   is one such record per pseudo (indexed by regno in PSEUDO_SLOTS);
   pseudos sharing a stack slot are chained through NEXT/FIRST.  */
struct pseudo_slot
{
  /* Number (0, 1, ...) of the stack slot to which the given pseudo
     belongs.  */
  int slot_num;
  /* Pseudos assigned to the same slot number form a singly-linked
     list: NEXT is the following member, FIRST the list head.  */
  struct pseudo_slot *next, *first;
  /* Memory representing the spilled pseudo.  */
  rtx mem;
};
94 1.1 mrg
95 1.1 mrg /* The stack slots for each spilled pseudo. Indexed by regnos. */
96 1.1 mrg static struct pseudo_slot *pseudo_slots;
97 1.1 mrg
/* The structure describes a register or a stack slot which can be
   used for several spilled pseudos.  */
class slot
{
public:
  /* First pseudo with the given stack slot.  */
  int regno;
  /* Hard reg into which the slot pseudos are spilled.  The value is
     negative for pseudos spilled into memory.  */
  int hard_regno;
  /* Maximum alignment required by all users of the slot.  */
  unsigned int align;
  /* Maximum size required by all users of the slot.  */
  poly_int64 size;
  /* Memory representing the whole stack slot.  It can be different
     from the memory representing a pseudo belonging to the stack slot
     because a pseudo can be placed in a part of the corresponding
     stack slot.  The value is NULL for pseudos spilled into a hard
     reg.  */
  rtx mem;
  /* Combined live ranges of all pseudos belonging to the given slot.
     It is used to figure out whether a new spilled pseudo can use the
     given stack slot.  */
  lra_live_range_t live_ranges;
};
122 1.1 mrg
123 1.1 mrg /* Array containing info about the stack slots. The array element is
124 1.1 mrg indexed by the stack slot number in the range [0..slots_num). */
125 1.1 mrg static class slot *slots;
126 1.1 mrg /* The number of the stack slots currently existing. */
127 1.1 mrg static int slots_num;
128 1.1 mrg
/* Set up memory of the spilled pseudo I.  The function can allocate
   the corresponding stack slot if it is not done yet.  */
static void
assign_mem_slot (int i)
{
  rtx x = NULL_RTX;
  machine_mode mode = GET_MODE (regno_reg_rtx[i]);
  /* Size implied by the pseudo's own mode.  */
  poly_int64 inherent_size = PSEUDO_REGNO_BYTES (i);
  /* The widest mode in which the pseudo is ever referenced --
     paradoxical subregs may be wider than the natural mode.  */
  machine_mode wider_mode
    = wider_subreg_mode (mode, lra_reg_info[i].biggest_mode);
  poly_int64 total_size = GET_MODE_SIZE (wider_mode);
  poly_int64 adjust = 0;

  /* The pseudo must exist, be referenced, and have no hard reg.  */
  lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
	      && lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0);

  unsigned int slot_num = pseudo_slots[i].slot_num;
  x = slots[slot_num].mem;
  if (!x)
    {
      /* Lazily allocate the shared stack slot the first time any of
	 its pseudos needs memory.  */
      x = assign_stack_local (BLKmode, slots[slot_num].size,
			      slots[slot_num].align);
      slots[slot_num].mem = x;
    }

  /* On a big endian machine, the "address" of the slot is the address
     of the low part that fits its inherent mode.  */
  adjust += subreg_size_lowpart_offset (inherent_size, total_size);
  x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust);

  /* Set all of the memory attributes as appropriate for a spill.  */
  set_mem_attrs_for_spill (x);
  pseudo_slots[i].mem = x;
}
163 1.1 mrg
164 1.1 mrg /* Sort pseudos according their usage frequencies. */
165 1.1 mrg static int
166 1.1 mrg regno_freq_compare (const void *v1p, const void *v2p)
167 1.1 mrg {
168 1.1 mrg const int regno1 = *(const int *) v1p;
169 1.1 mrg const int regno2 = *(const int *) v2p;
170 1.1 mrg int diff;
171 1.1 mrg
172 1.1 mrg if ((diff = lra_reg_info[regno2].freq - lra_reg_info[regno1].freq) != 0)
173 1.1 mrg return diff;
174 1.1 mrg return regno1 - regno2;
175 1.1 mrg }
176 1.1 mrg
/* Sort pseudos according to their slots, putting the slots in the order
   that they should be allocated.

   First prefer to group slots with variable sizes together and slots
   with constant sizes together, since that usually makes them easier
   to address from a common anchor point.  E.g. loads of polynomial-sized
   registers tend to take polynomial offsets while loads of constant-sized
   registers tend to take constant (non-polynomial) offsets.

   Next, slots with lower numbers have the highest priority and should
   get the smallest displacement from the stack or frame pointer
   (whichever is being used).

   The first allocated slot is always closest to the frame pointer,
   so prefer lower slot numbers when frame_pointer_needed.  If the stack
   and frame grow in the same direction, then the first allocated slot is
   always closest to the initial stack pointer and furthest away from the
   final stack pointer, so allocate higher numbers first when using the
   stack pointer in that case.  The reverse is true if the stack and
   frame grow in opposite directions.  */
static int
pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff, slot_num1, slot_num2;

  slot_num1 = pseudo_slots[regno1].slot_num;
  slot_num2 = pseudo_slots[regno2].slot_num;
  /* Group constant-sized and variable-sized slots separately.  */
  diff = (int (slots[slot_num1].size.is_constant ())
	  - int (slots[slot_num2].size.is_constant ()));
  if (diff != 0)
    return diff;
  /* Order by slot number, direction depending on how stack and frame
     grow (see the head comment).  */
  if ((diff = slot_num1 - slot_num2) != 0)
    return (frame_pointer_needed
	    || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
  /* Same slot: bigger pseudos first, then by regno for stability.  */
  poly_int64 total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode);
  poly_int64 total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode);
  if ((diff = compare_sizes_for_sort (total_size2, total_size1)) != 0)
    return diff;
  return regno1 - regno2;
}
219 1.1 mrg
/* Assign spill hard registers to N pseudos in PSEUDO_REGNOS which is
   sorted in order of highest frequency first.  Put the pseudos which
   did not get a spill hard register at the beginning of array
   PSEUDO_REGNOS.  Return the number of such pseudos.  */
static int
assign_spill_hard_regs (int *pseudo_regnos, int n)
{
  int i, k, p, regno, res, spill_class_size, hard_regno, nr;
  enum reg_class rclass, spill_class;
  machine_mode mode;
  lra_live_range_t r;
  rtx_insn *insn;
  rtx set;
  basic_block bb;
  HARD_REG_SET conflict_hard_regs;
  bitmap setjump_crosses = regstat_get_setjmp_crosses ();
  /* Hard registers which cannot be used for any purpose at given
     program point because they are unallocatable or already allocated
     for other pseudos.  */
  HARD_REG_SET *reserved_hard_regs;

  /* Nothing to do if the target defines no spill class.  */
  if (! lra_reg_spill_p)
    return n;
  /* Set up reserved hard regs for every program point.  */
  reserved_hard_regs = XNEWVEC (HARD_REG_SET, lra_live_max_point);
  for (p = 0; p < lra_live_max_point; p++)
    reserved_hard_regs[p] = lra_no_alloc_regs;
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0
	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  add_to_hard_reg_set (&reserved_hard_regs[p],
			       lra_reg_info[i].biggest_mode, hard_regno);
  /* Collect insns in which spilling into a hard reg is acceptable:
     debug insns and plain reg-to-reg moves.  */
  auto_bitmap ok_insn_bitmap (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (DEBUG_INSN_P (insn)
	  || ((set = single_set (insn)) != NULL_RTX
	      && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))))
	bitmap_set_bit (ok_insn_bitmap, INSN_UID (insn));
  for (res = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      rclass = lra_get_allocno_class (regno);
      /* Reject pseudos crossing setjmp, pseudos whose class has no
	 spill class, and pseudos used in insns other than moves or
	 debug insns -- they must go to memory instead.  */
      if (bitmap_bit_p (setjump_crosses, regno)
	  || (spill_class
	      = ((enum reg_class)
		 targetm.spill_class ((reg_class_t) rclass,
				      PSEUDO_REGNO_MODE (regno)))) == NO_REGS
	  || bitmap_intersect_compl_p (&lra_reg_info[regno].insn_bitmap,
				       ok_insn_bitmap))
	{
	  pseudo_regnos[res++] = regno;
	  continue;
	}
      lra_assert (spill_class != NO_REGS);
      /* Build the set of hard regs unusable anywhere in the pseudo's
	 live range.  */
      conflict_hard_regs = lra_reg_info[regno].conflict_hard_regs;
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  conflict_hard_regs |= reserved_hard_regs[p];
      spill_class_size = ira_class_hard_regs_num[spill_class];
      mode = lra_reg_info[regno].biggest_mode;
      /* Find the first usable hard reg of the spill class.  */
      for (k = 0; k < spill_class_size; k++)
	{
	  hard_regno = ira_class_hard_regs[spill_class][k];
	  if (TEST_HARD_REG_BIT (eliminable_regset, hard_regno)
	      || !targetm.hard_regno_mode_ok (hard_regno, mode))
	    continue;
	  if (! overlaps_hard_reg_set_p (conflict_hard_regs, mode, hard_regno))
	    break;
	}
      if (k >= spill_class_size)
	{
	  /* There is no available regs -- assign memory later.  */
	  pseudo_regnos[res++] = regno;
	  continue;
	}
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "  Spill r%d into hr%d\n", regno, hard_regno);
      add_to_hard_reg_set (&hard_regs_spilled_into,
			   lra_reg_info[regno].biggest_mode, hard_regno);
      /* Update reserved_hard_regs.  */
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  add_to_hard_reg_set (&reserved_hard_regs[p],
			       lra_reg_info[regno].biggest_mode, hard_regno);
      spill_hard_reg[regno]
	= gen_raw_REG (PSEUDO_REGNO_MODE (regno), hard_regno);
      /* Mark every covered hard reg as ever-live for the DF
	 machinery.  */
      for (nr = 0;
	   nr < hard_regno_nregs (hard_regno,
				  lra_reg_info[regno].biggest_mode);
	   nr++)
	/* Just loop.  */
	df_set_regs_ever_live (hard_regno + nr, true);
    }
  free (reserved_hard_regs);
  return res;
}
319 1.1 mrg
/* Add pseudo REGNO to slot SLOT_NUM, growing the slot's size and
   alignment requirements as needed and linking the pseudo into the
   slot's pseudo list and live ranges.  */
static void
add_pseudo_to_slot (int regno, int slot_num)
{
  struct pseudo_slot *first;

  /* Each pseudo has an inherent size which comes from its own mode,
     and a total size which provides room for paradoxical subregs.
     We need to make sure the size and alignment of the slot are
     sufficient for both.  */
  machine_mode mode = wider_subreg_mode (PSEUDO_REGNO_MODE (regno),
					 lra_reg_info[regno].biggest_mode);
  unsigned int align = spill_slot_alignment (mode);
  slots[slot_num].align = MAX (slots[slot_num].align, align);
  slots[slot_num].size = upper_bound (slots[slot_num].size,
				      GET_MODE_SIZE (mode));

  if (slots[slot_num].regno < 0)
    {
      /* It is the first pseudo in the slot.  */
      slots[slot_num].regno = regno;
      pseudo_slots[regno].first = &pseudo_slots[regno];
      pseudo_slots[regno].next = NULL;
    }
  else
    {
      /* Insert the pseudo right after the list head.  */
      first = pseudo_slots[regno].first = &pseudo_slots[slots[slot_num].regno];
      pseudo_slots[regno].next = first->next;
      first->next = &pseudo_slots[regno];
    }
  pseudo_slots[regno].mem = NULL_RTX;
  pseudo_slots[regno].slot_num = slot_num;
  /* Widen the slot's live ranges so future sharing decisions see this
     pseudo's lifetime too.  */
  slots[slot_num].live_ranges
    = lra_merge_live_ranges (slots[slot_num].live_ranges,
			     lra_copy_live_range_list
			     (lra_reg_info[regno].live_ranges));
}
357 1.1 mrg
/* Assign stack slot numbers to pseudos in array PSEUDO_REGNOS of
   length N.  Sort pseudos in PSEUDO_REGNOS for subsequent assigning
   memory stack slots.  */
static void
assign_stack_slot_num_and_sort_pseudos (int *pseudo_regnos, int n)
{
  int i, j, regno;

  slots_num = 0;
  /* Assign stack slot numbers to spilled pseudos, use smaller numbers
     for most frequently used pseudos.  */
  for (i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      if (! flag_ira_share_spill_slots)
	/* No sharing: always create a fresh slot.  */
	j = slots_num;
      else
	{
	  machine_mode mode
	    = wider_subreg_mode (PSEUDO_REGNO_MODE (regno),
				 lra_reg_info[regno].biggest_mode);
	  /* Find an existing memory slot with non-overlapping live
	     ranges that the pseudo can reuse.  */
	  for (j = 0; j < slots_num; j++)
	    if (slots[j].hard_regno < 0
		/* Although it's possible to share slots between modes
		   with constant and non-constant widths, we usually
		   get better spill code by keeping the constant and
		   non-constant areas separate.  */
		&& (GET_MODE_SIZE (mode).is_constant ()
		    == slots[j].size.is_constant ())
		&& ! (lra_intersected_live_ranges_p
		      (slots[j].live_ranges,
		       lra_reg_info[regno].live_ranges)))
	      break;
	}
      if (j >= slots_num)
	{
	  /* New slot.  */
	  slots[j].live_ranges = NULL;
	  slots[j].size = 0;
	  slots[j].align = BITS_PER_UNIT;
	  slots[j].regno = slots[j].hard_regno = -1;
	  slots[j].mem = NULL_RTX;
	  slots_num++;
	}
      add_pseudo_to_slot (regno, j);
    }
  /* Sort regnos according to their slot numbers.  */
  qsort (pseudo_regnos, n, sizeof (int), pseudo_reg_slot_compare);
}
407 1.1 mrg
/* Recursively process LOC in INSN and change spilled pseudos to the
   corresponding memory or spilled hard reg.  Ignore spilled pseudos
   created from the scratches.  Return true if the pseudo nrefs equal
   to 0 (don't change the pseudo in this case).  Otherwise return false.  */
static bool
remove_pseudos (rtx *loc, rtx_insn *insn)
{
  int i;
  rtx hard_reg;
  const char *fmt;
  enum rtx_code code;
  bool res = false;

  if (*loc == NULL_RTX)
    return res;
  code = GET_CODE (*loc);
  if (code == SUBREG && REG_P (SUBREG_REG (*loc)))
    {
      /* Try to remove memory subregs to simplify LRA job
	 and avoid LRA cycling in case of subreg memory reload.  */
      res = remove_pseudos (&SUBREG_REG (*loc), insn);
      if (GET_CODE (SUBREG_REG (*loc)) == MEM)
	{
	  /* The inner reg became a spill MEM; fold (subreg (mem ...))
	     into a plain MEM where possible.  */
	  alter_subreg (loc, false);
	  if (GET_CODE (*loc) == MEM)
	    {
	      lra_update_insn_recog_data (insn);
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file,
			 "Memory subreg was simplified in insn #%u\n",
			 INSN_UID (insn));
	    }
	}
      return res;
    }
  else if (code == REG && (i = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER
	   && lra_get_regno_hard_regno (i) < 0
	   /* We do not want to assign memory for former scratches because
	      it might result in an address reload for some targets.  In
	      any case we transform such pseudos not getting hard registers
	      into scratches back.  */
	   && ! ira_former_scratch_p (i))
    {
      /* A pseudo with no references and no replacement: tell the
	 caller, who resets the (debug) insn containing it.  */
      if (lra_reg_info[i].nrefs == 0
	  && pseudo_slots[i].mem == NULL && spill_hard_reg[i] == NULL)
	return true;
      if ((hard_reg = spill_hard_reg[i]) != NULL_RTX)
	*loc = copy_rtx (hard_reg);
      else
	{
	  /* Apply register eliminations to the slot address and make
	     sure each use gets its own MEM rtx.  */
	  rtx x = lra_eliminate_regs_1 (insn, pseudo_slots[i].mem,
					GET_MODE (pseudo_slots[i].mem),
					false, false, 0, true);
	  *loc = x != pseudo_slots[i].mem ? x : copy_rtx (x);
	}
      return res;
    }

  /* Recurse into all sub-rtxes.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	res = remove_pseudos (&XEXP (*loc, i), insn) || res;
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
	    res = remove_pseudos (&XVECEXP (*loc, i, j), insn) || res;
	}
    }
  return res;
}
481 1.1 mrg
/* Convert spilled pseudos into their stack slots or spill hard regs,
   put insns to process on the constraint stack (that is all insns in
   which pseudos were changed to memory or spill hard regs).  */
static void
spill_pseudos (void)
{
  basic_block bb;
  rtx_insn *insn, *curr;
  int i;

  auto_bitmap spilled_pseudos (&reg_obstack);
  auto_bitmap changed_insns (&reg_obstack);
  /* Collect all spilled non-scratch pseudos and every insn that
     references one of them.  */
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    {
      if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	  && ! ira_former_scratch_p (i))
	{
	  bitmap_set_bit (spilled_pseudos, i);
	  bitmap_ior_into (changed_insns, &lra_reg_info[i].insn_bitmap);
	}
    }
  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_SAFE (bb, insn, curr)
	{
	  bool removed_pseudo_p = false;

	  if (bitmap_bit_p (changed_insns, INSN_UID (insn)))
	    {
	      rtx *link_loc, link;

	      removed_pseudo_p = remove_pseudos (&PATTERN (insn), insn);
	      if (CALL_P (insn)
		  && remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn))
		removed_pseudo_p = true;
	      /* Rewrite pseudos referenced from CFA-related notes too,
		 so the unwind info machinery sees the final
		 locations.  */
	      for (link_loc = &REG_NOTES (insn);
		   (link = *link_loc) != NULL_RTX;
		   link_loc = &XEXP (link, 1))
		{
		  switch (REG_NOTE_KIND (link))
		    {
		    case REG_FRAME_RELATED_EXPR:
		    case REG_CFA_DEF_CFA:
		    case REG_CFA_ADJUST_CFA:
		    case REG_CFA_OFFSET:
		    case REG_CFA_REGISTER:
		    case REG_CFA_EXPRESSION:
		    case REG_CFA_RESTORE:
		    case REG_CFA_SET_VDRAP:
		      if (remove_pseudos (&XEXP (link, 0), insn))
			removed_pseudo_p = true;
		      break;
		    default:
		      break;
		    }
		}
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file,
			 "Changing spilled pseudos to memory in insn #%u\n",
			 INSN_UID (insn));
	      lra_push_insn (insn);
	      if (lra_reg_spill_p || targetm.different_addr_displacement_p ())
		lra_set_used_insn_alternative (insn, LRA_UNKNOWN_ALT);
	    }
	  else if (CALL_P (insn)
		   /* Presence of any pseudo in CALL_INSN_FUNCTION_USAGE
		      does not affect value of insn_bitmap of the
		      corresponding lra_reg_info.  That is because we
		      don't need to reload pseudos in
		      CALL_INSN_FUNCTION_USAGEs.  So if we process only
		      insns in the insn_bitmap of given pseudo here, we
		      can miss the pseudo in some
		      CALL_INSN_FUNCTION_USAGEs.  */
		   && remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn))
	    removed_pseudo_p = true;
	  if (removed_pseudo_p)
	    {
	      /* Only debug insns may reference a zero-ref pseudo;
		 reset the location instead of leaving a dangling
		 reference.  */
	      lra_assert (DEBUG_INSN_P (insn));
	      lra_invalidate_insn_data (insn);
	      INSN_VAR_LOCATION_LOC (insn) = gen_rtx_UNKNOWN_VAR_LOC ();
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file,
			 "Debug insn #%u is reset because it referenced "
			 "removed pseudo\n", INSN_UID (insn));
	    }
	  /* Spilled pseudos no longer live in registers across block
	     boundaries.  */
	  bitmap_and_compl_into (df_get_live_in (bb), spilled_pseudos);
	  bitmap_and_compl_into (df_get_live_out (bb), spilled_pseudos);
	}
    }
}
572 1.1 mrg
573 1.1 mrg /* Return true if we need scratch reg assignments. */
574 1.1 mrg bool
575 1.1 mrg lra_need_for_scratch_reg_p (void)
576 1.1 mrg {
577 1.1 mrg int i; max_regno = max_reg_num ();
578 1.1 mrg
579 1.1 mrg for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
580 1.1 mrg if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
581 1.1 mrg && ira_former_scratch_p (i))
582 1.1 mrg return true;
583 1.1 mrg return false;
584 1.1 mrg }
585 1.1 mrg
586 1.1 mrg /* Return true if we need to change some pseudos into memory. */
587 1.1 mrg bool
588 1.1 mrg lra_need_for_spills_p (void)
589 1.1 mrg {
590 1.1 mrg int i; max_regno = max_reg_num ();
591 1.1 mrg
592 1.1 mrg for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
593 1.1 mrg if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
594 1.1 mrg && ! ira_former_scratch_p (i))
595 1.1 mrg return true;
596 1.1 mrg return false;
597 1.1 mrg }
598 1.1 mrg
/* Change spilled pseudos into memory or spill hard regs.  Put changed
   insns on the constraint stack (these insns will be considered on
   the next constraint pass).  The changed insns are all insns in
   which pseudos were changed.  */
void
lra_spill (void)
{
  int i, n, curr_regno;
  int *pseudo_regnos;

  regs_num = max_reg_num ();
  spill_hard_reg = XNEWVEC (rtx, regs_num);
  pseudo_regnos = XNEWVEC (int, regs_num);
  /* Collect all referenced pseudos that did not get a hard reg.  */
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	/* We do not want to assign memory for former scratches.  */
	&& ! ira_former_scratch_p (i))
      pseudo_regnos[n++] = i;
  lra_assert (n > 0);
  pseudo_slots = XNEWVEC (struct pseudo_slot, regs_num);
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    {
      spill_hard_reg[i] = NULL_RTX;
      pseudo_slots[i].mem = NULL_RTX;
    }
  slots = XNEWVEC (class slot, regs_num);
  /* Sort regnos according their usage frequencies.  */
  qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare);
  /* First try target spill hard regs; the remainder goes to stack
     slots.  */
  n = assign_spill_hard_regs (pseudo_regnos, n);
  assign_stack_slot_num_and_sort_pseudos (pseudo_regnos, n);
  for (i = 0; i < n; i++)
    if (pseudo_slots[pseudo_regnos[i]].mem == NULL_RTX)
      assign_mem_slot (pseudo_regnos[i]);
  if (n > 0 && crtl->stack_alignment_needed)
    /* If we have a stack frame, we must align it now.  The stack size
       may be a part of the offset computation for register
       elimination.  */
    assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
  if (lra_dump_file != NULL)
    {
      /* Dump each slot with its width and member regnos.  */
      for (i = 0; i < slots_num; i++)
	{
	  fprintf (lra_dump_file, "  Slot %d regnos (width = ", i);
	  print_dec (GET_MODE_SIZE (GET_MODE (slots[i].mem)),
		     lra_dump_file, SIGNED);
	  fprintf (lra_dump_file, "):");
	  for (curr_regno = slots[i].regno;;
	       curr_regno = pseudo_slots[curr_regno].next - pseudo_slots)
	    {
	      fprintf (lra_dump_file, " %d", curr_regno);
	      if (pseudo_slots[curr_regno].next == NULL)
		break;
	    }
	  fprintf (lra_dump_file, "\n");
	}
    }
  spill_pseudos ();
  free (slots);
  free (pseudo_slots);
  free (pseudo_regnos);
  free (spill_hard_reg);
}
661 1.1 mrg
/* Apply alter_subreg for subregs of regs in *LOC.  Use FINAL_P for
   alter_subreg calls.  Return true if any subreg of reg is
   processed.  */
static bool
alter_subregs (rtx *loc, bool final_p)
{
  int i;
  rtx x = *loc;
  bool res;
  const char *fmt;
  enum rtx_code code;

  if (x == NULL_RTX)
    return false;
  code = GET_CODE (x);
  if (code == SUBREG && REG_P (SUBREG_REG (x)))
    {
      /* By this point all pseudos got hard regs, so only hard-reg
	 subregs are expected here.  */
      lra_assert (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER);
      alter_subreg (loc, final_p);
      return true;
    }
  /* Recurse into all sub-rtxes.  */
  fmt = GET_RTX_FORMAT (code);
  res = false;
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (alter_subregs (&XEXP (x, i), final_p))
	    res = true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (alter_subregs (&XVECEXP (x, i, j), final_p))
	      res = true;
	}
    }
  return res;
}
703 1.1 mrg
704 1.1 mrg /* Return true if REGNO is used for return in the current
705 1.1 mrg function. */
706 1.1 mrg static bool
707 1.1 mrg return_regno_p (unsigned int regno)
708 1.1 mrg {
709 1.1 mrg rtx outgoing = crtl->return_rtx;
710 1.1 mrg
711 1.1 mrg if (! outgoing)
712 1.1 mrg return false;
713 1.1 mrg
714 1.1 mrg if (REG_P (outgoing))
715 1.1 mrg return REGNO (outgoing) == regno;
716 1.1 mrg else if (GET_CODE (outgoing) == PARALLEL)
717 1.1 mrg {
718 1.1 mrg int i;
719 1.1 mrg
720 1.1 mrg for (i = 0; i < XVECLEN (outgoing, 0); i++)
721 1.1 mrg {
722 1.1 mrg rtx x = XEXP (XVECEXP (outgoing, 0, i), 0);
723 1.1 mrg
724 1.1 mrg if (REG_P (x) && REGNO (x) == regno)
725 1.1 mrg return true;
726 1.1 mrg }
727 1.1 mrg }
728 1.1 mrg return false;
729 1.1 mrg }
730 1.1 mrg
731 1.1 mrg /* Return true if REGNO is in one of subsequent USE after INSN in the
732 1.1 mrg same BB. */
733 1.1 mrg static bool
734 1.1 mrg regno_in_use_p (rtx_insn *insn, unsigned int regno)
735 1.1 mrg {
736 1.1 mrg static lra_insn_recog_data_t id;
737 1.1 mrg static struct lra_static_insn_data *static_id;
738 1.1 mrg struct lra_insn_reg *reg;
739 1.1 mrg int i, arg_regno;
740 1.1 mrg basic_block bb = BLOCK_FOR_INSN (insn);
741 1.1 mrg
742 1.1 mrg while ((insn = next_nondebug_insn (insn)) != NULL_RTX)
743 1.1 mrg {
744 1.1 mrg if (BARRIER_P (insn) || bb != BLOCK_FOR_INSN (insn))
745 1.1 mrg return false;
746 1.1 mrg if (! INSN_P (insn))
747 1.1 mrg continue;
748 1.1 mrg if (GET_CODE (PATTERN (insn)) == USE
749 1.1 mrg && REG_P (XEXP (PATTERN (insn), 0))
750 1.1 mrg && regno == REGNO (XEXP (PATTERN (insn), 0)))
751 1.1 mrg return true;
752 1.1 mrg /* Check that the regno is not modified. */
753 1.1 mrg id = lra_get_insn_recog_data (insn);
754 1.1 mrg for (reg = id->regs; reg != NULL; reg = reg->next)
755 1.1 mrg if (reg->type != OP_IN && reg->regno == (int) regno)
756 1.1 mrg return false;
757 1.1 mrg static_id = id->insn_static_data;
758 1.1 mrg for (reg = static_id->hard_regs; reg != NULL; reg = reg->next)
759 1.1 mrg if (reg->type != OP_IN && reg->regno == (int) regno)
760 1.1 mrg return false;
761 1.1 mrg if (id->arg_hard_regs != NULL)
762 1.1 mrg for (i = 0; (arg_regno = id->arg_hard_regs[i]) >= 0; i++)
763 1.1 mrg if ((int) regno == (arg_regno >= FIRST_PSEUDO_REGISTER
764 1.1 mrg ? arg_regno : arg_regno - FIRST_PSEUDO_REGISTER))
765 1.1 mrg return false;
766 1.1 mrg }
767 1.1 mrg return false;
768 1.1 mrg }
769 1.1 mrg
/* Replace pseudos that got hard registers by the corresponding hard
   registers in the insn stream, and remove the temporary clobbers and
   markers LRA created along the way.  */
void
lra_final_code_change (void)
{
  int i, hard_regno;
  basic_block bb;
  rtx_insn *insn, *curr;
  rtx set;
  int max_regno = max_reg_num ();

  /* First rewrite every allocated pseudo's REG rtx in place so that it
     carries its assigned hard register number.  */
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0
	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      SET_REGNO (regno_reg_rtx[i], hard_regno);
  /* SAFE iteration: CURR keeps the next insn so the current one may be
     deleted inside the loop.  */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_SAFE (bb, insn, curr)
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == USE && XEXP (pat, 0) == const1_rtx)
	    {
	      /* Remove markers to eliminate critical edges for jump insn
		 output reloads (see code in ira.cc::ira).  */
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }
	  if (GET_CODE (pat) == CLOBBER && LRA_TEMP_CLOBBER_P (pat))
	    {
	      /* Remove clobbers temporarily created in LRA.  We don't
		 need them anymore and don't want to waste compiler
		 time processing them in a few subsequent passes.  */
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  /* IRA can generate move insns involving pseudos.  It is
	     better remove them earlier to speed up compiler a bit.
	     It is also better to do it here as they might not pass
	     final RTL check in LRA, (e.g. insn moving a control
	     register into itself).  So remove an useless move insn
	     unless next insn is USE marking the return reg (we should
	     save this as some subsequent optimizations assume that
	     such original insns are saved).  */
	  if (NONJUMP_INSN_P (insn) && GET_CODE (pat) == SET
	      && REG_P (SET_SRC (pat)) && REG_P (SET_DEST (pat))
	      && REGNO (SET_SRC (pat)) == REGNO (SET_DEST (pat))
	      && (! return_regno_p (REGNO (SET_SRC (pat)))
		  || ! regno_in_use_p (insn, REGNO (SET_SRC (pat)))))
	    {
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
	  struct lra_insn_reg *reg;

	  /* Look for a reference to a pseudo that no longer has any
	     references recorded -- only possible in debug insns.  */
	  for (reg = id->regs; reg != NULL; reg = reg->next)
	    if (reg->regno >= FIRST_PSEUDO_REGISTER
		&& lra_reg_info [reg->regno].nrefs == 0)
	      break;

	  if (reg != NULL)
	    {
	      /* Pseudos still can be in debug insns in some very rare
		 and complicated cases, e.g. the pseudo was removed by
		 inheritance and the debug insn is not EBBs where the
		 inheritance happened.  It is difficult and time
		 consuming to find what hard register corresponds the
		 pseudo -- so just remove the debug insn.  Another
		 solution could be assigning hard reg/memory but it
		 would be a misleading info.  It is better not to have
		 info than have it wrong.  */
	      lra_assert (DEBUG_INSN_P (insn));
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  struct lra_static_insn_data *static_id = id->insn_static_data;
	  bool insn_change_p = false;

	  /* Simplify any (subreg (reg ...)) operands now that hard
	     register numbers are in place; keep dup operands in sync.  */
	  for (i = id->insn_static_data->n_operands - 1; i >= 0; i--)
	    if ((DEBUG_INSN_P (insn) || ! static_id->operand[i].is_operator)
		&& alter_subregs (id->operand_loc[i], ! DEBUG_INSN_P (insn)))
	      {
		lra_update_dup (id, i);
		insn_change_p = true;
	      }
	  if ((GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
	      && alter_subregs (&XEXP (pat, 0), false))
	    insn_change_p = true;
	  if (insn_change_p)
	    lra_update_operator_dups (id);

	  if ((set = single_set (insn)) != NULL
	      && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))
	      && REGNO (SET_SRC (set)) == REGNO (SET_DEST (set)))
	    {
	      /* Remove an useless move insn.  IRA can generate move
		 insns involving pseudos.  It is better remove them
		 earlier to speed up compiler a bit.  It is also
		 better to do it here as they might not pass final RTL
		 check in LRA, (e.g. insn moving a control register
		 into itself).  */
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	    }
	}
}
884