/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */
23 1.1 mrg #include "config.h"
24 1.1 mrg #define INCLUDE_MALLOC_H
25 1.1 mrg #include "system.h"
26 1.1 mrg #include "coretypes.h"
27 1.1 mrg #include "timevar.h"
28 1.1 mrg #include "diagnostic-core.h"
29 1.1 mrg #include "ggc-internal.h"
30 1.1 mrg #include "hosthooks.h"
31 1.1 mrg #include "plugin.h"
32 1.1 mrg #include "options.h"
33 1.1 mrg
/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

/* Forward declarations for the PCH save machinery below.  */
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;
55 1.1 mrg
56 1.1 mrg /* Dynamically register a new GGC root table RT. This is useful for
57 1.1 mrg plugins. */
58 1.1 mrg
59 1.1 mrg void
60 1.1 mrg ggc_register_root_tab (const struct ggc_root_tab* rt)
61 1.1 mrg {
62 1.1 mrg if (rt)
63 1.1 mrg extra_root_vec.safe_push (rt);
64 1.1 mrg }
65 1.1 mrg
66 1.1 mrg /* Mark all the roots in the table RT. */
67 1.1 mrg
68 1.1 mrg static void
69 1.1 mrg ggc_mark_root_tab (const_ggc_root_tab_t rt)
70 1.1 mrg {
71 1.1 mrg size_t i;
72 1.1 mrg
73 1.1 mrg for ( ; rt->base != NULL; rt++)
74 1.1 mrg for (i = 0; i < rt->nelt; i++)
75 1.1 mrg (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
76 1.1 mrg }
77 1.1 mrg
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  /* Deletable roots are cleared rather than marked, so anything they
     referenced becomes collectable.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark everything reachable from the statically registered roots...  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  /* ...and from roots registered at run time (e.g. by plugins).  */
  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  /* When identifiers are not protected, drop unmarked strings instead
     of marking the whole pool.  */
  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
108 1.1 mrg
109 1.1 mrg /* Allocate a block of memory, then clear it. */
110 1.1 mrg void *
111 1.1 mrg ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
112 1.1 mrg MEM_STAT_DECL)
113 1.1 mrg {
114 1.1 mrg void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
115 1.1 mrg memset (buf, 0, size);
116 1.1 mrg return buf;
117 1.1 mrg }
118 1.1 mrg
/* Resize a block of memory, possibly re-allocating it.  X is the old
   block (may be NULL, in which case this is a plain allocation) and
   SIZE the requested size; the contents are preserved up to the
   smaller of the old and new sizes.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  /* Shrinking (or staying the same size): reuse the block in place.  */
  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
164 1.1 mrg
165 1.1 mrg void *
166 1.1 mrg ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
167 1.1 mrg size_t n ATTRIBUTE_UNUSED)
168 1.1 mrg {
169 1.1 mrg gcc_assert (c * n == sizeof (struct htab));
170 1.1 mrg return ggc_cleared_alloc<htab> ();
171 1.1 mrg }
172 1.1 mrg
173 1.1 mrg /* TODO: once we actually use type information in GGC, create a new tag
174 1.1 mrg gt_gcc_ptr_array and use it for pointer arrays. */
175 1.1 mrg void *
176 1.1 mrg ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
177 1.1 mrg {
178 1.1 mrg gcc_assert (sizeof (PTR *) == n);
179 1.1 mrg return ggc_cleared_vec_alloc<PTR *> (c);
180 1.1 mrg }
181 1.1 mrg
182 1.1 mrg /* These are for splay_tree_new_ggc. */
183 1.1 mrg void *
184 1.1 mrg ggc_splay_alloc (int sz, void *nl)
185 1.1 mrg {
186 1.1 mrg gcc_assert (!nl);
187 1.1 mrg return ggc_internal_alloc (sz);
188 1.1 mrg }
189 1.1 mrg
190 1.1 mrg void
191 1.1 mrg ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
192 1.1 mrg {
193 1.1 mrg gcc_assert (!nl);
194 1.1 mrg }
195 1.1 mrg
196 1.1 mrg void
197 1.1 mrg ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
198 1.1 mrg ggc_statistics *stats)
199 1.1 mrg {
200 1.1 mrg /* Set the pointer so that during collection we will actually gather
201 1.1 mrg the statistics. */
202 1.1 mrg ggc_stats = stats;
203 1.1 mrg
204 1.1 mrg /* Then do one collection to fill in the statistics. */
205 1.1 mrg ggc_collect ();
206 1.1 mrg
207 1.1 mrg /* At present, we don't really gather any interesting statistics. */
208 1.1 mrg
209 1.1 mrg /* Don't gather statistics any more. */
210 1.1 mrg ggc_stats = NULL;
211 1.1 mrg }
212 1.1 mrg
/* Functions for saving and restoring GCable memory to disk.  */

/* Bookkeeping for one object registered for PCH output.  */
struct ptr_data
{
  void *obj;			/* Address of the live object.  */
  void *note_ptr_cookie;	/* Cookie passed back to the callbacks.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside the object.  */
  gt_handle_reorder reorder_fn;	/* Optional hook run before writing.  */
  size_t size;			/* Size of the object in bytes.  */
  void *new_addr;		/* Address the object gets in the PCH image.  */
};

/* Hash a pointer by its address; the low 3 bits are dropped (they are
   usually zero because of alignment).  */
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
227 1.1 mrg
/* Helper for hashing saving_htab.  */

struct saving_hasher : free_ptr_hash <ptr_data>
{
  /* Lookups use the raw object pointer as the key.  */
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};
236 1.1 mrg
237 1.1 mrg inline hashval_t
238 1.1 mrg saving_hasher::hash (const ptr_data *p)
239 1.1 mrg {
240 1.1 mrg return POINTER_HASH (p->obj);
241 1.1 mrg }
242 1.1 mrg
243 1.1 mrg inline bool
244 1.1 mrg saving_hasher::equal (const ptr_data *p1, const void *p2)
245 1.1 mrg {
246 1.1 mrg return p1->obj == p2;
247 1.1 mrg }
248 1.1 mrg
/* All objects registered for PCH output, keyed by their address.  */
static hash_table<saving_hasher> *saving_htab;
/* PCH-image addresses of registered callback pointers.  */
static vec<void *> callback_vec;
/* PCH-image addresses of every pointer that needs relocation.  */
static vec<void *> reloc_addrs_vec;
252 1.1 mrg
253 1.1 mrg /* Register an object in the hash table. */
254 1.1 mrg
255 1.1 mrg int
256 1.1 mrg gt_pch_note_object (void *obj, void *note_ptr_cookie,
257 1.1 mrg gt_note_pointers note_ptr_fn)
258 1.1 mrg {
259 1.1 mrg struct ptr_data **slot;
260 1.1 mrg
261 1.1 mrg if (obj == NULL || obj == (void *) 1)
262 1.1 mrg return 0;
263 1.1 mrg
264 1.1 mrg slot = (struct ptr_data **)
265 1.1 mrg saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
266 1.1 mrg if (*slot != NULL)
267 1.1 mrg {
268 1.1 mrg gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
269 1.1 mrg && (*slot)->note_ptr_cookie == note_ptr_cookie);
270 1.1 mrg return 0;
271 1.1 mrg }
272 1.1 mrg
273 1.1 mrg *slot = XCNEW (struct ptr_data);
274 1.1 mrg (*slot)->obj = obj;
275 1.1 mrg (*slot)->note_ptr_fn = note_ptr_fn;
276 1.1 mrg (*slot)->note_ptr_cookie = note_ptr_cookie;
277 1.1 mrg if (note_ptr_fn == gt_pch_p_S)
278 1.1 mrg (*slot)->size = strlen ((const char *)obj) + 1;
279 1.1 mrg else
280 1.1 mrg (*slot)->size = ggc_get_size (obj);
281 1.1 mrg return 1;
282 1.1 mrg }
283 1.1 mrg
/* Register address of a callback pointer.  OBJ is the location of a
   function pointer stored inside the already-registered object BASE;
   if the pointer is non-null, record where it will live in the PCH
   image so it can be fixed up when the PCH is loaded.  Requires that
   BASE's new_addr has already been assigned.  */
void
gt_pch_note_callback (void *obj, void *base)
{
  void *ptr;
  /* Fetch via memcpy rather than dereferencing OBJ directly.  */
  memcpy (&ptr, obj, sizeof (void *));
  if (ptr != NULL)
    {
      struct ptr_data *data
	= (struct ptr_data *)
	  saving_htab->find_with_hash (base, POINTER_HASH (base));
      gcc_assert (data);
      /* Translate OBJ's offset within BASE into the new image.  */
      callback_vec.safe_push ((char *) data->new_addr
			      + ((char *) obj - (char *) base));
    }
}
300 1.1 mrg
301 1.1 mrg /* Register an object in the hash table. */
302 1.1 mrg
303 1.1 mrg void
304 1.1 mrg gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
305 1.1 mrg gt_handle_reorder reorder_fn)
306 1.1 mrg {
307 1.1 mrg struct ptr_data *data;
308 1.1 mrg
309 1.1 mrg if (obj == NULL || obj == (void *) 1)
310 1.1 mrg return;
311 1.1 mrg
312 1.1 mrg data = (struct ptr_data *)
313 1.1 mrg saving_htab->find_with_hash (obj, POINTER_HASH (obj));
314 1.1 mrg gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);
315 1.1 mrg
316 1.1 mrg data->reorder_fn = reorder_fn;
317 1.1 mrg }
318 1.1 mrg
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Allocator-specific PCH state.  */
  size_t count;			/* Number of registered objects.  */
  struct ptr_data **ptrs;	/* All registered objects (later sorted).  */
  size_t ptrs_i;		/* Index of the object being processed.  */
};
329 1.1 mrg
330 1.1 mrg /* Callbacks for htab_traverse. */
331 1.1 mrg
332 1.1 mrg int
333 1.1 mrg ggc_call_count (ptr_data **slot, traversal_state *state)
334 1.1 mrg {
335 1.1 mrg struct ptr_data *d = *slot;
336 1.1 mrg
337 1.1 mrg ggc_pch_count_object (state->d, d->obj, d->size,
338 1.1 mrg d->note_ptr_fn == gt_pch_p_S);
339 1.1 mrg state->count++;
340 1.1 mrg return 1;
341 1.1 mrg }
342 1.1 mrg
343 1.1 mrg int
344 1.1 mrg ggc_call_alloc (ptr_data **slot, traversal_state *state)
345 1.1 mrg {
346 1.1 mrg struct ptr_data *d = *slot;
347 1.1 mrg
348 1.1 mrg d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
349 1.1 mrg d->note_ptr_fn == gt_pch_p_S);
350 1.1 mrg state->ptrs[state->ptrs_i++] = d;
351 1.1 mrg return 1;
352 1.1 mrg }
353 1.1 mrg
354 1.1 mrg /* Callback for qsort. */
355 1.1 mrg
356 1.1 mrg static int
357 1.1 mrg compare_ptr_data (const void *p1_p, const void *p2_p)
358 1.1 mrg {
359 1.1 mrg const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
360 1.1 mrg const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
361 1.1 mrg return (((size_t)p1->new_addr > (size_t)p2->new_addr)
362 1.1 mrg - ((size_t)p1->new_addr < (size_t)p2->new_addr));
363 1.1 mrg }
364 1.1 mrg
/* Callbacks for note_ptr_fn.  */

/* Rewrite the pointer stored at *PTR_P to the address its target will
   have in the PCH image.  REAL_PTR_P, when it differs from PTR_P,
   appears to be the pointer's true location inside the object being
   processed (STATE_P->ptrs[ptrs_i]) — NOTE(review): inferred from the
   bounds assertion below; confirm against the note_ptr_fn contract.
   Pointer locations inside the image are recorded in reloc_addrs_vec
   for the load-time relocation pass.  */

static void
relocate_ptrs (void *ptr_p, void *real_ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  /* Null and the dummy value (void *) 1 are left untouched.  */
  if (*ptr == NULL || *ptr == (void *)1)
    return;

  /* Every pointed-to object must have been registered.  */
  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
  if (ptr_p == real_ptr_p)
    return;
  if (real_ptr_p == NULL)
    real_ptr_p = ptr_p;
  /* The pointer must lie wholly inside the current object.  */
  gcc_assert (real_ptr_p >= state->ptrs[state->ptrs_i]->obj
	      && ((char *) real_ptr_p + sizeof (void *)
		  <= ((char *) state->ptrs[state->ptrs_i]->obj
		      + state->ptrs[state->ptrs_i]->size)));
  /* Record where this pointer will live in the PCH image.  */
  void *addr
    = (void *) ((char *) state->ptrs[state->ptrs_i]->new_addr
		+ ((char *) real_ptr_p
		   - (char *) state->ptrs[state->ptrs_i]->obj));
  reloc_addrs_vec.safe_push (addr);
}
396 1.1 mrg
/* Write out, after relocation, the pointers in TAB.  Each root slot is
   written as the address its referent will have in the PCH image;
   null and the dummy (void *) 1 pass through unchanged.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	  else
	    {
	      /* Translate to the object's new address; it must have
		 been registered earlier.  */
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	}
}
428 1.1 mrg
/* Callback for qsort.  Order two pointers by address, ascending.  */

static int
compare_ptr (const void *p1_p, const void *p2_p)
{
  uintptr_t a = (uintptr_t) *(void *const *)p1_p;
  uintptr_t b = (uintptr_t) *(void *const *)p2_p;
  if (a < b)
    return -1;
  return a > b ? 1 : 0;
}
439 1.1 mrg
/* Decode one uleb128 from P, return first byte after it, store
   decoded value into *VAL.  */

static unsigned char *
read_uleb128 (unsigned char *p, size_t *val)
{
  size_t result = 0;
  unsigned int shift = 0;

  for (;;)
    {
      unsigned char byte = *p++;
      result |= ((size_t) (byte & 0x7f)) << shift;
      if ((byte & 0x80) == 0)
	break;
      shift += 7;
    }

  *val = result;
  return p;
}
462 1.1 mrg
/* Store VAL as uleb128 at P, return length in bytes.  */

static size_t
write_uleb128 (unsigned char *p, size_t val)
{
  size_t len = 0;
  for (;;)
    {
      unsigned char byte = val & 0x7f;
      val >>= 7;
      if (val == 0)
	{
	  /* Last byte: high bit clear terminates the encoding.  */
	  p[len++] = byte;
	  return len;
	}
      p[len++] = byte | 0x80;
    }
}
483 1.1 mrg
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;	/* File offset of the mmapped area.  */
  size_t size;		/* Size of the mmapped area in bytes.  */
  void *preferred_base;	/* Address the area was laid out for.  */
};
492 1.1 mrg
/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  /* Walk every root and register each reachable object into
     saving_htab via the per-table pchw callbacks.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
  /* If the host cannot supply any suitable address for this, we are stuck.  */
  if (mmi.preferred_base == NULL)
    fatal_error (input_location,
		 "cannot write PCH file: required memory segment unavailable");

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  /* Sort objects by their address in the PCH image.  */
  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "cannot write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "cannot get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "cannot write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      state.ptrs_i = i;
      /* Grow the scratch buffer used to save/restore the object's
	 original bytes around relocation.  */
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size, true);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      /* Save the original bytes, relocate the pointers in place, write
	 the relocated image, then restore the original bytes so the
	 in-memory object stays usable (strings need no restore).  */
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  reloc_addrs_vec.qsort (compare_ptr);

  /* First pass over the sorted relocation addresses: compute the total
     size of the uleb128-encoded delta stream (duplicates skipped).  */
  size_t reloc_addrs_size = 0;
  void *last_addr = NULL;
  unsigned char uleb128_buf[sizeof (size_t) * 2];
  for (void *addr : reloc_addrs_vec)
    {
      gcc_assert ((uintptr_t) addr >= (uintptr_t) mmi.preferred_base
		  && ((uintptr_t) addr + sizeof (void *)
		      <= (uintptr_t) mmi.preferred_base + mmi.size));
      if (addr == last_addr)
	continue;
      if (last_addr == NULL)
	last_addr = mmi.preferred_base;
      size_t diff = (uintptr_t) addr - (uintptr_t) last_addr;
      reloc_addrs_size += write_uleb128 (uleb128_buf, diff);
      last_addr = addr;
    }
  if (fwrite (&reloc_addrs_size, sizeof (reloc_addrs_size), 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  /* Second pass: actually emit the deltas.  */
  last_addr = NULL;
  for (void *addr : reloc_addrs_vec)
    {
      if (addr == last_addr)
	continue;
      if (last_addr == NULL)
	last_addr = mmi.preferred_base;
      size_t diff = (uintptr_t) addr - (uintptr_t) last_addr;
      reloc_addrs_size = write_uleb128 (uleb128_buf, diff);
      if (fwrite (uleb128_buf, 1, reloc_addrs_size, f) != reloc_addrs_size)
	fatal_error (input_location, "cannot write PCH file: %m");
      last_addr = addr;
    }

  ggc_pch_finish (state.d, state.f);

  gt_pch_fixup_stringpool ();

  /* Record this function's own address and the callback pointer
     locations; presumably the restore side uses the former to compute
     the executable's load bias for fixing up the latter — the restore
     code is not fully visible here, confirm there.  */
  unsigned num_callbacks = callback_vec.length ();
  void (*pch_save) (FILE *) = &gt_pch_save;
  if (fwrite (&pch_save, sizeof (pch_save), 1, f) != 1
      || fwrite (&num_callbacks, sizeof (num_callbacks), 1, f) != 1
      || (num_callbacks
	  && fwrite (callback_vec.address (), sizeof (void *), num_callbacks,
		     f) != num_callbacks))
    fatal_error (input_location, "cannot write PCH file: %m");

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
  callback_vec.release ();
  reloc_addrs_vec.release ();
}
719 1.1 mrg
720 1.1 mrg /* Read the state of the compiler back in from F. */
721 1.1 mrg
722 1.1 mrg void
723 1.1 mrg gt_pch_restore (FILE *f)
724 1.1 mrg {
725 1.1 mrg const struct ggc_root_tab *const *rt;
726 1.1 mrg const struct ggc_root_tab *rti;
727 1.1 mrg size_t i;
728 1.1 mrg struct mmap_info mmi;
729 1.1 mrg int result;
730 1.1 mrg struct line_maps * old_line_table = line_table;
731 1.1 mrg location_t old_input_loc = input_location;
732 1.1 mrg
733 1.1 mrg /* We are about to reload the line maps along with the rest of the PCH
734 1.1 mrg data, which means that the (loaded) ones cannot be guaranteed to be
735 1.1 mrg in any valid state for reporting diagnostics that happen during the
736 1.1 mrg load. Save the current table (and use it during the loading process
737 1.1 mrg below). */
738 1.1 mrg class line_maps *save_line_table = line_table;
739 1.1 mrg
740 1.1 mrg /* Delete any deletable objects. This makes ggc_pch_read much
741 1.1 mrg faster, as it can be sure that no GCable objects remain other
742 1.1 mrg than the ones just read in. */
743 1.1 mrg for (rt = gt_ggc_deletable_rtab; *rt; rt++)
744 1.1 mrg for (rti = *rt; rti->base != NULL; rti++)
745 1.1 mrg memset (rti->base, 0, rti->stride);
746 1.1 mrg
747 1.1 mrg /* Read in all the scalar variables. */
748 1.1 mrg for (rt = gt_pch_scalar_rtab; *rt; rt++)
749 1.1 mrg for (rti = *rt; rti->base != NULL; rti++)
750 1.1 mrg if (fread (rti->base, rti->stride, 1, f) != 1)
751 1.1 mrg {
752 1.1 mrg line_table = old_line_table;
753 1.1 mrg input_location = old_input_loc;
754 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
755 1.1 mrg }
756 1.1 mrg
757 1.1 mrg /* Read in all the global pointers, in 6 easy loops. */
758 1.1 mrg bool error_reading_pointers = false;
759 1.1 mrg for (rt = gt_ggc_rtab; *rt; rt++)
760 1.1 mrg for (rti = *rt; rti->base != NULL; rti++)
761 1.1 mrg for (i = 0; i < rti->nelt; i++)
762 1.1 mrg if (fread ((char *)rti->base + rti->stride * i,
763 1.1 mrg sizeof (void *), 1, f) != 1)
764 1.1 mrg error_reading_pointers = true;
765 1.1 mrg
766 1.1 mrg /* Stash the newly read-in line table pointer - it does not point to
767 1.1 mrg anything meaningful yet, so swap the old one back in. */
768 1.1 mrg class line_maps *new_line_table = line_table;
769 1.1 mrg line_table = save_line_table;
770 1.1 mrg if (error_reading_pointers)
771 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
772 1.1 mrg
773 1.1 mrg if (fread (&mmi, sizeof (mmi), 1, f) != 1)
774 1.1 mrg {
775 1.1 mrg line_table = old_line_table;
776 1.1 mrg input_location = old_input_loc;
777 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
778 1.1 mrg }
779 1.1 mrg
780 1.1 mrg void *orig_preferred_base = mmi.preferred_base;
781 1.1 mrg result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
782 1.1 mrg fileno (f), mmi.offset);
783 1.1 mrg
784 1.1 mrg /* We could not mmap or otherwise allocate the required memory at the
785 1.1 mrg address needed. */
786 1.1 mrg if (result < 0)
787 1.1 mrg {
788 1.1 mrg line_table = old_line_table;
789 1.1 mrg input_location = old_input_loc;
790 1.1 mrg sorry_at (input_location, "PCH allocation failure");
791 1.1 mrg /* There is no point in continuing from here, we will only end up
792 1.1 mrg with a crashed (most likely hanging) compiler. */
793 1.1 mrg exit (-1);
794 1.1 mrg }
795 1.1 mrg
796 1.1 mrg /* (0) We allocated memory, but did not mmap the file, so we need to read
797 1.1 mrg the data in manually. (>0) Otherwise the mmap succeed for the address
798 1.1 mrg we wanted. */
799 1.1 mrg if (result == 0)
800 1.1 mrg {
801 1.1 mrg if (fseek (f, mmi.offset, SEEK_SET) != 0
802 1.1 mrg || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
803 1.1 mrg {
804 1.1 mrg line_table = old_line_table;
805 1.1 mrg input_location = old_input_loc;
806 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
807 1.1 mrg }
808 1.1 mrg }
809 1.1 mrg else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
810 1.1 mrg {
811 1.1 mrg line_table = old_line_table;
812 1.1 mrg input_location = old_input_loc;
813 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
814 1.1 mrg }
815 1.1 mrg
816 1.1 mrg size_t reloc_addrs_size;
817 1.1 mrg if (fread (&reloc_addrs_size, sizeof (reloc_addrs_size), 1, f) != 1)
818 1.1 mrg {
819 1.1 mrg line_table = old_line_table;
820 1.1 mrg input_location = old_input_loc;
821 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
822 1.1 mrg }
823 1.1 mrg
824 1.1 mrg if (orig_preferred_base != mmi.preferred_base)
825 1.1 mrg {
826 1.1 mrg uintptr_t bias
827 1.1 mrg = (uintptr_t) mmi.preferred_base - (uintptr_t) orig_preferred_base;
828 1.1 mrg
829 1.1 mrg /* Adjust all the global pointers by bias. */
830 1.1 mrg line_table = new_line_table;
831 1.1 mrg for (rt = gt_ggc_rtab; *rt; rt++)
832 1.1 mrg for (rti = *rt; rti->base != NULL; rti++)
833 1.1 mrg for (i = 0; i < rti->nelt; i++)
834 1.1 mrg {
835 1.1 mrg char *addr = (char *)rti->base + rti->stride * i;
836 1.1 mrg char *p;
837 1.1 mrg memcpy (&p, addr, sizeof (void *));
838 1.1 mrg if ((uintptr_t) p >= (uintptr_t) orig_preferred_base
839 1.1 mrg && (uintptr_t) p < (uintptr_t) orig_preferred_base + mmi.size)
840 1.1 mrg {
841 1.1 mrg p = (char *) ((uintptr_t) p + bias);
842 1.1 mrg memcpy (addr, &p, sizeof (void *));
843 1.1 mrg }
844 1.1 mrg }
845 1.1 mrg new_line_table = line_table;
846 1.1 mrg line_table = save_line_table;
847 1.1 mrg
848 1.1 mrg /* And adjust all the pointers in the image by bias too. */
849 1.1 mrg char *addr = (char *) mmi.preferred_base;
850 1.1 mrg unsigned char uleb128_buf[4096], *uleb128_ptr = uleb128_buf;
851 1.1 mrg while (reloc_addrs_size != 0)
852 1.1 mrg {
853 1.1 mrg size_t this_size
854 1.1 mrg = MIN (reloc_addrs_size,
855 1.1 mrg (size_t) (4096 - (uleb128_ptr - uleb128_buf)));
856 1.1 mrg if (fread (uleb128_ptr, 1, this_size, f) != this_size)
857 1.1 mrg {
858 1.1 mrg line_table = old_line_table;
859 1.1 mrg input_location = old_input_loc;
860 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
861 1.1 mrg }
862 1.1 mrg unsigned char *uleb128_end = uleb128_ptr + this_size;
863 1.1 mrg if (this_size != reloc_addrs_size)
864 1.1 mrg uleb128_end -= 2 * sizeof (size_t);
865 1.1 mrg uleb128_ptr = uleb128_buf;
866 1.1 mrg while (uleb128_ptr < uleb128_end)
867 1.1 mrg {
868 1.1 mrg size_t diff;
869 1.1 mrg uleb128_ptr = read_uleb128 (uleb128_ptr, &diff);
870 1.1 mrg addr = (char *) ((uintptr_t) addr + diff);
871 1.1 mrg
872 1.1 mrg char *p;
873 1.1 mrg memcpy (&p, addr, sizeof (void *));
874 1.1 mrg gcc_assert ((uintptr_t) p >= (uintptr_t) orig_preferred_base
875 1.1 mrg && ((uintptr_t) p
876 1.1 mrg < (uintptr_t) orig_preferred_base + mmi.size));
877 1.1 mrg p = (char *) ((uintptr_t) p + bias);
878 1.1 mrg memcpy (addr, &p, sizeof (void *));
879 1.1 mrg }
880 1.1 mrg reloc_addrs_size -= this_size;
881 1.1 mrg if (reloc_addrs_size == 0)
882 1.1 mrg break;
883 1.1 mrg this_size = uleb128_end + 2 * sizeof (size_t) - uleb128_ptr;
884 1.1 mrg memcpy (uleb128_buf, uleb128_ptr, this_size);
885 1.1 mrg uleb128_ptr = uleb128_buf + this_size;
886 1.1 mrg }
887 1.1 mrg }
888 1.1 mrg else if (fseek (f, (mmi.offset + mmi.size + sizeof (reloc_addrs_size)
889 1.1 mrg + reloc_addrs_size), SEEK_SET) != 0)
890 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
891 1.1 mrg
892 1.1 mrg ggc_pch_read (f, mmi.preferred_base);
893 1.1 mrg
894 1.1 mrg void (*pch_save) (FILE *);
895 1.1 mrg unsigned num_callbacks;
896 1.1 mrg if (fread (&pch_save, sizeof (pch_save), 1, f) != 1
897 1.1 mrg || fread (&num_callbacks, sizeof (num_callbacks), 1, f) != 1)
898 1.1 mrg {
899 1.1 mrg line_table = old_line_table;
900 1.1 mrg input_location = old_input_loc;
901 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
902 1.1 mrg }
  if (pch_save != &gt_pch_save)
904 1.1 mrg {
      uintptr_t binbias = (uintptr_t) &gt_pch_save - (uintptr_t) pch_save;
906 1.1 mrg void **ptrs = XNEWVEC (void *, num_callbacks);
907 1.1 mrg unsigned i;
908 1.1 mrg uintptr_t bias
909 1.1 mrg = (uintptr_t) mmi.preferred_base - (uintptr_t) orig_preferred_base;
910 1.1 mrg
911 1.1 mrg if (fread (ptrs, sizeof (void *), num_callbacks, f) != num_callbacks)
912 1.1 mrg {
913 1.1 mrg line_table = old_line_table;
914 1.1 mrg input_location = old_input_loc;
915 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
916 1.1 mrg }
917 1.1 mrg for (i = 0; i < num_callbacks; ++i)
918 1.1 mrg {
919 1.1 mrg void *ptr = (void *) ((uintptr_t) ptrs[i] + bias);
920 1.1 mrg memcpy (&pch_save, ptr, sizeof (pch_save));
921 1.1 mrg pch_save = (void (*) (FILE *)) ((uintptr_t) pch_save + binbias);
922 1.1 mrg memcpy (ptr, &pch_save, sizeof (pch_save));
923 1.1 mrg }
924 1.1 mrg XDELETE (ptrs);
925 1.1 mrg }
926 1.1 mrg else if (fseek (f, num_callbacks * sizeof (void *), SEEK_CUR) != 0)
927 1.1 mrg fatal_error (input_location, "cannot read PCH file: %m");
928 1.1 mrg
929 1.1 mrg gt_pch_restore_stringpool ();
930 1.1 mrg
931 1.1 mrg /* Barring corruption of the PCH file, the restored line table should be
932 1.1 mrg complete and usable. */
933 1.1 mrg line_table = new_line_table;
934 1.1 mrg }
935 1.1 mrg
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  SIZE and FD are ignored.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
946 1.1 mrg
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *&base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  /* A fresh allocation will virtually never land exactly at BASE, so this
     normally reports -1; the block is deliberately not freed in case the
     addresses do coincide.  */
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
960 1.1 mrg
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}
970 1.1 mrg
971 1.1 mrg #if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  /* Probe for an address by mapping FD read/write at a kernel-chosen spot
     and immediately releasing the mapping; the address the kernel picked
     is our candidate for the eventual PCH load.  */
  void *probe = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (probe == (void *) MAP_FAILED)
    return NULL;

  munmap (probe, size);
  return probe;
}
990 1.1 mrg
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *&base, size_t size, int fd, size_t offset)
{
  /* A zero SIZE means no PCH file will be loaded at all; the hook is still
     invoked so hosts may release any static space reserved at link time.  */
  if (size == 0)
    return -1;

  void *got = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
		    fd, offset);

  return got == base ? 1 : -1;
}
1015 1.1 mrg #endif /* HAVE_MMAP_FILE */
1016 1.1 mrg
1017 1.1 mrg #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
1018 1.1 mrg
/* Modify the bound based on rlimits.  Returns BOUND clipped to the
   effective address-space (or data-segment) resource limit, when one is
   known and smaller.  */
static double
ggc_rlimit_bound (double bound)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit r;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &r) == 0
      && r.rlim_cur != (rlim_t) RLIM_INFINITY
      && r.rlim_cur < bound)
    bound = r.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &r) == 0
      && r.rlim_cur != (rlim_t) RLIM_INFINITY
      && r.rlim_cur < bound
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && r.rlim_cur >= 8 * ONE_M)
    bound = r.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return bound;
}
1051 1.1 mrg
1052 1.1 mrg /* Heuristic to set a default for GGC_MIN_EXPAND. */
1053 1.1 mrg static int
1054 1.1 mrg ggc_min_expand_heuristic (void)
1055 1.1 mrg {
1056 1.1 mrg double min_expand = physmem_total ();
1057 1.1 mrg
1058 1.1 mrg /* Adjust for rlimits. */
1059 1.1 mrg min_expand = ggc_rlimit_bound (min_expand);
1060 1.1 mrg
1061 1.1 mrg /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
1062 1.1 mrg a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB). */
1063 1.1 mrg min_expand /= ONE_G;
1064 1.1 mrg min_expand *= 70;
1065 1.1 mrg min_expand = MIN (min_expand, 70);
1066 1.1 mrg min_expand += 30;
1067 1.1 mrg
1068 1.1 mrg return min_expand;
1069 1.1 mrg }
1070 1.1 mrg
1071 1.1 mrg /* Heuristic to set a default for GGC_MIN_HEAPSIZE. */
1072 1.1 mrg static int
1073 1.1 mrg ggc_min_heapsize_heuristic (void)
1074 1.1 mrg {
1075 1.1 mrg double phys_kbytes = physmem_total ();
1076 1.1 mrg double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);
1077 1.1 mrg
1078 1.1 mrg phys_kbytes /= ONE_K; /* Convert to Kbytes. */
1079 1.1 mrg limit_kbytes /= ONE_K;
1080 1.1 mrg
1081 1.1 mrg /* The heuristic is RAM/8, with a lower bound of 4M and an upper
1082 1.1 mrg bound of 128M (when RAM >= 1GB). */
1083 1.1 mrg phys_kbytes /= 8;
1084 1.1 mrg
1085 1.1 mrg #if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
1086 1.1 mrg /* Try not to overrun the RSS limit while doing garbage collection.
1087 1.1 mrg The RSS limit is only advisory, so no margin is subtracted. */
1088 1.1 mrg {
1089 1.1 mrg struct rlimit rlim;
1090 1.1 mrg if (getrlimit (RLIMIT_RSS, &rlim) == 0
1091 1.1 mrg && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
1092 1.1 mrg phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / ONE_K);
1093 1.1 mrg }
1094 1.1 mrg # endif
1095 1.1 mrg
1096 1.1 mrg /* Don't blindly run over our data limit; do GC at least when the
1097 1.1 mrg *next* GC would be within 20Mb of the limit or within a quarter of
1098 1.1 mrg the limit, whichever is larger. If GCC does hit the data limit,
1099 1.1 mrg compilation will fail, so this tries to be conservative. */
1100 1.1 mrg limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * ONE_K));
1101 1.1 mrg limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
1102 1.1 mrg phys_kbytes = MIN (phys_kbytes, limit_kbytes);
1103 1.1 mrg
1104 1.1 mrg phys_kbytes = MAX (phys_kbytes, 4 * ONE_K);
1105 1.1 mrg phys_kbytes = MIN (phys_kbytes, 128 * ONE_K);
1106 1.1 mrg
1107 1.1 mrg return phys_kbytes;
1108 1.1 mrg }
1109 1.1 mrg #endif
1110 1.1 mrg
1111 1.1 mrg void
1112 1.1 mrg init_ggc_heuristics (void)
1113 1.1 mrg {
1114 1.1 mrg #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
1115 1.1 mrg param_ggc_min_expand = ggc_min_expand_heuristic ();
1116 1.1 mrg param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
1117 1.1 mrg #endif
1118 1.1 mrg }
1119 1.1 mrg
1120 1.1 mrg /* GGC memory usage. */
1121 1.1 mrg class ggc_usage: public mem_usage
1122 1.1 mrg {
1123 1.1 mrg public:
1124 1.1 mrg /* Default constructor. */
1125 1.1 mrg ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
1126 1.1 mrg /* Constructor. */
1127 1.1 mrg ggc_usage (size_t allocated, size_t times, size_t peak,
1128 1.1 mrg size_t freed, size_t collected, size_t overhead)
1129 1.1 mrg : mem_usage (allocated, times, peak),
1130 1.1 mrg m_freed (freed), m_collected (collected), m_overhead (overhead) {}
1131 1.1 mrg
1132 1.1 mrg /* Equality operator. */
1133 1.1 mrg inline bool
1134 1.1 mrg operator== (const ggc_usage &second) const
1135 1.1 mrg {
1136 1.1 mrg return (get_balance () == second.get_balance ()
1137 1.1 mrg && m_peak == second.m_peak
1138 1.1 mrg && m_times == second.m_times);
1139 1.1 mrg }
1140 1.1 mrg
1141 1.1 mrg /* Comparison operator. */
1142 1.1 mrg inline bool
1143 1.1 mrg operator< (const ggc_usage &second) const
1144 1.1 mrg {
1145 1.1 mrg if (*this == second)
1146 1.1 mrg return false;
1147 1.1 mrg
1148 1.1 mrg return (get_balance () == second.get_balance () ?
1149 1.1 mrg (m_peak == second.m_peak ? m_times < second.m_times
1150 1.1 mrg : m_peak < second.m_peak)
1151 1.1 mrg : get_balance () < second.get_balance ());
1152 1.1 mrg }
1153 1.1 mrg
1154 1.1 mrg /* Register overhead of ALLOCATED and OVERHEAD bytes. */
1155 1.1 mrg inline void
1156 1.1 mrg register_overhead (size_t allocated, size_t overhead)
1157 1.1 mrg {
1158 1.1 mrg m_allocated += allocated;
1159 1.1 mrg m_overhead += overhead;
1160 1.1 mrg m_times++;
1161 1.1 mrg }
1162 1.1 mrg
1163 1.1 mrg /* Release overhead of SIZE bytes. */
1164 1.1 mrg inline void
1165 1.1 mrg release_overhead (size_t size)
1166 1.1 mrg {
1167 1.1 mrg m_freed += size;
1168 1.1 mrg }
1169 1.1 mrg
1170 1.1 mrg /* Sum the usage with SECOND usage. */
1171 1.1 mrg ggc_usage
1172 1.1 mrg operator+ (const ggc_usage &second)
1173 1.1 mrg {
1174 1.1 mrg return ggc_usage (m_allocated + second.m_allocated,
1175 1.1 mrg m_times + second.m_times,
1176 1.1 mrg m_peak + second.m_peak,
1177 1.1 mrg m_freed + second.m_freed,
1178 1.1 mrg m_collected + second.m_collected,
1179 1.1 mrg m_overhead + second.m_overhead);
1180 1.1 mrg }
1181 1.1 mrg
1182 1.1 mrg /* Dump usage with PREFIX, where TOTAL is sum of all rows. */
1183 1.1 mrg inline void
1184 1.1 mrg dump (const char *prefix, ggc_usage &total) const
1185 1.1 mrg {
1186 1.1 mrg size_t balance = get_balance ();
1187 1.1 mrg fprintf (stderr,
1188 1.1 mrg "%-48s " PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%"
1189 1.1 mrg PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%" PRsa (9) "\n",
1190 1.1 mrg prefix,
1191 1.1 mrg SIZE_AMOUNT (balance), get_percent (balance, total.get_balance ()),
1192 1.1 mrg SIZE_AMOUNT (m_collected),
1193 1.1 mrg get_percent (m_collected, total.m_collected),
1194 1.1 mrg SIZE_AMOUNT (m_freed), get_percent (m_freed, total.m_freed),
1195 1.1 mrg SIZE_AMOUNT (m_overhead),
1196 1.1 mrg get_percent (m_overhead, total.m_overhead),
1197 1.1 mrg SIZE_AMOUNT (m_times));
1198 1.1 mrg }
1199 1.1 mrg
1200 1.1 mrg /* Dump usage coupled to LOC location, where TOTAL is sum of all rows. */
1201 1.1 mrg inline void
1202 1.1 mrg dump (mem_location *loc, ggc_usage &total) const
1203 1.1 mrg {
1204 1.1 mrg char *location_string = loc->to_string ();
1205 1.1 mrg
1206 1.1 mrg dump (location_string, total);
1207 1.1 mrg
1208 1.1 mrg free (location_string);
1209 1.1 mrg }
1210 1.1 mrg
1211 1.1 mrg /* Dump footer. */
1212 1.1 mrg inline void
1213 1.1 mrg dump_footer ()
1214 1.1 mrg {
1215 1.1 mrg dump ("Total", *this);
1216 1.1 mrg }
1217 1.1 mrg
1218 1.1 mrg /* Get balance which is GGC allocation leak. */
1219 1.1 mrg inline size_t
1220 1.1 mrg get_balance () const
1221 1.1 mrg {
1222 1.1 mrg return m_allocated + m_overhead - m_collected - m_freed;
1223 1.1 mrg }
1224 1.1 mrg
1225 1.1 mrg typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;
1226 1.1 mrg
1227 1.1 mrg /* Compare wrapper used by qsort method. */
1228 1.1 mrg static int
1229 1.1 mrg compare (const void *first, const void *second)
1230 1.1 mrg {
1231 1.1 mrg const mem_pair_t mem1 = *(const mem_pair_t *) first;
1232 1.1 mrg const mem_pair_t mem2 = *(const mem_pair_t *) second;
1233 1.1 mrg
1234 1.1 mrg size_t balance1 = mem1.second->get_balance ();
1235 1.1 mrg size_t balance2 = mem2.second->get_balance ();
1236 1.1 mrg
1237 1.1 mrg return balance1 == balance2 ? 0 : (balance1 < balance2 ? 1 : -1);
1238 1.1 mrg }
1239 1.1 mrg
1240 1.1 mrg /* Dump header with NAME. */
1241 1.1 mrg static inline void
1242 1.1 mrg dump_header (const char *name)
1243 1.1 mrg {
1244 1.1 mrg fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Leak", "Garbage",
1245 1.1 mrg "Freed", "Overhead", "Times");
1246 1.1 mrg }
1247 1.1 mrg
1248 1.1 mrg /* Freed memory in bytes. */
1249 1.1 mrg size_t m_freed;
1250 1.1 mrg /* Collected memory in bytes. */
1251 1.1 mrg size_t m_collected;
1252 1.1 mrg /* Overhead memory in bytes. */
1253 1.1 mrg size_t m_overhead;
1254 1.1 mrg };
1255 1.1 mrg
/* GCC memory description: per-call-site table of ggc_usage records, used
   by the overhead-tracking functions below.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;
1258 1.1 mrg
1259 1.1 mrg /* Dump per-site memory statistics. */
1260 1.1 mrg
1261 1.1 mrg void
1262 1.1 mrg dump_ggc_loc_statistics ()
1263 1.1 mrg {
1264 1.1 mrg if (! GATHER_STATISTICS)
1265 1.1 mrg return;
1266 1.1 mrg
1267 1.1 mrg ggc_collect (GGC_COLLECT_FORCE);
1268 1.1 mrg
1269 1.1 mrg ggc_mem_desc.dump (GGC_ORIGIN);
1270 1.1 mrg }
1271 1.1 mrg
1272 1.1 mrg /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */
1273 1.1 mrg void
1274 1.1 mrg ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
1275 1.1 mrg {
1276 1.1 mrg ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC_ORIGIN, false
1277 1.1 mrg FINAL_PASS_MEM_STAT);
1278 1.1 mrg
1279 1.1 mrg ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
1280 1.1 mrg usage->register_overhead (allocated, overhead);
1281 1.1 mrg }
1282 1.1 mrg
/* Notice that the pointer has been freed.  Removes PTR from the tracked
   object map so it is no longer counted against its call site's balance.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}
1289 1.1 mrg
/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  Objects the collector did not mark are garbage:
   their size is credited to the owning call site's m_collected and the
   pointer is dropped from the tracking map.
   NOTE(review): this removes elements while iterating; it relies on
   hash_map iteration remaining valid across remove () of the current
   element -- confirm against hash-map.h before restructuring.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      {
	(*it).second.first->m_collected += (*it).second.second;
	ggc_mem_desc.m_reverse_object_map->remove ((*it).first);
      }
}
1306 1.1 mrg
/* Print memory used by heap if this info is available.  Reports the
   malloc arena size (total non-mmapped space obtained from the system)
   to stderr unless -quiet is in effect; a no-op when neither mallinfo
   nor mallinfo2 is available.  */

void
report_heap_memory_use ()
{
#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
#ifdef HAVE_MALLINFO2
#define MALLINFO_FN mallinfo2
#else
#define MALLINFO_FN mallinfo
#endif
  if (!quiet_flag)
    fprintf (stderr, " {heap " PRsa (0) "}",
	     SIZE_AMOUNT (MALLINFO_FN ().arena));
/* Don't let the helper macro leak into the rest of the translation unit.  */
#undef MALLINFO_FN
#endif
}
1323