/* Simple garbage collection for the GNU compiler.
2 Copyright (C) 1999-2022 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* Generic garbage collection (GC) functions and data, not specific to
21 any particular GC implementation. */
22
23 #include "config.h"
24 #define INCLUDE_MALLOC_H
25 #include "system.h"
26 #include "coretypes.h"
27 #include "timevar.h"
28 #include "diagnostic-core.h"
29 #include "ggc-internal.h"
30 #include "hosthooks.h"
31 #include "plugin.h"
32 #include "options.h"
33
/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  Non-NULL only while
   ggc_print_common_statistics is running a collection.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

/* Forward declarations for the PCH save/restore machinery defined
   later in this file.  */
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;
55
56 /* Dynamically register a new GGC root table RT. This is useful for
57 plugins. */
58
59 void
60 ggc_register_root_tab (const struct ggc_root_tab* rt)
61 {
62 if (rt)
63 extra_root_vec.safe_push (rt);
64 }
65
66 /* Mark all the roots in the table RT. */
67
68 static void
69 ggc_mark_root_tab (const_ggc_root_tab_t rt)
70 {
71 size_t i;
72
73 for ( ; rt->base != NULL; rt++)
74 for (i = 0; i < rt->nelt; i++)
75 (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
76 }
77
/* Iterate through all registered roots and mark each element.  Called
   at the start of a collection; everything not reachable from here (or
   from the stringpool, when protected) is garbage.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  /* Zero the deletable roots first, so whatever they referenced can be
     collected unless it is reachable some other way.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark the statically registered root tables...  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  /* ... and the dynamically registered ones (e.g. from plugins).  */
  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Caches are cleared after roots are marked, so cached entries that
     are otherwise unreachable get dropped.  */
  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
108
/* Allocate a block of SIZE bytes from the GC heap, then clear it.
   F, S and N are forwarded to ggc_internal_alloc (presumably the
   finalizer callback and its element size/count — see that function's
   definition to confirm).  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
			    MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}
118
/* Resize the GC-allocated block X to SIZE bytes, possibly re-allocating
   it.  A NULL X behaves like a plain allocation.  Shrinking (relative
   to the pool size reported by ggc_get_size) is done in place; growing
   allocates a new block, copies the old contents into it and frees the
   old block.  Returns the (possibly moved) block.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
164
/* htab allocation callback: allocate one cleared struct htab from the
   GC heap.  The C*N arguments demanded by the htab interface must
   describe exactly one struct htab; they are otherwise ignored.  */
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}
172
/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
/* Allocate a cleared GC array of C pointers; N must be the size of one
   pointer element.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}
181
182 /* These are for splay_tree_new_ggc. */
183 void *
184 ggc_splay_alloc (int sz, void *nl)
185 {
186 gcc_assert (!nl);
187 return ggc_internal_alloc (sz);
188 }
189
190 void
191 ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
192 {
193 gcc_assert (!nl);
194 }
195
/* Print allocation statistics common to all collectors to STREAM.
   STATS is filled in as a side effect of running one collection with
   the global ggc_stats pointer set.  */
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
212
/* Functions for saving and restoring GCable memory to disk.  */

/* One record per object noted for PCH writing.  */
struct ptr_data
{
  void *obj;			/* The object's current address.  */
  void *note_ptr_cookie;	/* Cookie passed back to NOTE_PTR_FN.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside OBJ.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Size of OBJ in bytes.  */
  void *new_addr;		/* Address OBJ will have in the PCH image.  */
};

/* Hash a pointer by discarding its low 3 bits, which carry little
   information for aligned allocations.  */
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
227
/* Helper for hashing saving_htab.  Entries are ptr_data records keyed
   by the object address they describe.  */

struct saving_hasher : free_ptr_hash <ptr_data>
{
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

/* Hash a record by the address of the object it describes.  */

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

/* A record matches when it describes exactly the object P2.  */

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}
248
/* Map from object address to its ptr_data record, live only while a
   PCH is being written.  */
static hash_table<saving_hasher> *saving_htab;
/* PCH-image addresses of callback (function) pointers to be re-biased
   at load time.  */
static vec<void *> callback_vec;
/* PCH-image addresses of relocated data pointers, collected by
   relocate_ptrs and written out as a uleb128 delta list.  */
static vec<void *> reloc_addrs_vec;
252
/* Register an object in the hash table.  OBJ is the object itself,
   NOTE_PTR_FN walks the pointers within it and NOTE_PTR_COOKIE is
   handed back to that callback.  Returns 1 if the object was newly
   registered, 0 if it was already known (or is NULL / the (void *) 1
   sentinel).  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      /* A repeated registration must agree with the original one.  */
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  /* Strings (gt_pch_p_S) are measured directly; everything else asks
     the allocator for its size.  */
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
283
/* Register address of a callback pointer.  OBJ is the location of a
   function pointer inside the already-noted object starting at BASE.
   If that pointer is non-NULL, record its address within the PCH image
   so the loader can re-bias it against the executable's load address
   (see the binbias handling in gt_pch_restore).  */
void
gt_pch_note_callback (void *obj, void *base)
{
  void *ptr;
  /* Read via memcpy rather than dereferencing — OBJ need not be
     suitably aligned.  */
  memcpy (&ptr, obj, sizeof (void *));
  if (ptr != NULL)
    {
      struct ptr_data *data
	= (struct ptr_data *)
	  saving_htab->find_with_hash (base, POINTER_HASH (base));
      gcc_assert (data);
      /* The image address is BASE's new address plus OBJ's offset
	 within the object.  */
      callback_vec.safe_push ((char *) data->new_addr
			      + ((char *) obj - (char *) base));
    }
}
300
/* Register a reorder function for OBJ, which must already have been
   noted in the hash table with the same NOTE_PTR_COOKIE.  The NULL and
   (void *) 1 sentinels are ignored.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
318
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of objects registered.  */
  struct ptr_data **ptrs;	/* All registered objects...  */
  size_t ptrs_i;		/* ... and the index of the current one.  */
};
329
/* Callbacks for htab_traverse.  */

/* Tell the collector about one registered object so it can size the
   PCH image, and bump the object count.  */
int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}
342
/* Assign each registered object its address in the PCH image and
   collect the records into state->ptrs for sorting.  */
int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
353
354 /* Callback for qsort. */
355
356 static int
357 compare_ptr_data (const void *p1_p, const void *p2_p)
358 {
359 const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
360 const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
361 return (((size_t)p1->new_addr > (size_t)p2->new_addr)
362 - ((size_t)p1->new_addr < (size_t)p2->new_addr));
363 }
364
/* Callbacks for note_ptr_fn.  */

/* Rewrite the pointer at *PTR_P to the address its target will have in
   the PCH image.  REAL_PTR_P is where that pointer lives inside the
   original object (PTR_P may point into a scratch copy); if the two
   differ, also record the pointer's image address for the relocation
   list.  STATE_P is the traversal_state, whose ptrs_i names the object
   currently being written.  */

static void
relocate_ptrs (void *ptr_p, void *real_ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
  /* No relocation record when the pointer is written in place.  */
  if (ptr_p == real_ptr_p)
    return;
  if (real_ptr_p == NULL)
    real_ptr_p = ptr_p;
  /* The pointer must lie fully within the object being written.  */
  gcc_assert (real_ptr_p >= state->ptrs[state->ptrs_i]->obj
	      && ((char *) real_ptr_p + sizeof (void *)
		  <= ((char *) state->ptrs[state->ptrs_i]->obj
		      + state->ptrs[state->ptrs_i]->size)));
  /* Translate the pointer's in-object offset to its image address and
     remember it for the relocation list gt_pch_save writes out.  */
  void *addr
    = (void *) ((char *) state->ptrs[state->ptrs_i]->new_addr
		+ ((char *) real_ptr_p
		   - (char *) state->ptrs[state->ptrs_i]->obj));
  reloc_addrs_vec.safe_push (addr);
}
396
/* Write out, after relocation, the pointers in TAB.  Each root pointer
   is translated to its PCH-image address via saving_htab; NULL and the
   (void *) 1 sentinel are written through unchanged.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	}
}
428
/* Callback for qsort.  Order two addresses numerically.  */

static int
compare_ptr (const void *p1_p, const void *p2_p)
{
  uintptr_t a1 = (uintptr_t) *(void *const *) p1_p;
  uintptr_t a2 = (uintptr_t) *(void *const *) p2_p;
  if (a1 < a2)
    return -1;
  return a1 > a2 ? 1 : 0;
}
439
/* Decode one uleb128 from P, return first byte after it, store
   decoded value into *VAL.  Each input byte contributes its low seven
   bits; a set high bit means more bytes follow.  */

static unsigned char *
read_uleb128 (unsigned char *p, size_t *val)
{
  size_t result = 0;
  unsigned int shift = 0;

  for (;;)
    {
      unsigned char byte = *p++;
      result |= ((size_t) (byte & 0x7f)) << shift;
      if ((byte & 0x80) == 0)
	break;
      shift += 7;
    }

  *val = result;
  return p;
}
462
/* Store VAL as uleb128 at P, return length in bytes.  Seven bits are
   emitted per byte, least significant first; the high bit marks a
   continuation.  VAL == 0 still produces one byte.  */

static size_t
write_uleb128 (unsigned char *p, size_t val)
{
  unsigned char *q = p;

  for (;;)
    {
      unsigned char byte = val & 0x7f;
      val >>= 7;
      if (val == 0)
	{
	  *q++ = byte;
	  break;
	}
      /* More bytes to follow.  */
      *q++ = byte | 0x80;
    }

  return (size_t) (q - p);
}
483
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;	/* File offset where the heap image starts.  */
  size_t size;		/* Size of the heap image in bytes.  */
  void *preferred_base;	/* Address the image was laid out for.  */
};
492
/* Write out the state of the compiler to F.  Layout of the file:
   scalar roots, translated global pointers, an mmap_info header,
   padding to the host's allocation granularity, the heap image itself,
   a uleb128-encoded relocation list, and finally the callback-pointer
   fixup list.  gt_pch_restore reads it back in the same order.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  /* Walk every root; the pchw routines register each reachable object
     via gt_pch_note_object.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
  /* If the host cannot supply any suitable address for this, we are stuck.  */
  if (mmi.preferred_base == NULL)
    fatal_error (input_location,
		 "cannot write PCH file: required memory segment unavailable");

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "cannot write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "cannot get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "cannot write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      state.ptrs_i = i;
      /* Grow the scratch buffer that holds a copy of the object while
	 its pointers are being rewritten.  */
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size, true);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      /* Copy the object aside, rewrite its pointers in place, write it
	 out, then restore the original contents from the copy (strings
	 have no pointers and are skipped).  */
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  reloc_addrs_vec.qsort (compare_ptr);

  /* Emit the relocation addresses as uleb128-encoded deltas between
     consecutive addresses: one pass to compute the encoded size, then
     a second pass to write the bytes.  */
  size_t reloc_addrs_size = 0;
  void *last_addr = NULL;
  unsigned char uleb128_buf[sizeof (size_t) * 2];
  for (void *addr : reloc_addrs_vec)
    {
      gcc_assert ((uintptr_t) addr >= (uintptr_t) mmi.preferred_base
		  && ((uintptr_t) addr + sizeof (void *)
		      <= (uintptr_t) mmi.preferred_base + mmi.size));
      if (addr == last_addr)
	continue;
      if (last_addr == NULL)
	last_addr = mmi.preferred_base;
      size_t diff = (uintptr_t) addr - (uintptr_t) last_addr;
      reloc_addrs_size += write_uleb128 (uleb128_buf, diff);
      last_addr = addr;
    }
  if (fwrite (&reloc_addrs_size, sizeof (reloc_addrs_size), 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  last_addr = NULL;
  for (void *addr : reloc_addrs_vec)
    {
      if (addr == last_addr)
	continue;
      if (last_addr == NULL)
	last_addr = mmi.preferred_base;
      size_t diff = (uintptr_t) addr - (uintptr_t) last_addr;
      reloc_addrs_size = write_uleb128 (uleb128_buf, diff);
      if (fwrite (uleb128_buf, 1, reloc_addrs_size, f) != reloc_addrs_size)
	fatal_error (input_location, "cannot write PCH file: %m");
      last_addr = addr;
    }

  ggc_pch_finish (state.d, state.f);

  gt_pch_fixup_stringpool ();

  /* Record this binary's address of gt_pch_save plus the callback
     pointer addresses; the loader compares against its own address of
     gt_pch_save to compute the executable's load bias.  */
  unsigned num_callbacks = callback_vec.length ();
  void (*pch_save) (FILE *) = &gt_pch_save;
  if (fwrite (&pch_save, sizeof (pch_save), 1, f) != 1
      || fwrite (&num_callbacks, sizeof (num_callbacks), 1, f) != 1
      || (num_callbacks
	  && fwrite (callback_vec.address (), sizeof (void *), num_callbacks,
		     f) != num_callbacks))
    fatal_error (input_location, "cannot write PCH file: %m");

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
  callback_vec.release ();
  reloc_addrs_vec.release ();
}
719
/* Read the state of the compiler back in from F, in the same order
   gt_pch_save wrote it.  If the heap image could not be placed at its
   preferred address, every pointer into it — globals, in-image
   pointers and callback pointers — is re-biased.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;
  /* Kept so diagnostics during a failed load use valid state.  */
  struct line_maps * old_line_table = line_table;
  location_t old_input_loc = input_location;

  /* We are about to reload the line maps along with the rest of the PCH
     data, which means that the (loaded) ones cannot be guaranteed to be
     in any valid state for reporting diagnostics that happen during the
     load.  Save the current table (and use it during the loading process
     below).  */
  class line_maps *save_line_table = line_table;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	{
	  line_table = old_line_table;
	  input_location = old_input_loc;
	  fatal_error (input_location, "cannot read PCH file: %m");
	}

  /* Read in all the global pointers, in 6 easy loops.  */
  bool error_reading_pointers = false;
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  error_reading_pointers = true;

  /* Stash the newly read-in line table pointer - it does not point to
     anything meaningful yet, so swap the old one back in.  */
  class line_maps *new_line_table = line_table;
  line_table = save_line_table;
  if (error_reading_pointers)
    fatal_error (input_location, "cannot read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }

  /* The hook may update mmi.preferred_base to where the image actually
     landed; keep the original to detect and compute the bias.  */
  void *orig_preferred_base = mmi.preferred_base;
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);

  /* We could not mmap or otherwise allocate the required memory at the
     address needed.  */
  if (result < 0)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      sorry_at (input_location, "PCH allocation failure");
      /* There is no point in continuing from here, we will only end up
	 with a crashed (most likely hanging) compiler.  */
      exit (-1);
    }

  /* (0) We allocated memory, but did not mmap the file, so we need to read
     the data in manually.  (>0) Otherwise the mmap succeed for the address
     we wanted.  */
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	{
	  line_table = old_line_table;
	  input_location = old_input_loc;
	  fatal_error (input_location, "cannot read PCH file: %m");
	}
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }

  size_t reloc_addrs_size;
  if (fread (&reloc_addrs_size, sizeof (reloc_addrs_size), 1, f) != 1)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }

  if (orig_preferred_base != mmi.preferred_base)
    {
      uintptr_t bias
	= (uintptr_t) mmi.preferred_base - (uintptr_t) orig_preferred_base;

      /* Adjust all the global pointers by bias.  */
      line_table = new_line_table;
      for (rt = gt_ggc_rtab; *rt; rt++)
	for (rti = *rt; rti->base != NULL; rti++)
	  for (i = 0; i < rti->nelt; i++)
	    {
	      char *addr = (char *)rti->base + rti->stride * i;
	      char *p;
	      memcpy (&p, addr, sizeof (void *));
	      if ((uintptr_t) p >= (uintptr_t) orig_preferred_base
		  && (uintptr_t) p < (uintptr_t) orig_preferred_base + mmi.size)
		{
		  p = (char *) ((uintptr_t) p + bias);
		  memcpy (addr, &p, sizeof (void *));
		}
	    }
      new_line_table = line_table;
      line_table = save_line_table;

      /* And adjust all the pointers in the image by bias too.  The
	 relocation list is a stream of uleb128 deltas, read through a
	 4K buffer; a partial final uleb128 is carried over between
	 buffer refills (hence the 2*sizeof(size_t) reserve).  */
      char *addr = (char *) mmi.preferred_base;
      unsigned char uleb128_buf[4096], *uleb128_ptr = uleb128_buf;
      while (reloc_addrs_size != 0)
	{
	  size_t this_size
	    = MIN (reloc_addrs_size,
		   (size_t) (4096 - (uleb128_ptr - uleb128_buf)));
	  if (fread (uleb128_ptr, 1, this_size, f) != this_size)
	    {
	      line_table = old_line_table;
	      input_location = old_input_loc;
	      fatal_error (input_location, "cannot read PCH file: %m");
	    }
	  unsigned char *uleb128_end = uleb128_ptr + this_size;
	  if (this_size != reloc_addrs_size)
	    uleb128_end -= 2 * sizeof (size_t);
	  uleb128_ptr = uleb128_buf;
	  while (uleb128_ptr < uleb128_end)
	    {
	      size_t diff;
	      uleb128_ptr = read_uleb128 (uleb128_ptr, &diff);
	      addr = (char *) ((uintptr_t) addr + diff);

	      char *p;
	      memcpy (&p, addr, sizeof (void *));
	      gcc_assert ((uintptr_t) p >= (uintptr_t) orig_preferred_base
			  && ((uintptr_t) p
			      < (uintptr_t) orig_preferred_base + mmi.size));
	      p = (char *) ((uintptr_t) p + bias);
	      memcpy (addr, &p, sizeof (void *));
	    }
	  reloc_addrs_size -= this_size;
	  if (reloc_addrs_size == 0)
	    break;
	  this_size = uleb128_end + 2 * sizeof (size_t) - uleb128_ptr;
	  memcpy (uleb128_buf, uleb128_ptr, this_size);
	  uleb128_ptr = uleb128_buf + this_size;
	}
    }
  else if (fseek (f, (mmi.offset + mmi.size + sizeof (reloc_addrs_size)
		      + reloc_addrs_size), SEEK_SET) != 0)
    fatal_error (input_location, "cannot read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  /* Re-bias the recorded callback (function) pointers against this
     executable's load address, detected by comparing the saved address
     of gt_pch_save with ours.  */
  void (*pch_save) (FILE *);
  unsigned num_callbacks;
  if (fread (&pch_save, sizeof (pch_save), 1, f) != 1
      || fread (&num_callbacks, sizeof (num_callbacks), 1, f) != 1)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }
  if (pch_save != &gt_pch_save)
    {
      uintptr_t binbias = (uintptr_t) &gt_pch_save - (uintptr_t) pch_save;
      void **ptrs = XNEWVEC (void *, num_callbacks);
      unsigned i;
      uintptr_t bias
	= (uintptr_t) mmi.preferred_base - (uintptr_t) orig_preferred_base;

      if (fread (ptrs, sizeof (void *), num_callbacks, f) != num_callbacks)
	{
	  line_table = old_line_table;
	  input_location = old_input_loc;
	  fatal_error (input_location, "cannot read PCH file: %m");
	}
      for (i = 0; i < num_callbacks; ++i)
	{
	  void *ptr = (void *) ((uintptr_t) ptrs[i] + bias);
	  memcpy (&pch_save, ptr, sizeof (pch_save));
	  pch_save = (void (*) (FILE *)) ((uintptr_t) pch_save + binbias);
	  memcpy (ptr, &pch_save, sizeof (pch_save));
	}
      XDELETE (ptrs);
    }
  else if (fseek (f, num_callbacks * sizeof (void *), SEEK_CUR) != 0)
    fatal_error (input_location, "cannot read PCH file: %m");

  gt_pch_restore_stringpool ();

  /* Barring corruption of the PCH file, the restored line table should be
     complete and usable.  */
  line_table = new_line_table;
}
935
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  /* No preference; returning NULL makes gt_pch_save report failure.  */
  return NULL;
}
946
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so
   relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *&base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  /* NOTE(review): on a mismatch the allocation is never freed; the
     caller (gt_pch_restore) exits on the -1 result anyway.  */
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
960
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}
970
971 #if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *addr = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (addr == (void *) MAP_FAILED)
    return NULL;

  /* Release the probe mapping; only the address it landed at matters.  */
  munmap ((caddr_t) addr, size);
  return addr;
}
990
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *&base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  /* No MAP_FIXED: if the kernel ignores the hint we get a different
     address and must report failure rather than clobber mappings.  */
  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
1015 #endif /* HAVE_MMAP_FILE */
1016
1017 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
1018
/* Modify the bound based on rlimits.  LIMIT is a byte count (as a
   double); the result is LIMIT clamped to the process's address-space
   or data-segment resource limit, where one is available.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * ONE_M)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
1051
1052 /* Heuristic to set a default for GGC_MIN_EXPAND. */
1053 static int
1054 ggc_min_expand_heuristic (void)
1055 {
1056 double min_expand = physmem_total ();
1057
1058 /* Adjust for rlimits. */
1059 min_expand = ggc_rlimit_bound (min_expand);
1060
1061 /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
1062 a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB). */
1063 min_expand /= ONE_G;
1064 min_expand *= 70;
1065 min_expand = MIN (min_expand, 70);
1066 min_expand += 30;
1067
1068 return min_expand;
1069 }
1070
1071 /* Heuristic to set a default for GGC_MIN_HEAPSIZE. */
1072 static int
1073 ggc_min_heapsize_heuristic (void)
1074 {
1075 double phys_kbytes = physmem_total ();
1076 double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);
1077
1078 phys_kbytes /= ONE_K; /* Convert to Kbytes. */
1079 limit_kbytes /= ONE_K;
1080
1081 /* The heuristic is RAM/8, with a lower bound of 4M and an upper
1082 bound of 128M (when RAM >= 1GB). */
1083 phys_kbytes /= 8;
1084
1085 #if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
1086 /* Try not to overrun the RSS limit while doing garbage collection.
1087 The RSS limit is only advisory, so no margin is subtracted. */
1088 {
1089 struct rlimit rlim;
1090 if (getrlimit (RLIMIT_RSS, &rlim) == 0
1091 && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
1092 phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / ONE_K);
1093 }
1094 # endif
1095
1096 /* Don't blindly run over our data limit; do GC at least when the
1097 *next* GC would be within 20Mb of the limit or within a quarter of
1098 the limit, whichever is larger. If GCC does hit the data limit,
1099 compilation will fail, so this tries to be conservative. */
1100 limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * ONE_K));
1101 limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
1102 phys_kbytes = MIN (phys_kbytes, limit_kbytes);
1103
1104 phys_kbytes = MAX (phys_kbytes, 4 * ONE_K);
1105 phys_kbytes = MIN (phys_kbytes, 128 * ONE_K);
1106
1107 return phys_kbytes;
1108 }
1109 #endif
1110
1111 void
1112 init_ggc_heuristics (void)
1113 {
1114 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
1115 param_ggc_min_expand = ggc_min_expand_heuristic ();
1116 param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
1117 #endif
1118 }
1119
1120 /* GGC memory usage. */
1121 class ggc_usage: public mem_usage
1122 {
1123 public:
1124 /* Default constructor. */
1125 ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
1126 /* Constructor. */
1127 ggc_usage (size_t allocated, size_t times, size_t peak,
1128 size_t freed, size_t collected, size_t overhead)
1129 : mem_usage (allocated, times, peak),
1130 m_freed (freed), m_collected (collected), m_overhead (overhead) {}
1131
1132 /* Equality operator. */
1133 inline bool
1134 operator== (const ggc_usage &second) const
1135 {
1136 return (get_balance () == second.get_balance ()
1137 && m_peak == second.m_peak
1138 && m_times == second.m_times);
1139 }
1140
1141 /* Comparison operator. */
1142 inline bool
1143 operator< (const ggc_usage &second) const
1144 {
1145 if (*this == second)
1146 return false;
1147
1148 return (get_balance () == second.get_balance () ?
1149 (m_peak == second.m_peak ? m_times < second.m_times
1150 : m_peak < second.m_peak)
1151 : get_balance () < second.get_balance ());
1152 }
1153
1154 /* Register overhead of ALLOCATED and OVERHEAD bytes. */
1155 inline void
1156 register_overhead (size_t allocated, size_t overhead)
1157 {
1158 m_allocated += allocated;
1159 m_overhead += overhead;
1160 m_times++;
1161 }
1162
1163 /* Release overhead of SIZE bytes. */
1164 inline void
1165 release_overhead (size_t size)
1166 {
1167 m_freed += size;
1168 }
1169
1170 /* Sum the usage with SECOND usage. */
1171 ggc_usage
1172 operator+ (const ggc_usage &second)
1173 {
1174 return ggc_usage (m_allocated + second.m_allocated,
1175 m_times + second.m_times,
1176 m_peak + second.m_peak,
1177 m_freed + second.m_freed,
1178 m_collected + second.m_collected,
1179 m_overhead + second.m_overhead);
1180 }
1181
1182 /* Dump usage with PREFIX, where TOTAL is sum of all rows. */
1183 inline void
1184 dump (const char *prefix, ggc_usage &total) const
1185 {
1186 size_t balance = get_balance ();
1187 fprintf (stderr,
1188 "%-48s " PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%"
1189 PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%" PRsa (9) "\n",
1190 prefix,
1191 SIZE_AMOUNT (balance), get_percent (balance, total.get_balance ()),
1192 SIZE_AMOUNT (m_collected),
1193 get_percent (m_collected, total.m_collected),
1194 SIZE_AMOUNT (m_freed), get_percent (m_freed, total.m_freed),
1195 SIZE_AMOUNT (m_overhead),
1196 get_percent (m_overhead, total.m_overhead),
1197 SIZE_AMOUNT (m_times));
1198 }
1199
1200 /* Dump usage coupled to LOC location, where TOTAL is sum of all rows. */
1201 inline void
1202 dump (mem_location *loc, ggc_usage &total) const
1203 {
1204 char *location_string = loc->to_string ();
1205
1206 dump (location_string, total);
1207
1208 free (location_string);
1209 }
1210
1211 /* Dump footer. */
1212 inline void
1213 dump_footer ()
1214 {
1215 dump ("Total", *this);
1216 }
1217
1218 /* Get balance which is GGC allocation leak. */
1219 inline size_t
1220 get_balance () const
1221 {
1222 return m_allocated + m_overhead - m_collected - m_freed;
1223 }
1224
1225 typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;
1226
1227 /* Compare wrapper used by qsort method. */
1228 static int
1229 compare (const void *first, const void *second)
1230 {
1231 const mem_pair_t mem1 = *(const mem_pair_t *) first;
1232 const mem_pair_t mem2 = *(const mem_pair_t *) second;
1233
1234 size_t balance1 = mem1.second->get_balance ();
1235 size_t balance2 = mem2.second->get_balance ();
1236
1237 return balance1 == balance2 ? 0 : (balance1 < balance2 ? 1 : -1);
1238 }
1239
1240 /* Dump header with NAME. */
1241 static inline void
1242 dump_header (const char *name)
1243 {
1244 fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Leak", "Garbage",
1245 "Freed", "Overhead", "Times");
1246 }
1247
1248 /* Freed memory in bytes. */
1249 size_t m_freed;
1250 /* Collected memory in bytes. */
1251 size_t m_collected;
1252 /* Overhead memory in bytes. */
1253 size_t m_overhead;
1254 };
1255
/* Per-allocation-site statistics for all GGC allocations; records are
   created by ggc_record_overhead and dumped by dump_ggc_loc_statistics.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;
1258
1259 /* Dump per-site memory statistics. */
1260
1261 void
1262 dump_ggc_loc_statistics ()
1263 {
1264 if (! GATHER_STATISTICS)
1265 return;
1266
1267 ggc_collect (GGC_COLLECT_FORCE);
1268
1269 ggc_mem_desc.dump (GGC_ORIGIN);
1270 }
1271
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).
   PTR is the object just allocated; the MEM_STAT machinery carries the
   caller's source location through to the descriptor lookup.  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  /* Find or create the per-call-site usage record.  */
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC_ORIGIN, false
						       FINAL_PASS_MEM_STAT);

  /* Remember PTR itself so ggc_prune_overhead_list can later credit its
     size back to this site when the object is collected.  */
  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}
1282
/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  /* Drop PTR from the statistics bookkeeping; see
     mem_alloc_description::release_object_overhead for the details of
     how the owning usage record is updated.  */
  ggc_mem_desc.release_object_overhead (ptr);
}
1289
/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  Each dead object's recorded size is credited to
   its allocation site as collected garbage and the object is forgotten.  */
void
ggc_prune_overhead_list (void)
{
  /* Map from object pointer to (allocation-site usage, recorded size).  */
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      {
	/* NOTE(review): entries are removed while iterating; this relies
	   on hash_map iterators remaining valid across remove — confirm
	   against hash_map's documented iteration guarantees.  */
	(*it).second.first->m_collected += (*it).second.second;
	ggc_mem_desc.m_reverse_object_map->remove ((*it).first);
      }
}
1306
/* Print memory used by heap if this info is available.  Writes a short
   " {heap N}" note to stderr (suppressed by -quiet) using mallinfo2 when
   available, falling back to the deprecated mallinfo otherwise.  */

void
report_heap_memory_use ()
{
#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
#ifdef HAVE_MALLINFO2
#define MALLINFO_FN mallinfo2
#else
#define MALLINFO_FN mallinfo
#endif
  if (!quiet_flag)
    fprintf (stderr, " {heap " PRsa (0) "}",
	     SIZE_AMOUNT (MALLINFO_FN ().arena));
/* Don't let the helper macro leak into the rest of the translation unit;
   preprocessor macros have no scope of their own.  */
#undef MALLINFO_FN
#endif
}
1323