/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "alias.h"
#include "tree.h"
#include "rtl.h"
#include "memmodel.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "cgraph.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
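
/* For instance, a 20-byte allocation request would be rounded up and
   served from a page holding only objects of the next suitable size --
   32 bytes (order 5) in the pure power-of-two scheme, though the extra
   orders defined below refine this for common odd sizes.  */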

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
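
/* For example, with 4K pages (G.lg_pagesize == 12) and the default
   PAGE_L1_BITS of 8, PAGE_L2_BITS is 12; the pointer 0x12345678 would
   yield L1 index 0x12 and L2 index 0x345.  */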

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
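
/* For instance, for 24-byte objects (Z == 24 == 3 * 2^3) one suitable
   pair is S == 3 and M == 0xAAAAAAAB, the multiplicative inverse of 3
   modulo 2^32: an offset of 48 gives (48 * 0xAAAAAAAB) mod 2^32 == 16,
   and 16 >> 3 == 2, i.e. 48 / 24.  */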

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
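
/* (On a typical LP64 host both union members are 8 bytes wide, so
   MAX_ALIGNMENT usually works out to 8.)  */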


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (class loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
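
/* E.g. ROUND_UP_VALUE (10, 8) == 6, since 10 + 6 == 16 is the next
   multiple of 8.  */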

/* Round X to next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};

class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};
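
/* (So a vec_finalizer registered for, say, three 24-byte objects
   invokes the function at m_addr, m_addr + 24 and m_addr + 48.)  */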

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
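
/* E.g. with 64-bit longs, a page holding 1024 objects needs
   CEIL (1024, 64) == 16 longs, i.e. a 128-byte bitmap.  */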

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Traverse the page table and find the entry for a page.
   If the object wasn't allocated by the GC, return NULL.  */

static inline page_entry *
safe_lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
  if (! base[L1])
    return NULL;

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
	G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order),
	     (void *) page, (void *) (page + entry_size - 1));

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     (void *) entry->page, (void *) (entry->page + entry->bytes - 1));

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
  size_t n1 = 0;
  size_t n2 = 0;
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger contiguous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  n1 += len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped, so as not to trigger the
	 garbage collector unnecessarily.  */
      G.bytes_mapped -= len;
      n2 += len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      n1 += len;
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	n1 += g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
  if (!quiet_flag && (n1 || n2))
    {
      fprintf (stderr, " {GC");
      if (n1)
	fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1));
      if (n2)
	fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2));
      fprintf (stderr, "}");
    }
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
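
/* For example, before init_ggc runs, size_lookup[24] is 5, so a
   24-byte request would be served from the 32-byte order; init_ggc
   rewrites the table below so that such sizes map to the matching
   extra order instead.  */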

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
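
/* E.g. ggc_round_alloc_size (20) would typically return 24 (the
   MAX_ALIGNMENT * 3 extra order on hosts with 8-byte alignment), while
   ggc_round_alloc_size (600) exceeds NUM_SIZE_LOOKUP and is rounded by
   the loop above to the 1024-byte order.  */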

/* Push a finalizer onto the appropriate vec.  */

static void
add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
{
  if (f == NULL)
    /* No finalizer.  */;
  else if (n == 1)
    {
      finalizer fin (result, f);
      G.finalizers[G.context_depth].safe_push (fin);
    }
  else
    {
      vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
      G.vec_finalizers[G.context_depth].safe_push (fin);
    }
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
		    MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
			 result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f)
    add_finalizer (result, f, s, n);

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
	{
	  G.stats.total_overhead_under32 += overhead;
	  G.stats.total_allocated_under32 += object_size;
	}
      if (size <= 64)
	{
	  G.stats.total_overhead_under64 += overhead;
	  G.stats.total_allocated_under64 += object_size;
	}
      if (size <= 128)
	{
	  G.stats.total_overhead_under128 += overhead;
	  G.stats.total_allocated_under128 += object_size;
	}
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is alloced.  If it was not
     GC allocated, gracefully bail out.  */
  entry = safe_lookup_page_table_entry (p);
  if (!entry)
    return;

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}


/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  if (in_gc)
    return;

  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
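
/* E.g. for a 24-byte order: 24 == 3 * 2^3, so E ends up as 3 and the
   Newton iteration above converges on the multiplicative inverse of 3
   (0xAAAAAAAB when size_t is 32 bits wide); each pass doubles the
   number of correct low-order bits.  */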

/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  static bool init_p = false;
  unsigned order;

  if (init_p)
    return;
  init_p = true;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((uintptr_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, true);
	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
	continue;

      for (o = size_lookup[i]; o == size_lookup[i]; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  /* Allocate space for the depth 0 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length() == 1);
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}

/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

1902 	  /* Reset the number of free objects and clear the
1903 	     in-use bits.  These will be adjusted by mark_obj.  */
1904 p->num_free_objects = num_objects;
1905 memset (p->in_use_p, 0, bitmap_size);
1906
1907 /* Make sure the one-past-the-end bit is always set. */
1908 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1909 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1910 }
1911 }
1912 }
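
/* A minimal illustrative sketch, not called anywhere, of the bitmap
   arithmetic used above: setting bit K means setting bit
   K % HOST_BITS_PER_LONG of word K / HOST_BITS_PER_LONG.  For the
   one-past-the-end bit, K is the number of objects on the page.
   example_set_bit is hypothetical.  */
static void
example_set_bit (unsigned long *bitmap, size_t k)
{
  bitmap[k / HOST_BITS_PER_LONG]
    |= (unsigned long) 1 << (k % HOST_BITS_PER_LONG);
}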
1913
1914 /* Check if any blocks with a registered finalizer have become unmarked.  If
1915    so, run the finalizer and unregister it, because the block is about to be
1916    freed.  Note that no guarantee is made about the order in which finalizers
1917    run, so touching other objects in GC memory is extremely unwise.  */
1918
1919 static void
1920 ggc_handle_finalizers ()
1921 {
1922 unsigned dlen = G.finalizers.length();
1923 for (unsigned d = G.context_depth; d < dlen; ++d)
1924 {
1925 vec<finalizer> &v = G.finalizers[d];
1926 unsigned length = v.length ();
1927 for (unsigned int i = 0; i < length;)
1928 {
1929 finalizer &f = v[i];
1930 if (!ggc_marked_p (f.addr ()))
1931 {
1932 f.call ();
1933 v.unordered_remove (i);
1934 length--;
1935 }
1936 else
1937 i++;
1938 }
1939 }
1940
1941 gcc_assert (dlen == G.vec_finalizers.length());
1942 for (unsigned d = G.context_depth; d < dlen; ++d)
1943 {
1944 vec<vec_finalizer> &vv = G.vec_finalizers[d];
1945 unsigned length = vv.length ();
1946 for (unsigned int i = 0; i < length;)
1947 {
1948 vec_finalizer &f = vv[i];
1949 if (!ggc_marked_p (f.addr ()))
1950 {
1951 f.call ();
1952 vv.unordered_remove (i);
1953 length--;
1954 }
1955 else
1956 i++;
1957 }
1958 }
1959 }
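
/* A minimal illustrative sketch, not called anywhere, of the removal
   idiom above: unordered_remove deletes element I by overwriting it
   with the last element, so the loops only advance I when nothing
   was removed -- the swapped-in element still has to be examined.
   example_unordered_remove is hypothetical.  */
static void
example_unordered_remove (int *array, unsigned *length, unsigned i)
{
  array[i] = array[*length - 1];
  --*length;
}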
1960
1961 /* Free all empty pages.  Partially empty pages need no attention
1962    because the `mark' bit doubles as an `in-use' bit.  */
1963
1964 static void
1965 sweep_pages (void)
1966 {
1967 unsigned order;
1968
1969 for (order = 2; order < NUM_ORDERS; order++)
1970 {
1971 /* The last page-entry to consider, regardless of entries
1972 placed at the end of the list. */
1973 page_entry * const last = G.page_tails[order];
1974
1975 size_t num_objects;
1976 size_t live_objects;
1977 page_entry *p, *previous;
1978 int done;
1979
1980 p = G.pages[order];
1981 if (p == NULL)
1982 continue;
1983
1984 previous = NULL;
1985 do
1986 {
1987 page_entry *next = p->next;
1988
1989 /* Loop until all entries have been examined. */
1990 done = (p == last);
1991
1992 num_objects = OBJECTS_IN_PAGE (p);
1993
1994 /* Add all live objects on this page to the count of
1995 allocated memory. */
1996 live_objects = num_objects - p->num_free_objects;
1997
1998 G.allocated += OBJECT_SIZE (order) * live_objects;
1999
2000 /* Only objects on pages in the topmost context should get
2001 collected. */
2002 if (p->context_depth < G.context_depth)
2003 ;
2004
2005 /* Remove the page if it's empty. */
2006 else if (live_objects == 0)
2007 {
2008 /* If P was the first page in the list, then NEXT
2009 becomes the new first page in the list, otherwise
2010 splice P out of the forward pointers. */
2011 if (! previous)
2012 G.pages[order] = next;
2013 else
2014 previous->next = next;
2015
2016 /* Splice P out of the back pointers too. */
2017 if (next)
2018 next->prev = previous;
2019
2020 /* Are we removing the last element? */
2021 if (p == G.page_tails[order])
2022 G.page_tails[order] = previous;
2023 free_page (p);
2024 p = previous;
2025 }
2026
2027 /* If the page is full, move it to the end. */
2028 else if (p->num_free_objects == 0)
2029 {
2030 /* Don't move it if it's already at the end. */
2031 if (p != G.page_tails[order])
2032 {
2033 /* Move p to the end of the list. */
2034 p->next = NULL;
2035 p->prev = G.page_tails[order];
2036 G.page_tails[order]->next = p;
2037
2038 /* Update the tail pointer... */
2039 G.page_tails[order] = p;
2040
2041 /* ... and the head pointer, if necessary. */
2042 if (! previous)
2043 G.pages[order] = next;
2044 else
2045 previous->next = next;
2046
2047 /* And update the backpointer in NEXT if necessary. */
2048 if (next)
2049 next->prev = previous;
2050
2051 p = previous;
2052 }
2053 }
2054
2055 /* If we've fallen through to here, it's a page in the
2056 topmost context that is neither full nor empty. Such a
2057 page must precede pages at lesser context depth in the
2058 list, so move it to the head. */
2059 else if (p != G.pages[order])
2060 {
2061 previous->next = p->next;
2062
2063 /* Update the backchain in the next node if it exists. */
2064 if (p->next)
2065 p->next->prev = previous;
2066
2067 /* Move P to the head of the list. */
2068 p->next = G.pages[order];
2069 p->prev = NULL;
2070 G.pages[order]->prev = p;
2071
2072 /* Update the head pointer. */
2073 G.pages[order] = p;
2074
2075 /* Are we moving the last element? */
2076 if (G.page_tails[order] == p)
2077 G.page_tails[order] = previous;
2078 p = previous;
2079 }
2080
2081 previous = p;
2082 p = next;
2083 }
2084 while (! done);
2085
2086 /* Now, restore the in_use_p vectors for any pages from contexts
2087 other than the current one. */
2088 for (p = G.pages[order]; p; p = p->next)
2089 if (p->context_depth != G.context_depth)
2090 ggc_recalculate_in_use_p (p);
2091 }
2092 }
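
/* A minimal illustrative sketch, not called anywhere, of the
   relinking sweep_pages performs: move node P, whose predecessor is
   PREV (NULL when P is the head), to the tail of a singly linked
   list with an explicit tail pointer.  The real page lists also
   maintain back pointers; example_node and example_move_to_tail are
   hypothetical.  */
struct example_node { struct example_node *next; };

static void
example_move_to_tail (struct example_node **head,
		      struct example_node **tail,
		      struct example_node *prev,
		      struct example_node *p)
{
  if (p == *tail)
    return;			/* Already last.  */
  if (prev)			/* Unlink P ...  */
    prev->next = p->next;
  else
    *head = p->next;
  p->next = NULL;		/* ... and append it.  */
  (*tail)->next = p;
  *tail = p;
}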
2093
2094 #ifdef ENABLE_GC_CHECKING
2095 /* Clobber all free objects. */
2096
2097 static void
2098 poison_pages (void)
2099 {
2100 unsigned order;
2101
2102 for (order = 2; order < NUM_ORDERS; order++)
2103 {
2104 size_t size = OBJECT_SIZE (order);
2105 page_entry *p;
2106
2107 for (p = G.pages[order]; p != NULL; p = p->next)
2108 {
2109 size_t num_objects;
2110 size_t i;
2111
2112 if (p->context_depth != G.context_depth)
2113 /* Since we don't do any collection for pages in pushed
2114 contexts, there's no need to do any poisoning. And
2115 besides, the IN_USE_P array isn't valid until we pop
2116 contexts. */
2117 continue;
2118
2119 num_objects = OBJECTS_IN_PAGE (p);
2120 for (i = 0; i < num_objects; i++)
2121 {
2122 size_t word, bit;
2123 word = i / HOST_BITS_PER_LONG;
2124 bit = i % HOST_BITS_PER_LONG;
2125 if (((p->in_use_p[word] >> bit) & 1) == 0)
2126 {
2127 char *object = p->page + i * size;
2128
2129 		  /* Keep poison-by-write when we expect to use Valgrind,
2130 		     so that exactly the same memory semantics are kept, in
2131 		     case there are memory errors.  We override this request
2132 		     below.  */
2133 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2134 size));
2135 memset (object, 0xa5, size);
2136
2137 /* Drop the handle to avoid handle leak. */
2138 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2139 }
2140 }
2141 }
2142 }
2143 }
2144 #else
2145 #define poison_pages()
2146 #endif
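
/* A minimal illustrative sketch, not called anywhere, of the
   addressing used above: object I on a page of order ORDER lives at
   PAGE + I * OBJECT_SIZE (ORDER), and its liveness is bit
   I % HOST_BITS_PER_LONG of bitmap word I / HOST_BITS_PER_LONG.
   example_object_is_free is hypothetical.  */
static bool
example_object_is_free (const page_entry *p, size_t i)
{
  size_t word = i / HOST_BITS_PER_LONG;
  size_t bit = i % HOST_BITS_PER_LONG;
  return ((p->in_use_p[word] >> bit) & 1) == 0;
}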
2147
2148 #ifdef ENABLE_GC_ALWAYS_COLLECT
2149 /* Validate that the reportedly free objects actually are. */
2150
2151 static void
2152 validate_free_objects (void)
2153 {
2154 struct free_object *f, *next, *still_free = NULL;
2155
2156 for (f = G.free_object_list; f ; f = next)
2157 {
2158 page_entry *pe = lookup_page_table_entry (f->object);
2159 size_t bit, word;
2160
2161 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2162 word = bit / HOST_BITS_PER_LONG;
2163 bit = bit % HOST_BITS_PER_LONG;
2164 next = f->next;
2165
2166 /* Make certain it isn't visible from any root. Notice that we
2167 do this check before sweep_pages merges save_in_use_p. */
2168 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2169
2170 /* If the object comes from an outer context, then retain the
2171 free_object entry, so that we can verify that the address
2172 isn't live on the stack in some outer context. */
2173 if (pe->context_depth != G.context_depth)
2174 {
2175 f->next = still_free;
2176 still_free = f;
2177 }
2178 else
2179 free (f);
2180 }
2181
2182 G.free_object_list = still_free;
2183 }
2184 #else
2185 #define validate_free_objects()
2186 #endif
2187
2188 /* Top level mark-and-sweep routine. */
2189
2190 void
2191 ggc_collect (enum ggc_collect mode)
2192 {
2193 /* Avoid frequent unnecessary work by skipping collection if the
2194 total allocations haven't expanded much since the last
2195 collection. */
2196 float allocated_last_gc =
2197 MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K);
2198
2199   /* It is also a good time to bring the memory block pool back within its limits.  */
2200 memory_block_pool::trim ();
2201
2202 float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
2203 if (mode == GGC_COLLECT_HEURISTIC
2204 && G.allocated < allocated_last_gc + min_expand)
2205 return;
2206
2207 timevar_push (TV_GC);
2208 if (GGC_DEBUG_LEVEL >= 2)
2209 fprintf (G.debug_file, "BEGIN COLLECTING\n");
2210
2211 /* Zero the total allocated bytes. This will be recalculated in the
2212 sweep phase. */
2213 size_t allocated = G.allocated;
2214 G.allocated = 0;
2215
2216 /* Release the pages we freed the last time we collected, but didn't
2217 reuse in the interim. */
2218 release_pages ();
2219
2220 /* Output this later so we do not interfere with release_pages. */
2221 if (!quiet_flag)
2222 fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated));
2223
2224 /* Indicate that we've seen collections at this context depth. */
2225 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2226
2227 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2228
2229 in_gc = true;
2230 clear_marks ();
2231 ggc_mark_roots ();
2232 ggc_handle_finalizers ();
2233
2234 if (GATHER_STATISTICS)
2235 ggc_prune_overhead_list ();
2236
2237 poison_pages ();
2238 validate_free_objects ();
2239 sweep_pages ();
2240
2241 in_gc = false;
2242 G.allocated_last_gc = G.allocated;
2243
2244 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2245
2246 timevar_pop (TV_GC);
2247
2248 if (!quiet_flag)
2249 fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated));
2250 if (GGC_DEBUG_LEVEL >= 2)
2251 fprintf (G.debug_file, "END COLLECTING\n");
2252 }
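
/* A minimal illustrative sketch, not called anywhere, of the trigger
   computed at the top of ggc_collect: with --param ggc-min-heapsize=H
   (in kB) and --param ggc-min-expand=E (a percentage), a heuristic
   collection runs only once the live allocation exceeds
   MAX (bytes live at the last GC, H * 1024) by E percent.
   example_should_collect is hypothetical.  */
static bool
example_should_collect (size_t allocated, size_t allocated_last_gc,
			size_t min_heapsize_kb, int min_expand_pct)
{
  size_t base = MAX (allocated_last_gc, min_heapsize_kb * (size_t) 1024);
  return allocated >= base + (size_t) ((double) base * min_expand_pct / 100);
}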
2253
2254 /* Return free pages to the system. */
2255
2256 void
2257 ggc_trim ()
2258 {
2259 timevar_push (TV_GC);
2260 G.allocated = 0;
2261 sweep_pages ();
2262 release_pages ();
2263 if (!quiet_flag)
2264 fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}",
2265 SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped));
2266 timevar_pop (TV_GC);
2267 }
2268
2269 /* Assume that all GGC memory is reachable and grow the limits for the next
2270    collection.  With checking, trigger a collection so that -Q compilation
2271    reports how much memory really is reachable.  */
2272
2273 void
2274 ggc_grow (void)
2275 {
2276 if (!flag_checking)
2277 G.allocated_last_gc = MAX (G.allocated_last_gc,
2278 G.allocated);
2279 else
2280 ggc_collect ();
2281 if (!quiet_flag)
2282 fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated));
2283 }
2284
2285 void
2286 ggc_print_statistics (void)
2287 {
2288 struct ggc_statistics stats;
2289 unsigned int i;
2290 size_t total_overhead = 0;
2291
2292 /* Clear the statistics. */
2293 memset (&stats, 0, sizeof (stats));
2294
2295 /* Make sure collection will really occur. */
2296 G.allocated_last_gc = 0;
2297
2298 /* Collect and print the statistics common across collectors. */
2299 ggc_print_common_statistics (stderr, &stats);
2300
2301 /* Release free pages so that we will not count the bytes allocated
2302 there as part of the total allocated memory. */
2303 release_pages ();
2304
2305 /* Collect some information about the various sizes of
2306 allocation. */
2307 fprintf (stderr,
2308 "Memory still allocated at the end of the compilation process\n");
2309 fprintf (stderr, "%-8s %10s %10s %10s\n",
2310 "Size", "Allocated", "Used", "Overhead");
2311 for (i = 0; i < NUM_ORDERS; ++i)
2312 {
2313 page_entry *p;
2314 size_t allocated;
2315 size_t in_use;
2316 size_t overhead;
2317
2318 /* Skip empty entries. */
2319 if (!G.pages[i])
2320 continue;
2321
2322 overhead = allocated = in_use = 0;
2323
2324 /* Figure out the total number of bytes allocated for objects of
2325 this size, and how many of them are actually in use. Also figure
2326 out how much memory the page table is using. */
2327 for (p = G.pages[i]; p; p = p->next)
2328 {
2329 allocated += p->bytes;
2330 in_use +=
2331 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2332
2333 overhead += (sizeof (page_entry) - sizeof (long)
2334 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2335 }
2336 fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
2337 PRsa (10) "\n",
2338 (uint64_t)OBJECT_SIZE (i),
2339 SIZE_AMOUNT (allocated),
2340 SIZE_AMOUNT (in_use),
2341 SIZE_AMOUNT (overhead));
2342 total_overhead += overhead;
2343 }
2344 fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
2345 "Total",
2346 SIZE_AMOUNT (G.bytes_mapped),
2347 SIZE_AMOUNT (G.allocated),
2348 SIZE_AMOUNT (total_overhead));
2349
2350 if (GATHER_STATISTICS)
2351 {
2352 fprintf (stderr, "\nTotal allocations and overheads during "
2353 "the compilation process\n");
2354
2355 fprintf (stderr, "Total Overhead: "
2356 PRsa (9) "\n",
2357 SIZE_AMOUNT (G.stats.total_overhead));
2358 fprintf (stderr, "Total Allocated: "
2359 PRsa (9) "\n",
2360 SIZE_AMOUNT (G.stats.total_allocated));
2361
2362 fprintf (stderr, "Total Overhead under 32B: "
2363 PRsa (9) "\n",
2364 SIZE_AMOUNT (G.stats.total_overhead_under32));
2365 fprintf (stderr, "Total Allocated under 32B: "
2366 PRsa (9) "\n",
2367 SIZE_AMOUNT (G.stats.total_allocated_under32));
2368 fprintf (stderr, "Total Overhead under 64B: "
2369 PRsa (9) "\n",
2370 SIZE_AMOUNT (G.stats.total_overhead_under64));
2371 fprintf (stderr, "Total Allocated under 64B: "
2372 PRsa (9) "\n",
2373 SIZE_AMOUNT (G.stats.total_allocated_under64));
2374 fprintf (stderr, "Total Overhead under 128B: "
2375 PRsa (9) "\n",
2376 SIZE_AMOUNT (G.stats.total_overhead_under128));
2377 fprintf (stderr, "Total Allocated under 128B: "
2378 PRsa (9) "\n",
2379 SIZE_AMOUNT (G.stats.total_allocated_under128));
2380
2381 for (i = 0; i < NUM_ORDERS; i++)
2382 if (G.stats.total_allocated_per_order[i])
2383 {
2384 fprintf (stderr, "Total Overhead page size %9" PRIu64 ": "
2385 PRsa (9) "\n",
2386 (uint64_t)OBJECT_SIZE (i),
2387 SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
2388 fprintf (stderr, "Total Allocated page size %9" PRIu64 ": "
2389 PRsa (9) "\n",
2390 (uint64_t)OBJECT_SIZE (i),
2391 SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
2392 }
2393 }
2394 }
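
/* A minimal illustrative sketch, not called anywhere, of the
   over-allocation the overhead figures above account for: page_entry
   ends in a one-element in_use_p array, so an entry tracking
   NUM_OBJS objects is allocated with enough extra bytes for the
   whole bitmap, including the one-past-the-end bit.  This is the
   same expression ggc_pch_read uses below; example_alloc_entry is
   hypothetical.  */
static page_entry *
example_alloc_entry (size_t num_objs)
{
  return XCNEWVAR (page_entry, (sizeof (page_entry)
				- sizeof (long)
				+ BITMAP_SIZE (num_objs + 1)));
}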
2395
2396 struct ggc_pch_ondisk
2398 {
2399 unsigned totals[NUM_ORDERS];
2400 };
2401
2402 struct ggc_pch_data
2403 {
2404 struct ggc_pch_ondisk d;
2405 uintptr_t base[NUM_ORDERS];
2406 size_t written[NUM_ORDERS];
2407 };
2408
2409 struct ggc_pch_data *
2410 init_ggc_pch (void)
2411 {
2412 return XCNEW (struct ggc_pch_data);
2413 }
2414
2415 void
2416 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2417 size_t size, bool is_string ATTRIBUTE_UNUSED)
2418 {
2419 unsigned order;
2420
2421 if (size < NUM_SIZE_LOOKUP)
2422 order = size_lookup[size];
2423 else
2424 {
2425 order = 10;
2426 while (size > OBJECT_SIZE (order))
2427 order++;
2428 }
2429
2430 d->d.totals[order]++;
2431 }
2432
2433 size_t
2434 ggc_pch_total_size (struct ggc_pch_data *d)
2435 {
2436 size_t a = 0;
2437 unsigned i;
2438
2439 for (i = 0; i < NUM_ORDERS; i++)
2440 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2441 return a;
2442 }
2443
2444 void
2445 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2446 {
2447 uintptr_t a = (uintptr_t) base;
2448 unsigned i;
2449
2450 for (i = 0; i < NUM_ORDERS; i++)
2451 {
2452 d->base[i] = a;
2453 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2454 }
2455 }
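
/* A minimal illustrative sketch, not called anywhere, of the layout
   ggc_pch_total_size and ggc_pch_this_base compute together: each
   order's objects occupy one contiguous region, regions are rounded
   up to a page boundary (ALIGN is assumed to be a power of two) and
   laid out back to back from START.  example_layout_regions is
   hypothetical.  */
static void
example_layout_regions (uintptr_t start, const unsigned *totals,
			const size_t *object_sizes, unsigned n_orders,
			uintptr_t align, uintptr_t *bases)
{
  for (unsigned i = 0; i < n_orders; i++)
    {
      bases[i] = start;
      uintptr_t bytes = (uintptr_t) totals[i] * object_sizes[i];
      start += (bytes + align - 1) & ~(align - 1);	/* PAGE_ALIGN.  */
    }
}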
2456
2457
2458 char *
2459 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2460 size_t size, bool is_string ATTRIBUTE_UNUSED)
2461 {
2462 unsigned order;
2463 char *result;
2464
2465 if (size < NUM_SIZE_LOOKUP)
2466 order = size_lookup[size];
2467 else
2468 {
2469 order = 10;
2470 while (size > OBJECT_SIZE (order))
2471 order++;
2472 }
2473
2474 result = (char *) d->base[order];
2475 d->base[order] += OBJECT_SIZE (order);
2476 return result;
2477 }
2478
2479 void
2480 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2481 FILE *f ATTRIBUTE_UNUSED)
2482 {
2483 /* Nothing to do. */
2484 }
2485
2486 void
2487 ggc_pch_write_object (struct ggc_pch_data *d,
2488 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2489 size_t size, bool is_string ATTRIBUTE_UNUSED)
2490 {
2491 unsigned order;
2492 static const char emptyBytes[256] = { 0 };
2493
2494 if (size < NUM_SIZE_LOOKUP)
2495 order = size_lookup[size];
2496 else
2497 {
2498 order = 10;
2499 while (size > OBJECT_SIZE (order))
2500 order++;
2501 }
2502
2503 if (fwrite (x, size, 1, f) != 1)
2504 fatal_error (input_location, "cannot write PCH file: %m");
2505
2506 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2507 object out to OBJECT_SIZE(order). This happens for strings. */
2508
2509 if (size != OBJECT_SIZE (order))
2510 {
2511 unsigned padding = OBJECT_SIZE (order) - size;
2512
2513 /* To speed small writes, we use a nulled-out array that's larger
2514 than most padding requests as the source for our null bytes. This
2515 permits us to do the padding with fwrite() rather than fseek(), and
2516 limits the chance the OS may try to flush any outstanding writes. */
2517 if (padding <= sizeof (emptyBytes))
2518 {
2519 if (fwrite (emptyBytes, 1, padding, f) != padding)
2520 fatal_error (input_location, "cannot write PCH file");
2521 }
2522 else
2523 {
2524 /* Larger than our buffer? Just default to fseek. */
2525 if (fseek (f, padding, SEEK_CUR) != 0)
2526 fatal_error (input_location, "cannot write PCH file");
2527 }
2528 }
2529
2530 d->written[order]++;
2531 if (d->written[order] == d->d.totals[order]
2532 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2533 G.pagesize),
2534 SEEK_CUR) != 0)
2535 fatal_error (input_location, "cannot write PCH file: %m");
2536 }
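
/* A minimal illustrative sketch, not called anywhere, of the padding
   strategy above: write SIZE bytes of X, then pad out to BUCKET
   bytes, preferring fwrite from a small zeroed buffer for short pads
   and falling back to fseek for long ones.  Returns 0 on success;
   example_write_padded is hypothetical.  */
static int
example_write_padded (FILE *f, const void *x, size_t size, size_t bucket)
{
  static const char zeros[256] = { 0 };

  if (fwrite (x, size, 1, f) != 1)
    return -1;
  size_t padding = bucket - size;
  if (padding == 0)
    return 0;
  if (padding <= sizeof (zeros))
    return fwrite (zeros, 1, padding, f) == padding ? 0 : -1;
  return fseek (f, (long) padding, SEEK_CUR) == 0 ? 0 : -1;
}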
2537
2538 void
2539 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2540 {
2541 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2542 fatal_error (input_location, "cannot write PCH file: %m");
2543 free (d);
2544 }
2545
2546 /* Move the PCH PTE entries, just added at the end of by_depth, to
2547    the front.  */
2548
2549 static void
2550 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2551 {
2552 /* First, we swap the new entries to the front of the varrays. */
2553 page_entry **new_by_depth;
2554 unsigned long **new_save_in_use;
2555
2556 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2557 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2558
2559 memcpy (&new_by_depth[0],
2560 &G.by_depth[count_old_page_tables],
2561 count_new_page_tables * sizeof (void *));
2562 memcpy (&new_by_depth[count_new_page_tables],
2563 &G.by_depth[0],
2564 count_old_page_tables * sizeof (void *));
2565 memcpy (&new_save_in_use[0],
2566 &G.save_in_use[count_old_page_tables],
2567 count_new_page_tables * sizeof (void *));
2568 memcpy (&new_save_in_use[count_new_page_tables],
2569 &G.save_in_use[0],
2570 count_old_page_tables * sizeof (void *));
2571
2572 free (G.by_depth);
2573 free (G.save_in_use);
2574
2575 G.by_depth = new_by_depth;
2576 G.save_in_use = new_save_in_use;
2577
2578 /* Now update all the index_by_depth fields. */
2579 for (unsigned i = G.by_depth_in_use; i--;)
2580 {
2581 page_entry *p = G.by_depth[i];
2582 p->index_by_depth = i;
2583 }
2584
2585 	 /* And last, we update the depth pointers in G.depth.  The first
2586 	    entry is already 0, and context 0 entries always start at index
2587 	    0, so there is nothing to update in the first slot.  We need a
2588 	    second slot only if we have old PTEs, and if we do, they start
2589 	    at index count_new_page_tables.  */
2590 if (count_old_page_tables)
2591 push_depth (count_new_page_tables);
2592 }
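
/* A minimal illustrative sketch, not called anywhere, of the rotation
   move_ptes_to_front performs on by_depth and save_in_use: move the
   N_NEW trailing entries of an array in front of the N_OLD leading
   ones, preserving the relative order within each group.
   example_rotate_to_front is hypothetical.  */
static void
example_rotate_to_front (void **array, size_t n_old, size_t n_new)
{
  void **scratch = XNEWVEC (void *, n_old + n_new);

  memcpy (scratch, array + n_old, n_new * sizeof (void *));
  memcpy (scratch + n_new, array, n_old * sizeof (void *));
  memcpy (array, scratch, (n_old + n_new) * sizeof (void *));
  free (scratch);
}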
2593
2594 void
2595 ggc_pch_read (FILE *f, void *addr)
2596 {
2597 struct ggc_pch_ondisk d;
2598 unsigned i;
2599 char *offs = (char *) addr;
2600 unsigned long count_old_page_tables;
2601 unsigned long count_new_page_tables;
2602
2603 count_old_page_tables = G.by_depth_in_use;
2604
2605 if (fread (&d, sizeof (d), 1, f) != 1)
2606 fatal_error (input_location, "cannot read PCH file: %m");
2607
2608 /* We've just read in a PCH file. So, every object that used to be
2609 allocated is now free. */
2610 clear_marks ();
2611 #ifdef ENABLE_GC_CHECKING
2612 poison_pages ();
2613 #endif
2614 /* Since we free all the allocated objects, the free list becomes
2615 useless. Validate it now, which will also clear it. */
2616 validate_free_objects ();
2617
2618 /* No object read from a PCH file should ever be freed. So, set the
2619 context depth to 1, and set the depth of all the currently-allocated
2620 pages to be 1 too. PCH pages will have depth 0. */
2621 gcc_assert (!G.context_depth);
2622 G.context_depth = 1;
2623 /* Allocate space for the depth 1 finalizers. */
2624 G.finalizers.safe_push (vNULL);
2625 G.vec_finalizers.safe_push (vNULL);
2626 gcc_assert (G.finalizers.length() == 2);
2627 for (i = 0; i < NUM_ORDERS; i++)
2628 {
2629 page_entry *p;
2630 for (p = G.pages[i]; p != NULL; p = p->next)
2631 p->context_depth = G.context_depth;
2632 }
2633
2634 /* Allocate the appropriate page-table entries for the pages read from
2635 the PCH file. */
2636
2637 for (i = 0; i < NUM_ORDERS; i++)
2638 {
2639 struct page_entry *entry;
2640 char *pte;
2641 size_t bytes;
2642 size_t num_objs;
2643 size_t j;
2644
2645 if (d.totals[i] == 0)
2646 continue;
2647
2648 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2649 num_objs = bytes / OBJECT_SIZE (i);
2650 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2651 - sizeof (long)
2652 + BITMAP_SIZE (num_objs + 1)));
2653 entry->bytes = bytes;
2654 entry->page = offs;
2655 entry->context_depth = 0;
2656 offs += bytes;
2657 entry->num_free_objects = 0;
2658 entry->order = i;
2659
2660 for (j = 0;
2661 j + HOST_BITS_PER_LONG <= num_objs + 1;
2662 j += HOST_BITS_PER_LONG)
2663 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2664 for (; j < num_objs + 1; j++)
2665 entry->in_use_p[j / HOST_BITS_PER_LONG]
2666 |= 1L << (j % HOST_BITS_PER_LONG);
2667
2668 for (pte = entry->page;
2669 pte < entry->page + entry->bytes;
2670 pte += G.pagesize)
2671 set_page_table_entry (pte, entry);
2672
2673 if (G.page_tails[i] != NULL)
2674 G.page_tails[i]->next = entry;
2675 else
2676 G.pages[i] = entry;
2677 G.page_tails[i] = entry;
2678
2679 	 /* We start off by just adding all the new information to the
2680 	    end of the varrays; later, we will move the new information
2681 	    to the front of the varrays, as the PCH page tables are at
2682 	    context 0.  */
2683 push_by_depth (entry, 0);
2684 }
2685
2686 /* Now, we update the various data structures that speed page table
2687 handling. */
2688 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2689
2690 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2691
2692 /* Update the statistics. */
2693 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2694 }
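
/* A minimal illustrative sketch, not called anywhere, of the bitmap
   initialization in ggc_pch_read above: mark bits 0 .. NBITS-1 of a
   zero-initialized bitmap as set, filling whole words first and
   finishing the partial word bit by bit.  example_set_all_bits is
   hypothetical.  */
static void
example_set_all_bits (unsigned long *bitmap, size_t nbits)
{
  size_t j;

  for (j = 0; j + HOST_BITS_PER_LONG <= nbits; j += HOST_BITS_PER_LONG)
    bitmap[j / HOST_BITS_PER_LONG] = ~0UL;
  for (; j < nbits; j++)
    bitmap[j / HOST_BITS_PER_LONG] |= 1UL << (j % HOST_BITS_PER_LONG);
}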
2695