/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "alias.h"
#include "tree.h"
#include "rtl.h"
#include "memmodel.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "cgraph.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
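
/* For example, an 11-byte request is rounded up to 16 bytes (order 4,
   assuming the default power-of-two orders) and is served from the
   head page of the order-4 list; only that page's in-use bitmap is
   updated to mark the new object live.  */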

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
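
/* For example, on a 32-bit host with 4K pages (G.lg_pagesize == 12 and
   so PAGE_L2_BITS == 12), the address 0x12345678 is looked up as
   base[LOOKUP_L1 (p)][LOOKUP_L2 (p)] == base[0x12][0x345]: bits 31-24
   select the first level and bits 23-12 the second.  */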

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
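
/* For example, for Z == OBJECT_SIZE (order) == 8, the pair mult == 1,
   shift == 3 satisfies the identity above: an object at byte offset 40
   within its page maps to bit (40 * 1) >> 3 == 5 of the bitmap, i.e.
   40 / 8.  compute_inverse (declared below) is responsible for deriving
   such a pair for each order, including the non-power-of-two ones.  */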

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
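
/* On a typical 64-bit host both union members are eight bytes, so
   MAX_ALIGNMENT is 8 and the extra-order sizes below are multiples
   of 8.  */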


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (class loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Round X to next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
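
/* For example, ROUND_UP_VALUE (10, 8) == 6, since 10 + 6 == 16 is the
   next multiple of 8, and PAGE_ALIGN (5000) == 8192 when G.pagesize
   is 4096.  */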

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page entry
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32 bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};
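
/* For instance (a hypothetical finalizer; the real ones arrive as the
   F argument of ggc_internal_alloc and are recorded by add_finalizer
   below):

     static void fini_foo (void *p) { static_cast<foo *> (p)->~foo (); }

   a finalizer (obj, fini_foo) records obj, and call () later invokes
   fini_foo (obj).  */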

class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry; all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
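
/* For example, with 64-bit longs a page holding 100 objects needs
   BITMAP_SIZE (100 + 1) == 2 longs == 16 bytes; the extra bit is the
   one-past-the-end sentry that alloc_page sets below.  */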

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Traverse the page table and find the entry for a page.
   If the object wasn't allocated by the GC, return NULL.  */

static inline page_entry *
safe_lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
  if (! base[L1])
    return NULL;

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
        return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}
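
/* For example, with 4K pages the page that starts 12288 bytes into the
   group's allocation has index 12288 >> 12 == 3, i.e. bit 3 of the
   group's in_use mask below.  */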

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
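
  /* For example, with 100 objects and 64-bit longs the line above sets
     bit 100 % 64 == 36 of in_use_p[1], the 101st bit overall, so that
     a next_bit_hint incremented past the last real object still lands
     on a bit that reads as allocated.  */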

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order),
	     (void *) page, (void *) (page + entry_size - 1));

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     (void *) entry->page, (void *) (entry->page + entry->bytes - 1));

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
  size_t n1 = 0;
  size_t n2 = 0;
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger contiguous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          if (!p->discarded)
            mapped_len += p->bytes;
          newprev = p;
          p = p->next;
        }
      if (len >= free_unit)
        {
          while (start_p != p)
            {
              next = start_p->next;
              free (start_p);
              start_p = next;
            }
          munmap (start, len);
          if (prev)
            prev->next = p;
          else
            G.free_pages = p;
          G.bytes_mapped -= mapped_len;
          n1 += len;
          continue;
        }
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
        {
          p = p->next;
          continue;
        }
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          p = p->next;
        }
      /* Give the page back to the kernel, but don't free the mapping.
         This avoids fragmentation in the virtual memory map of the
         process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped, so as not to disturb the
         garbage collector's accounting unnecessarily.  */
      G.bytes_mapped -= len;
      n2 += len;
      while (start_p != p)
        {
          start_p->discarded = true;
          start_p = start_p->next;
        }
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      n1 += len;
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	n1 += g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
  if (!quiet_flag && (n1 || n2))
    {
      fprintf (stderr, " {GC");
      if (n1)
	fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1));
      if (n2)
	fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2));
      fprintf (stderr, "}");
    }
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
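
/* For example, size_lookup[11] == 4, so an 11-byte request is served
   from order 4 (a 16-byte object, assuming the default power-of-two
   orders); requests of NUM_SIZE_LOOKUP bytes or more fall through to
   the linear search in ggc_round_alloc_size_1 below.  */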
   1214  1.1  mrg 
   1215  1.1  mrg /* For a given size of memory requested for allocation, return the
   1216  1.1  mrg    actual size that is going to be allocated, as well as the size
   1217  1.1  mrg    order.  */
   1218  1.1  mrg 
   1219  1.1  mrg static void
   1220  1.1  mrg ggc_round_alloc_size_1 (size_t requested_size,
   1221  1.1  mrg 			size_t *size_order,
   1222  1.1  mrg 			size_t *alloced_size)
   1223  1.1  mrg {
   1224  1.1  mrg   size_t order, object_size;
   1225  1.1  mrg 
   1226  1.1  mrg   if (requested_size < NUM_SIZE_LOOKUP)
   1227  1.1  mrg     {
   1228  1.1  mrg       order = size_lookup[requested_size];
   1229  1.1  mrg       object_size = OBJECT_SIZE (order);
   1230  1.1  mrg     }
   1231  1.1  mrg   else
   1232  1.1  mrg     {
   1233  1.1  mrg       order = 10;
   1234  1.1  mrg       while (requested_size > (object_size = OBJECT_SIZE (order)))
   1235  1.1  mrg         order++;
   1236  1.1  mrg     }
   1237  1.1  mrg 
   1238  1.1  mrg   if (size_order)
   1239  1.1  mrg     *size_order = order;
   1240  1.1  mrg   if (alloced_size)
   1241  1.1  mrg     *alloced_size = object_size;
   1242  1.1  mrg }
   1243  1.1  mrg 
   1244  1.1  mrg /* For a given size of memory requested for allocation, return the
   1245  1.1  mrg    actual size that is going to be allocated.  */
   1246  1.1  mrg 
   1247  1.1  mrg size_t
   1248  1.1  mrg ggc_round_alloc_size (size_t requested_size)
   1249  1.1  mrg {
   1250  1.1  mrg   size_t size = 0;
   1251  1.1  mrg 
   1252  1.1  mrg   ggc_round_alloc_size_1 (requested_size, NULL, &size);
   1253  1.1  mrg   return size;
   1254  1.1  mrg }
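
/* A minimal usage sketch, assuming the default power-of-two orders
   (before init_ggc retargets entries to the special orders):

     ggc_round_alloc_size (1)   ==    8
     ggc_round_alloc_size (24)  ==   32
     ggc_round_alloc_size (513) == 1024

   Callers use this to learn how much usable space a request really
   provides, e.g. when sizing a growable buffer.  */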
   1255  1.1  mrg 
   1256  1.1  mrg /* Push a finalizer onto the appropriate vec.  */
   1257  1.1  mrg 
   1258  1.1  mrg static void
   1259  1.1  mrg add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
   1260  1.1  mrg {
   1261  1.1  mrg   if (f == NULL)
   1262  1.1  mrg     /* No finalizer.  */;
   1263  1.1  mrg   else if (n == 1)
   1264  1.1  mrg     {
   1265  1.1  mrg       finalizer fin (result, f);
   1266  1.1  mrg       G.finalizers[G.context_depth].safe_push (fin);
   1267  1.1  mrg     }
   1268  1.1  mrg   else
   1269  1.1  mrg     {
   1270  1.1  mrg       vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
   1271  1.1  mrg       G.vec_finalizers[G.context_depth].safe_push (fin);
   1272  1.1  mrg     }
   1273  1.1  mrg }
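
/* A hypothetical sketch of how a finalizer reaches this point.  Real
   callers normally go through the ggc_alloc<T> templates in ggc.h, which
   pass a finalizing thunk automatically for types that need one; MY_DATA
   and my_finalizer below are illustrative names only.  */
#if 0
static void
my_finalizer (void *p)
{
  /* Run the cleanup the object needs before its page is recycled.  */
  ((struct my_data *) p)->~my_data ();
}

void *obj = ggc_internal_alloc (sizeof (struct my_data), my_finalizer,
				sizeof (struct my_data), 1);
#endif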
   1274  1.1  mrg 
   1275  1.1  mrg /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
   1276  1.1  mrg 
   1277  1.1  mrg void *
   1278  1.1  mrg ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
   1279  1.1  mrg 		    MEM_STAT_DECL)
   1280  1.1  mrg {
   1281  1.1  mrg   size_t order, word, bit, object_offset, object_size;
   1282  1.1  mrg   struct page_entry *entry;
   1283  1.1  mrg   void *result;
   1284  1.1  mrg 
   1285  1.1  mrg   ggc_round_alloc_size_1 (size, &order, &object_size);
   1286  1.1  mrg 
   1287  1.1  mrg   /* If there are non-full pages for this size allocation, they are at
   1288  1.1  mrg      the head of the list.  */
   1289  1.1  mrg   entry = G.pages[order];
   1290  1.1  mrg 
   1291  1.1  mrg   /* If there is no page for this object size, or all pages in this
   1292  1.1  mrg      context are full, allocate a new page.  */
   1293  1.1  mrg   if (entry == NULL || entry->num_free_objects == 0)
   1294  1.1  mrg     {
   1295  1.1  mrg       struct page_entry *new_entry;
   1296  1.1  mrg       new_entry = alloc_page (order);
   1297  1.1  mrg 
   1298  1.1  mrg       new_entry->index_by_depth = G.by_depth_in_use;
   1299  1.1  mrg       push_by_depth (new_entry, 0);
   1300  1.1  mrg 
    1301  1.1  mrg       /* We can skip context depths; if we do, make sure we go all the
    1302  1.1  mrg 	 way to the new depth.  */
   1303  1.1  mrg       while (new_entry->context_depth >= G.depth_in_use)
   1304  1.1  mrg 	push_depth (G.by_depth_in_use-1);
   1305  1.1  mrg 
   1306  1.1  mrg       /* If this is the only entry, it's also the tail.  If it is not
   1307  1.1  mrg 	 the only entry, then we must update the PREV pointer of the
   1308  1.1  mrg 	 ENTRY (G.pages[order]) to point to our new page entry.  */
   1309  1.1  mrg       if (entry == NULL)
   1310  1.1  mrg 	G.page_tails[order] = new_entry;
   1311  1.1  mrg       else
   1312  1.1  mrg 	entry->prev = new_entry;
   1313  1.1  mrg 
    1314  1.1  mrg       /* Put new pages at the head of the page list.  By definition the
    1315  1.1  mrg 	 entry at the head of the list always has a NULL PREV pointer.  */
   1316  1.1  mrg       new_entry->next = entry;
   1317  1.1  mrg       new_entry->prev = NULL;
   1318  1.1  mrg       entry = new_entry;
   1319  1.1  mrg       G.pages[order] = new_entry;
   1320  1.1  mrg 
   1321  1.1  mrg       /* For a new page, we know the word and bit positions (in the
   1322  1.1  mrg 	 in_use bitmap) of the first available object -- they're zero.  */
   1323  1.1  mrg       new_entry->next_bit_hint = 1;
   1324  1.1  mrg       word = 0;
   1325  1.1  mrg       bit = 0;
   1326  1.1  mrg       object_offset = 0;
   1327  1.1  mrg     }
   1328  1.1  mrg   else
   1329  1.1  mrg     {
   1330  1.1  mrg       /* First try to use the hint left from the previous allocation
   1331  1.1  mrg 	 to locate a clear bit in the in-use bitmap.  We've made sure
   1332  1.1  mrg 	 that the one-past-the-end bit is always set, so if the hint
   1333  1.1  mrg 	 has run over, this test will fail.  */
   1334  1.1  mrg       unsigned hint = entry->next_bit_hint;
   1335  1.1  mrg       word = hint / HOST_BITS_PER_LONG;
   1336  1.1  mrg       bit = hint % HOST_BITS_PER_LONG;
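      /* For instance, with 64-bit longs a hint of 70 denotes word 1,
	 bit 6 of the in-use bitmap.  */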
   1337  1.1  mrg 
   1338  1.1  mrg       /* If the hint didn't work, scan the bitmap from the beginning.  */
   1339  1.1  mrg       if ((entry->in_use_p[word] >> bit) & 1)
   1340  1.1  mrg 	{
   1341  1.1  mrg 	  word = bit = 0;
   1342  1.1  mrg 	  while (~entry->in_use_p[word] == 0)
   1343  1.1  mrg 	    ++word;
   1344  1.1  mrg 
   1345  1.1  mrg #if GCC_VERSION >= 3004
   1346  1.1  mrg 	  bit = __builtin_ctzl (~entry->in_use_p[word]);
   1347  1.1  mrg #else
   1348  1.1  mrg 	  while ((entry->in_use_p[word] >> bit) & 1)
   1349  1.1  mrg 	    ++bit;
   1350  1.1  mrg #endif
   1351  1.1  mrg 
   1352  1.1  mrg 	  hint = word * HOST_BITS_PER_LONG + bit;
   1353  1.1  mrg 	}
   1354  1.1  mrg 
   1355  1.1  mrg       /* Next time, try the next bit.  */
   1356  1.1  mrg       entry->next_bit_hint = hint + 1;
   1357  1.1  mrg 
   1358  1.1  mrg       object_offset = hint * object_size;
   1359  1.1  mrg     }
   1360  1.1  mrg 
   1361  1.1  mrg   /* Set the in-use bit.  */
   1362  1.1  mrg   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
   1363  1.1  mrg 
   1364  1.1  mrg   /* Keep a running total of the number of free objects.  If this page
   1365  1.1  mrg      fills up, we may have to move it to the end of the list if the
   1366  1.1  mrg      next page isn't full.  If the next page is full, all subsequent
   1367  1.1  mrg      pages are full, so there's no need to move it.  */
   1368  1.1  mrg   if (--entry->num_free_objects == 0
   1369  1.1  mrg       && entry->next != NULL
   1370  1.1  mrg       && entry->next->num_free_objects > 0)
   1371  1.1  mrg     {
   1372  1.1  mrg       /* We have a new head for the list.  */
   1373  1.1  mrg       G.pages[order] = entry->next;
   1374  1.1  mrg 
   1375  1.1  mrg       /* We are moving ENTRY to the end of the page table list.
   1376  1.1  mrg 	 The new page at the head of the list will have NULL in
   1377  1.1  mrg 	 its PREV field and ENTRY will have NULL in its NEXT field.  */
   1378  1.1  mrg       entry->next->prev = NULL;
   1379  1.1  mrg       entry->next = NULL;
   1380  1.1  mrg 
   1381  1.1  mrg       /* Append ENTRY to the tail of the list.  */
   1382  1.1  mrg       entry->prev = G.page_tails[order];
   1383  1.1  mrg       G.page_tails[order]->next = entry;
   1384  1.1  mrg       G.page_tails[order] = entry;
   1385  1.1  mrg     }
   1386  1.1  mrg 
   1387  1.1  mrg   /* Calculate the object's address.  */
   1388  1.1  mrg   result = entry->page + object_offset;
   1389  1.1  mrg   if (GATHER_STATISTICS)
   1390  1.1  mrg     ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
   1391  1.1  mrg 			 result FINAL_PASS_MEM_STAT);
   1392  1.1  mrg 
   1393  1.1  mrg #ifdef ENABLE_GC_CHECKING
    1394  1.1  mrg   /* Keep poisoning the object by writing 0xaf, so that the semantics stay
    1395  1.1  mrg      exactly the same in the presence of memory bugs, regardless of
    1396  1.1  mrg      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
    1397  1.1  mrg      handle to avoid a handle leak.  */
   1398  1.1  mrg   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
   1399  1.1  mrg 
   1400  1.1  mrg   /* `Poison' the entire allocated object, including any padding at
   1401  1.1  mrg      the end.  */
   1402  1.1  mrg   memset (result, 0xaf, object_size);
   1403  1.1  mrg 
    1404  1.1  mrg   /* Make the bytes after the end of the object inaccessible.  Discard the
    1405  1.1  mrg      handle to avoid a handle leak.  */
   1406  1.1  mrg   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
   1407  1.1  mrg 						object_size - size));
   1408  1.1  mrg #endif
   1409  1.1  mrg 
   1410  1.1  mrg   /* Tell Valgrind that the memory is there, but its content isn't
   1411  1.1  mrg      defined.  The bytes at the end of the object are still marked
    1412  1.1  mrg      inaccessible.  */
   1413  1.1  mrg   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
   1414  1.1  mrg 
   1415  1.1  mrg   /* Keep track of how many bytes are being allocated.  This
   1416  1.1  mrg      information is used in deciding when to collect.  */
   1417  1.1  mrg   G.allocated += object_size;
   1418  1.1  mrg 
   1419  1.1  mrg   /* For timevar statistics.  */
   1420  1.1  mrg   timevar_ggc_mem_total += object_size;
   1421  1.1  mrg 
   1422  1.1  mrg   if (f)
   1423  1.1  mrg     add_finalizer (result, f, s, n);
   1424  1.1  mrg 
   1425  1.1  mrg   if (GATHER_STATISTICS)
   1426  1.1  mrg     {
   1427  1.1  mrg       size_t overhead = object_size - size;
   1428  1.1  mrg 
   1429  1.1  mrg       G.stats.total_overhead += overhead;
   1430  1.1  mrg       G.stats.total_allocated += object_size;
   1431  1.1  mrg       G.stats.total_overhead_per_order[order] += overhead;
   1432  1.1  mrg       G.stats.total_allocated_per_order[order] += object_size;
   1433  1.1  mrg 
   1434  1.1  mrg       if (size <= 32)
   1435  1.1  mrg 	{
   1436  1.1  mrg 	  G.stats.total_overhead_under32 += overhead;
   1437  1.1  mrg 	  G.stats.total_allocated_under32 += object_size;
   1438  1.1  mrg 	}
   1439  1.1  mrg       if (size <= 64)
   1440  1.1  mrg 	{
   1441  1.1  mrg 	  G.stats.total_overhead_under64 += overhead;
   1442  1.1  mrg 	  G.stats.total_allocated_under64 += object_size;
   1443  1.1  mrg 	}
   1444  1.1  mrg       if (size <= 128)
   1445  1.1  mrg 	{
   1446  1.1  mrg 	  G.stats.total_overhead_under128 += overhead;
   1447  1.1  mrg 	  G.stats.total_allocated_under128 += object_size;
   1448  1.1  mrg 	}
   1449  1.1  mrg     }
   1450  1.1  mrg 
   1451  1.1  mrg   if (GGC_DEBUG_LEVEL >= 3)
   1452  1.1  mrg     fprintf (G.debug_file,
   1453  1.1  mrg 	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
   1454  1.1  mrg 	     (unsigned long) size, (unsigned long) object_size, result,
   1455  1.1  mrg 	     (void *) entry);
   1456  1.1  mrg 
   1457  1.1  mrg   return result;
   1458  1.1  mrg }
   1459  1.1  mrg 
   1460  1.1  mrg /* Mark function for strings.  */
   1461  1.1  mrg 
   1462  1.1  mrg void
   1463  1.1  mrg gt_ggc_m_S (const void *p)
   1464  1.1  mrg {
   1465  1.1  mrg   page_entry *entry;
   1466  1.1  mrg   unsigned bit, word;
   1467  1.1  mrg   unsigned long mask;
   1468  1.1  mrg   unsigned long offset;
   1469  1.1  mrg 
   1470  1.1  mrg   if (!p)
   1471  1.1  mrg     return;
   1472  1.1  mrg 
    1473  1.1  mrg   /* Look up the page on which the object is allocated.  If it was not
   1474  1.1  mrg      GC allocated, gracefully bail out.  */
   1475  1.1  mrg   entry = safe_lookup_page_table_entry (p);
   1476  1.1  mrg   if (!entry)
   1477  1.1  mrg     return;
   1478  1.1  mrg 
   1479  1.1  mrg   /* Calculate the index of the object on the page; this is its bit
   1480  1.1  mrg      position in the in_use_p bitmap.  Note that because a char* might
   1481  1.1  mrg      point to the middle of an object, we need special code here to
   1482  1.1  mrg      make sure P points to the start of an object.  */
   1483  1.1  mrg   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
   1484  1.1  mrg   if (offset)
   1485  1.1  mrg     {
   1486  1.1  mrg       /* Here we've seen a char* which does not point to the beginning
   1487  1.1  mrg 	 of an allocated object.  We assume it points to the middle of
   1488  1.1  mrg 	 a STRING_CST.  */
   1489  1.1  mrg       gcc_assert (offset == offsetof (struct tree_string, str));
   1490  1.1  mrg       p = ((const char *) p) - offset;
   1491  1.1  mrg       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
   1492  1.1  mrg       return;
   1493  1.1  mrg     }
   1494  1.1  mrg 
   1495  1.1  mrg   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
   1496  1.1  mrg   word = bit / HOST_BITS_PER_LONG;
   1497  1.1  mrg   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
   1498  1.1  mrg 
   1499  1.1  mrg   /* If the bit was previously set, skip it.  */
   1500  1.1  mrg   if (entry->in_use_p[word] & mask)
   1501  1.1  mrg     return;
   1502  1.1  mrg 
   1503  1.1  mrg   /* Otherwise set it, and decrement the free object count.  */
   1504  1.1  mrg   entry->in_use_p[word] |= mask;
   1505  1.1  mrg   entry->num_free_objects -= 1;
   1506  1.1  mrg 
   1507  1.1  mrg   if (GGC_DEBUG_LEVEL >= 4)
   1508  1.1  mrg     fprintf (G.debug_file, "Marking %p\n", p);
   1509  1.1  mrg 
   1510  1.1  mrg   return;
   1511  1.1  mrg }
   1512  1.1  mrg 
   1513  1.1  mrg 
   1514  1.1  mrg /* User-callable entry points for marking string X.  */
   1515  1.1  mrg 
   1516  1.1  mrg void
   1517  1.1  mrg gt_ggc_mx (const char *& x)
   1518  1.1  mrg {
   1519  1.1  mrg   gt_ggc_m_S (x);
   1520  1.1  mrg }
   1521  1.1  mrg 
   1522  1.1  mrg void
   1523  1.1  mrg gt_ggc_mx (char *& x)
   1524  1.1  mrg {
   1525  1.1  mrg   gt_ggc_m_S (x);
   1526  1.1  mrg }
   1527  1.1  mrg 
   1528  1.1  mrg void
   1529  1.1  mrg gt_ggc_mx (unsigned char *& x)
   1530  1.1  mrg {
   1531  1.1  mrg   gt_ggc_m_S (x);
   1532  1.1  mrg }
   1533  1.1  mrg 
   1534  1.1  mrg void
   1535  1.1  mrg gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
   1536  1.1  mrg {
   1537  1.1  mrg }
   1538  1.1  mrg 
    1539  1.1  mrg /* If P is not marked, mark it and return false.  Otherwise return true.
   1540  1.1  mrg    P must have been allocated by the GC allocator; it mustn't point to
   1541  1.1  mrg    static objects, stack variables, or memory allocated with malloc.  */
   1542  1.1  mrg 
   1543  1.1  mrg int
   1544  1.1  mrg ggc_set_mark (const void *p)
   1545  1.1  mrg {
   1546  1.1  mrg   page_entry *entry;
   1547  1.1  mrg   unsigned bit, word;
   1548  1.1  mrg   unsigned long mask;
   1549  1.1  mrg 
    1550  1.1  mrg   /* Look up the page on which the object is allocated.  If the object
   1551  1.1  mrg      wasn't allocated by the collector, we'll probably die.  */
   1552  1.1  mrg   entry = lookup_page_table_entry (p);
   1553  1.1  mrg   gcc_assert (entry);
   1554  1.1  mrg 
   1555  1.1  mrg   /* Calculate the index of the object on the page; this is its bit
   1556  1.1  mrg      position in the in_use_p bitmap.  */
   1557  1.1  mrg   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
   1558  1.1  mrg   word = bit / HOST_BITS_PER_LONG;
   1559  1.1  mrg   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
   1560  1.1  mrg 
   1561  1.1  mrg   /* If the bit was previously set, skip it.  */
   1562  1.1  mrg   if (entry->in_use_p[word] & mask)
   1563  1.1  mrg     return 1;
   1564  1.1  mrg 
   1565  1.1  mrg   /* Otherwise set it, and decrement the free object count.  */
   1566  1.1  mrg   entry->in_use_p[word] |= mask;
   1567  1.1  mrg   entry->num_free_objects -= 1;
   1568  1.1  mrg 
   1569  1.1  mrg   if (GGC_DEBUG_LEVEL >= 4)
   1570  1.1  mrg     fprintf (G.debug_file, "Marking %p\n", p);
   1571  1.1  mrg 
   1572  1.1  mrg   return 0;
   1573  1.1  mrg }
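
/* A sketch of the intended calling pattern: visit an object's fields only
   the first time it is seen.  Generated markers reach this through
   ggc_test_and_set_mark in ggc.h, which also screens out NULL and inverts
   the result.  */
#if 0
  if (!ggc_set_mark (x))
    {
      /* X was newly marked; now mark everything it points to.  */
    }
#endif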
   1574  1.1  mrg 
   1575  1.1  mrg /* Return 1 if P has been marked, zero otherwise.
   1576  1.1  mrg    P must have been allocated by the GC allocator; it mustn't point to
   1577  1.1  mrg    static objects, stack variables, or memory allocated with malloc.  */
   1578  1.1  mrg 
   1579  1.1  mrg int
   1580  1.1  mrg ggc_marked_p (const void *p)
   1581  1.1  mrg {
   1582  1.1  mrg   page_entry *entry;
   1583  1.1  mrg   unsigned bit, word;
   1584  1.1  mrg   unsigned long mask;
   1585  1.1  mrg 
    1586  1.1  mrg   /* Look up the page on which the object is allocated.  If the object
   1587  1.1  mrg      wasn't allocated by the collector, we'll probably die.  */
   1588  1.1  mrg   entry = lookup_page_table_entry (p);
   1589  1.1  mrg   gcc_assert (entry);
   1590  1.1  mrg 
   1591  1.1  mrg   /* Calculate the index of the object on the page; this is its bit
   1592  1.1  mrg      position in the in_use_p bitmap.  */
   1593  1.1  mrg   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
   1594  1.1  mrg   word = bit / HOST_BITS_PER_LONG;
   1595  1.1  mrg   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
   1596  1.1  mrg 
   1597  1.1  mrg   return (entry->in_use_p[word] & mask) != 0;
   1598  1.1  mrg }
   1599  1.1  mrg 
   1600  1.1  mrg /* Return the size of the gc-able object P.  */
   1601  1.1  mrg 
   1602  1.1  mrg size_t
   1603  1.1  mrg ggc_get_size (const void *p)
   1604  1.1  mrg {
   1605  1.1  mrg   page_entry *pe = lookup_page_table_entry (p);
   1606  1.1  mrg   return OBJECT_SIZE (pe->order);
   1607  1.1  mrg }
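
/* Note the rounded object size is returned, not the requested one: under
   the default orders, an object obtained from a 24-byte request reports
   32 bytes here.  */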
   1608  1.1  mrg 
   1609  1.1  mrg /* Release the memory for object P.  */
   1610  1.1  mrg 
   1611  1.1  mrg void
   1612  1.1  mrg ggc_free (void *p)
   1613  1.1  mrg {
   1614  1.1  mrg   if (in_gc)
   1615  1.1  mrg     return;
   1616  1.1  mrg 
   1617  1.1  mrg   page_entry *pe = lookup_page_table_entry (p);
   1618  1.1  mrg   size_t order = pe->order;
   1619  1.1  mrg   size_t size = OBJECT_SIZE (order);
   1620  1.1  mrg 
   1621  1.1  mrg   if (GATHER_STATISTICS)
   1622  1.1  mrg     ggc_free_overhead (p);
   1623  1.1  mrg 
   1624  1.1  mrg   if (GGC_DEBUG_LEVEL >= 3)
   1625  1.1  mrg     fprintf (G.debug_file,
   1626  1.1  mrg 	     "Freeing object, actual size=%lu, at %p on %p\n",
   1627  1.1  mrg 	     (unsigned long) size, p, (void *) pe);
   1628  1.1  mrg 
   1629  1.1  mrg #ifdef ENABLE_GC_CHECKING
   1630  1.1  mrg   /* Poison the data, to indicate the data is garbage.  */
   1631  1.1  mrg   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
   1632  1.1  mrg   memset (p, 0xa5, size);
   1633  1.1  mrg #endif
   1634  1.1  mrg   /* Let valgrind know the object is free.  */
   1635  1.1  mrg   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
   1636  1.1  mrg 
   1637  1.1  mrg #ifdef ENABLE_GC_ALWAYS_COLLECT
   1638  1.1  mrg   /* In the completely-anal-checking mode, we do *not* immediately free
   1639  1.1  mrg      the data, but instead verify that the data is *actually* not
   1640  1.1  mrg      reachable the next time we collect.  */
   1641  1.1  mrg   {
   1642  1.1  mrg     struct free_object *fo = XNEW (struct free_object);
   1643  1.1  mrg     fo->object = p;
   1644  1.1  mrg     fo->next = G.free_object_list;
   1645  1.1  mrg     G.free_object_list = fo;
   1646  1.1  mrg   }
   1647  1.1  mrg #else
   1648  1.1  mrg   {
   1649  1.1  mrg     unsigned int bit_offset, word, bit;
   1650  1.1  mrg 
   1651  1.1  mrg     G.allocated -= size;
   1652  1.1  mrg 
   1653  1.1  mrg     /* Mark the object not-in-use.  */
   1654  1.1  mrg     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
   1655  1.1  mrg     word = bit_offset / HOST_BITS_PER_LONG;
   1656  1.1  mrg     bit = bit_offset % HOST_BITS_PER_LONG;
   1657  1.1  mrg     pe->in_use_p[word] &= ~(1UL << bit);
   1658  1.1  mrg 
   1659  1.1  mrg     if (pe->num_free_objects++ == 0)
   1660  1.1  mrg       {
   1661  1.1  mrg 	page_entry *p, *q;
   1662  1.1  mrg 
   1663  1.1  mrg 	/* If the page is completely full, then it's supposed to
   1664  1.1  mrg 	   be after all pages that aren't.  Since we've freed one
   1665  1.1  mrg 	   object from a page that was full, we need to move the
   1666  1.1  mrg 	   page to the head of the list.
   1667  1.1  mrg 
   1668  1.1  mrg 	   PE is the node we want to move.  Q is the previous node
   1669  1.1  mrg 	   and P is the next node in the list.  */
   1670  1.1  mrg 	q = pe->prev;
   1671  1.1  mrg 	if (q && q->num_free_objects == 0)
   1672  1.1  mrg 	  {
   1673  1.1  mrg 	    p = pe->next;
   1674  1.1  mrg 
   1675  1.1  mrg 	    q->next = p;
   1676  1.1  mrg 
   1677  1.1  mrg 	    /* If PE was at the end of the list, then Q becomes the
   1678  1.1  mrg 	       new end of the list.  If PE was not the end of the
   1679  1.1  mrg 	       list, then we need to update the PREV field for P.  */
   1680  1.1  mrg 	    if (!p)
   1681  1.1  mrg 	      G.page_tails[order] = q;
   1682  1.1  mrg 	    else
   1683  1.1  mrg 	      p->prev = q;
   1684  1.1  mrg 
   1685  1.1  mrg 	    /* Move PE to the head of the list.  */
   1686  1.1  mrg 	    pe->next = G.pages[order];
   1687  1.1  mrg 	    pe->prev = NULL;
   1688  1.1  mrg 	    G.pages[order]->prev = pe;
   1689  1.1  mrg 	    G.pages[order] = pe;
   1690  1.1  mrg 	  }
   1691  1.1  mrg 
   1692  1.1  mrg 	/* Reset the hint bit to point to the only free object.  */
   1693  1.1  mrg 	pe->next_bit_hint = bit_offset;
   1694  1.1  mrg       }
   1695  1.1  mrg   }
   1696  1.1  mrg #endif
   1697  1.1  mrg }
   1698  1.1  mrg 
   1699  1.1  mrg /* Subroutine of init_ggc which computes the pair of numbers used to
   1701  1.1  mrg    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
   1702  1.1  mrg 
   1703  1.1  mrg    This algorithm is taken from Granlund and Montgomery's paper
   1704  1.1  mrg    "Division by Invariant Integers using Multiplication"
   1705  1.1  mrg    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   1706  1.1  mrg    constants).  */
   1707  1.1  mrg 
   1708  1.1  mrg static void
   1709  1.1  mrg compute_inverse (unsigned order)
   1710  1.1  mrg {
   1711  1.1  mrg   size_t size, inv;
   1712  1.1  mrg   unsigned int e;
   1713  1.1  mrg 
   1714  1.1  mrg   size = OBJECT_SIZE (order);
   1715  1.1  mrg   e = 0;
   1716  1.1  mrg   while (size % 2 == 0)
   1717  1.1  mrg     {
   1718  1.1  mrg       e++;
   1719  1.1  mrg       size >>= 1;
   1720  1.1  mrg     }
   1721  1.1  mrg 
   1722  1.1  mrg   inv = size;
   1723  1.1  mrg   while (inv * size != 1)
   1724  1.1  mrg     inv = inv * (2 - inv*size);
   1725  1.1  mrg 
   1726  1.1  mrg   DIV_MULT (order) = inv;
   1727  1.1  mrg   DIV_SHIFT (order) = e;
   1728  1.1  mrg }
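
/* A worked example in 8-bit arithmetic for brevity (the real computation
   uses the full width of size_t).  For OBJECT_SIZE (order) == 40 we strip
   e == 3 factors of two, leaving the odd part 5.  Each round of
   inv = inv * (2 - inv*size) doubles the number of correct low bits:

     5 -> 141 -> 205,  and  5 * 205 == 1025 == 4*256 + 1 == 1 (mod 256).

   Dividing a multiple of 40 is then an exact multiply plus a shift: for
   byte offset 80, (80 * 205) mod 256 == 16 and 16 >> 3 == 2, i.e. object
   index 80 / 40 == 2.  */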
   1729  1.1  mrg 
   1730  1.1  mrg /* Initialize the ggc-mmap allocator.  */
   1731  1.1  mrg void
   1732  1.1  mrg init_ggc (void)
   1733  1.1  mrg {
   1734  1.1  mrg   static bool init_p = false;
   1735  1.1  mrg   unsigned order;
   1736  1.1  mrg 
   1737  1.1  mrg   if (init_p)
   1738  1.1  mrg     return;
   1739  1.1  mrg   init_p = true;
   1740  1.1  mrg 
   1741  1.1  mrg   G.pagesize = getpagesize ();
   1742  1.1  mrg   G.lg_pagesize = exact_log2 (G.pagesize);
   1743  1.1  mrg 
   1744  1.1  mrg #ifdef HAVE_MMAP_DEV_ZERO
   1745  1.1  mrg   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
   1746  1.1  mrg   if (G.dev_zero_fd == -1)
   1747  1.1  mrg     internal_error ("open /dev/zero: %m");
   1748  1.1  mrg #endif
   1749  1.1  mrg 
   1750  1.1  mrg #if 0
   1751  1.1  mrg   G.debug_file = fopen ("ggc-mmap.debug", "w");
   1752  1.1  mrg #else
   1753  1.1  mrg   G.debug_file = stdout;
   1754  1.1  mrg #endif
   1755  1.1  mrg 
   1756  1.1  mrg #ifdef USING_MMAP
   1757  1.1  mrg   /* StunOS has an amazing off-by-one error for the first mmap allocation
   1758  1.1  mrg      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
   1759  1.1  mrg      believe, is an unaligned page allocation, which would cause us to
   1760  1.1  mrg      hork badly if we tried to use it.  */
   1761  1.1  mrg   {
   1762  1.1  mrg     char *p = alloc_anon (NULL, G.pagesize, true);
   1763  1.1  mrg     struct page_entry *e;
   1764  1.1  mrg     if ((uintptr_t)p & (G.pagesize - 1))
   1765  1.1  mrg       {
   1766  1.1  mrg 	/* How losing.  Discard this one and try another.  If we still
   1767  1.1  mrg 	   can't get something useful, give up.  */
   1768  1.1  mrg 
   1769  1.1  mrg 	p = alloc_anon (NULL, G.pagesize, true);
   1770  1.1  mrg 	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
   1771  1.1  mrg       }
   1772  1.1  mrg 
   1773  1.1  mrg     /* We have a good page, might as well hold onto it...  */
   1774  1.1  mrg     e = XCNEW (struct page_entry);
   1775  1.1  mrg     e->bytes = G.pagesize;
   1776  1.1  mrg     e->page = p;
   1777  1.1  mrg     e->next = G.free_pages;
   1778  1.1  mrg     G.free_pages = e;
   1779  1.1  mrg   }
   1780  1.1  mrg #endif
   1781  1.1  mrg 
   1782  1.1  mrg   /* Initialize the object size table.  */
   1783  1.1  mrg   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
   1784  1.1  mrg     object_size_table[order] = (size_t) 1 << order;
   1785  1.1  mrg   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
   1786  1.1  mrg     {
   1787  1.1  mrg       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
   1788  1.1  mrg 
   1789  1.1  mrg       /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
   1790  1.1  mrg 	 so that we're sure of getting aligned memory.  */
   1791  1.1  mrg       s = ROUND_UP (s, MAX_ALIGNMENT);
   1792  1.1  mrg       object_size_table[order] = s;
   1793  1.1  mrg     }
   1794  1.1  mrg 
   1795  1.1  mrg   /* Initialize the objects-per-page and inverse tables.  */
   1796  1.1  mrg   for (order = 0; order < NUM_ORDERS; ++order)
   1797  1.1  mrg     {
   1798  1.1  mrg       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
   1799  1.1  mrg       if (objects_per_page_table[order] == 0)
   1800  1.1  mrg 	objects_per_page_table[order] = 1;
   1801  1.1  mrg       compute_inverse (order);
   1802  1.1  mrg     }
   1803  1.1  mrg 
   1804  1.1  mrg   /* Reset the size_lookup array to put appropriately sized objects in
   1805  1.1  mrg      the special orders.  All objects bigger than the previous power
   1806  1.1  mrg      of two, but no greater than the special size, should go in the
   1807  1.1  mrg      new order.  */
   1808  1.1  mrg   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
   1809  1.1  mrg     {
   1810  1.1  mrg       int o;
   1811  1.1  mrg       int i;
   1812  1.1  mrg 
   1813  1.1  mrg       i = OBJECT_SIZE (order);
   1814  1.1  mrg       if (i >= NUM_SIZE_LOOKUP)
   1815  1.1  mrg 	continue;
   1816  1.1  mrg 
   1817  1.1  mrg       for (o = size_lookup[i]; o == size_lookup [i]; --i)
   1818  1.1  mrg 	size_lookup[i] = order;
   1819  1.1  mrg     }
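
  /* For example, with a hypothetical 96-byte entry in
     extra_order_size_table, the loop above retargets size_lookup[65..96]
     (previously order 7) to the new order, so a 72-byte request would be
     served from 96-byte objects rather than 128-byte ones.  */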
   1820  1.1  mrg 
   1821  1.1  mrg   G.depth_in_use = 0;
   1822  1.1  mrg   G.depth_max = 10;
   1823  1.1  mrg   G.depth = XNEWVEC (unsigned int, G.depth_max);
   1824  1.1  mrg 
   1825  1.1  mrg   G.by_depth_in_use = 0;
   1826  1.1  mrg   G.by_depth_max = INITIAL_PTE_COUNT;
   1827  1.1  mrg   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
   1828  1.1  mrg   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
   1829  1.1  mrg 
   1830  1.1  mrg   /* Allocate space for the depth 0 finalizers.  */
   1831  1.1  mrg   G.finalizers.safe_push (vNULL);
   1832  1.1  mrg   G.vec_finalizers.safe_push (vNULL);
   1833  1.1  mrg   gcc_assert (G.finalizers.length() == 1);
   1834  1.1  mrg }
   1835  1.1  mrg 
   1836  1.1  mrg /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   1837  1.1  mrg    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
   1838  1.1  mrg 
   1839  1.1  mrg static void
   1840  1.1  mrg ggc_recalculate_in_use_p (page_entry *p)
   1841  1.1  mrg {
   1842  1.1  mrg   unsigned int i;
   1843  1.1  mrg   size_t num_objects;
   1844  1.1  mrg 
   1845  1.1  mrg   /* Because the past-the-end bit in in_use_p is always set, we
   1846  1.1  mrg      pretend there is one additional object.  */
   1847  1.1  mrg   num_objects = OBJECTS_IN_PAGE (p) + 1;
   1848  1.1  mrg 
   1849  1.1  mrg   /* Reset the free object count.  */
   1850  1.1  mrg   p->num_free_objects = num_objects;
   1851  1.1  mrg 
   1852  1.1  mrg   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
   1853  1.1  mrg   for (i = 0;
   1854  1.1  mrg        i < CEIL (BITMAP_SIZE (num_objects),
   1855  1.1  mrg 		 sizeof (*p->in_use_p));
   1856  1.1  mrg        ++i)
   1857  1.1  mrg     {
   1858  1.1  mrg       unsigned long j;
   1859  1.1  mrg 
   1860  1.1  mrg       /* Something is in use if it is marked, or if it was in use in a
   1861  1.1  mrg 	 context further down the context stack.  */
   1862  1.1  mrg       p->in_use_p[i] |= save_in_use_p (p)[i];
   1863  1.1  mrg 
   1864  1.1  mrg       /* Decrement the free object count for every object allocated.  */
   1865  1.1  mrg       for (j = p->in_use_p[i]; j; j >>= 1)
   1866  1.1  mrg 	p->num_free_objects -= (j & 1);
   1867  1.1  mrg     }
   1868  1.1  mrg 
   1869  1.1  mrg   gcc_assert (p->num_free_objects < num_objects);
   1870  1.1  mrg }
   1871  1.1  mrg 
   1872  1.1  mrg /* Unmark all objects.  */
   1874  1.1  mrg 
   1875  1.1  mrg static void
   1876  1.1  mrg clear_marks (void)
   1877  1.1  mrg {
   1878  1.1  mrg   unsigned order;
   1879  1.1  mrg 
   1880  1.1  mrg   for (order = 2; order < NUM_ORDERS; order++)
   1881  1.1  mrg     {
   1882  1.1  mrg       page_entry *p;
   1883  1.1  mrg 
   1884  1.1  mrg       for (p = G.pages[order]; p != NULL; p = p->next)
   1885  1.1  mrg 	{
   1886  1.1  mrg 	  size_t num_objects = OBJECTS_IN_PAGE (p);
   1887  1.1  mrg 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
   1888  1.1  mrg 
   1889  1.1  mrg 	  /* The data should be page-aligned.  */
   1890  1.1  mrg 	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
   1891  1.1  mrg 
   1892  1.1  mrg 	  /* Pages that aren't in the topmost context are not collected;
   1893  1.1  mrg 	     nevertheless, we need their in-use bit vectors to store GC
   1894  1.1  mrg 	     marks.  So, back them up first.  */
   1895  1.1  mrg 	  if (p->context_depth < G.context_depth)
   1896  1.1  mrg 	    {
   1897  1.1  mrg 	      if (! save_in_use_p (p))
   1898  1.1  mrg 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
   1899  1.1  mrg 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
   1900  1.1  mrg 	    }
   1901  1.1  mrg 
    1902  1.1  mrg 	  /* Reset the number of free objects and clear the
   1903  1.1  mrg              in-use bits.  These will be adjusted by mark_obj.  */
   1904  1.1  mrg 	  p->num_free_objects = num_objects;
   1905  1.1  mrg 	  memset (p->in_use_p, 0, bitmap_size);
   1906  1.1  mrg 
   1907  1.1  mrg 	  /* Make sure the one-past-the-end bit is always set.  */
   1908  1.1  mrg 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
   1909  1.1  mrg 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
   1910  1.1  mrg 	}
   1911  1.1  mrg     }
   1912  1.1  mrg }
   1913  1.1  mrg 
    1914  1.1  mrg /* Check if any blocks with a registered finalizer have become unmarked.  If
    1915  1.1  mrg    so, run the finalizer and unregister it, because the block is about to be
    1916  1.1  mrg    freed.  Note that no guarantee is made about the order in which finalizers
    1917  1.1  mrg    run, so touching other objects in GC memory is extremely unwise.  */
   1918  1.1  mrg 
   1919  1.1  mrg static void
   1920  1.1  mrg ggc_handle_finalizers ()
   1921  1.1  mrg {
   1922  1.1  mrg   unsigned dlen = G.finalizers.length();
   1923  1.1  mrg   for (unsigned d = G.context_depth; d < dlen; ++d)
   1924  1.1  mrg     {
   1925  1.1  mrg       vec<finalizer> &v = G.finalizers[d];
   1926  1.1  mrg       unsigned length = v.length ();
   1927  1.1  mrg       for (unsigned int i = 0; i < length;)
   1928  1.1  mrg 	{
   1929  1.1  mrg 	  finalizer &f = v[i];
   1930  1.1  mrg 	  if (!ggc_marked_p (f.addr ()))
   1931  1.1  mrg 	    {
   1932  1.1  mrg 	      f.call ();
   1933  1.1  mrg 	      v.unordered_remove (i);
   1934  1.1  mrg 	      length--;
   1935  1.1  mrg 	    }
   1936  1.1  mrg 	  else
   1937  1.1  mrg 	    i++;
   1938  1.1  mrg 	}
   1939  1.1  mrg     }
   1940  1.1  mrg 
   1941  1.1  mrg   gcc_assert (dlen == G.vec_finalizers.length());
   1942  1.1  mrg   for (unsigned d = G.context_depth; d < dlen; ++d)
   1943  1.1  mrg     {
   1944  1.1  mrg       vec<vec_finalizer> &vv = G.vec_finalizers[d];
   1945  1.1  mrg       unsigned length = vv.length ();
   1946  1.1  mrg       for (unsigned int i = 0; i < length;)
   1947  1.1  mrg 	{
   1948  1.1  mrg 	  vec_finalizer &f = vv[i];
   1949  1.1  mrg 	  if (!ggc_marked_p (f.addr ()))
   1950  1.1  mrg 	    {
   1951  1.1  mrg 	      f.call ();
   1952  1.1  mrg 	      vv.unordered_remove (i);
   1953  1.1  mrg 	      length--;
   1954  1.1  mrg 	    }
   1955  1.1  mrg 	  else
   1956  1.1  mrg 	    i++;
   1957  1.1  mrg 	}
   1958  1.1  mrg     }
   1959  1.1  mrg }
   1960  1.1  mrg 
   1961  1.1  mrg /* Free all empty pages.  Partially empty pages need no attention
   1962  1.1  mrg    because the `mark' bit doubles as an `unused' bit.  */
   1963  1.1  mrg 
   1964  1.1  mrg static void
   1965  1.1  mrg sweep_pages (void)
   1966  1.1  mrg {
   1967  1.1  mrg   unsigned order;
   1968  1.1  mrg 
   1969  1.1  mrg   for (order = 2; order < NUM_ORDERS; order++)
   1970  1.1  mrg     {
   1971  1.1  mrg       /* The last page-entry to consider, regardless of entries
   1972  1.1  mrg 	 placed at the end of the list.  */
   1973  1.1  mrg       page_entry * const last = G.page_tails[order];
   1974  1.1  mrg 
   1975  1.1  mrg       size_t num_objects;
   1976  1.1  mrg       size_t live_objects;
   1977  1.1  mrg       page_entry *p, *previous;
   1978  1.1  mrg       int done;
   1979  1.1  mrg 
   1980  1.1  mrg       p = G.pages[order];
   1981  1.1  mrg       if (p == NULL)
   1982  1.1  mrg 	continue;
   1983  1.1  mrg 
   1984  1.1  mrg       previous = NULL;
   1985  1.1  mrg       do
   1986  1.1  mrg 	{
   1987  1.1  mrg 	  page_entry *next = p->next;
   1988  1.1  mrg 
   1989  1.1  mrg 	  /* Loop until all entries have been examined.  */
   1990  1.1  mrg 	  done = (p == last);
   1991  1.1  mrg 
   1992  1.1  mrg 	  num_objects = OBJECTS_IN_PAGE (p);
   1993  1.1  mrg 
   1994  1.1  mrg 	  /* Add all live objects on this page to the count of
   1995  1.1  mrg              allocated memory.  */
   1996  1.1  mrg 	  live_objects = num_objects - p->num_free_objects;
   1997  1.1  mrg 
   1998  1.1  mrg 	  G.allocated += OBJECT_SIZE (order) * live_objects;
   1999  1.1  mrg 
   2000  1.1  mrg 	  /* Only objects on pages in the topmost context should get
   2001  1.1  mrg 	     collected.  */
   2002  1.1  mrg 	  if (p->context_depth < G.context_depth)
   2003  1.1  mrg 	    ;
   2004  1.1  mrg 
   2005  1.1  mrg 	  /* Remove the page if it's empty.  */
   2006  1.1  mrg 	  else if (live_objects == 0)
   2007  1.1  mrg 	    {
   2008  1.1  mrg 	      /* If P was the first page in the list, then NEXT
   2009  1.1  mrg 		 becomes the new first page in the list, otherwise
   2010  1.1  mrg 		 splice P out of the forward pointers.  */
   2011  1.1  mrg 	      if (! previous)
   2012  1.1  mrg 		G.pages[order] = next;
   2013  1.1  mrg 	      else
   2014  1.1  mrg 		previous->next = next;
   2015  1.1  mrg 
   2016  1.1  mrg 	      /* Splice P out of the back pointers too.  */
   2017  1.1  mrg 	      if (next)
   2018  1.1  mrg 		next->prev = previous;
   2019  1.1  mrg 
   2020  1.1  mrg 	      /* Are we removing the last element?  */
   2021  1.1  mrg 	      if (p == G.page_tails[order])
   2022  1.1  mrg 		G.page_tails[order] = previous;
   2023  1.1  mrg 	      free_page (p);
   2024  1.1  mrg 	      p = previous;
   2025  1.1  mrg 	    }
   2026  1.1  mrg 
   2027  1.1  mrg 	  /* If the page is full, move it to the end.  */
   2028  1.1  mrg 	  else if (p->num_free_objects == 0)
   2029  1.1  mrg 	    {
   2030  1.1  mrg 	      /* Don't move it if it's already at the end.  */
   2031  1.1  mrg 	      if (p != G.page_tails[order])
   2032  1.1  mrg 		{
   2033  1.1  mrg 		  /* Move p to the end of the list.  */
   2034  1.1  mrg 		  p->next = NULL;
   2035  1.1  mrg 		  p->prev = G.page_tails[order];
   2036  1.1  mrg 		  G.page_tails[order]->next = p;
   2037  1.1  mrg 
   2038  1.1  mrg 		  /* Update the tail pointer...  */
   2039  1.1  mrg 		  G.page_tails[order] = p;
   2040  1.1  mrg 
   2041  1.1  mrg 		  /* ... and the head pointer, if necessary.  */
   2042  1.1  mrg 		  if (! previous)
   2043  1.1  mrg 		    G.pages[order] = next;
   2044  1.1  mrg 		  else
   2045  1.1  mrg 		    previous->next = next;
   2046  1.1  mrg 
   2047  1.1  mrg 		  /* And update the backpointer in NEXT if necessary.  */
   2048  1.1  mrg 		  if (next)
   2049  1.1  mrg 		    next->prev = previous;
   2050  1.1  mrg 
   2051  1.1  mrg 		  p = previous;
   2052  1.1  mrg 		}
   2053  1.1  mrg 	    }
   2054  1.1  mrg 
   2055  1.1  mrg 	  /* If we've fallen through to here, it's a page in the
   2056  1.1  mrg 	     topmost context that is neither full nor empty.  Such a
   2057  1.1  mrg 	     page must precede pages at lesser context depth in the
   2058  1.1  mrg 	     list, so move it to the head.  */
   2059  1.1  mrg 	  else if (p != G.pages[order])
   2060  1.1  mrg 	    {
   2061  1.1  mrg 	      previous->next = p->next;
   2062  1.1  mrg 
   2063  1.1  mrg 	      /* Update the backchain in the next node if it exists.  */
   2064  1.1  mrg 	      if (p->next)
   2065  1.1  mrg 		p->next->prev = previous;
   2066  1.1  mrg 
   2067  1.1  mrg 	      /* Move P to the head of the list.  */
   2068  1.1  mrg 	      p->next = G.pages[order];
   2069  1.1  mrg 	      p->prev = NULL;
   2070  1.1  mrg 	      G.pages[order]->prev = p;
   2071  1.1  mrg 
   2072  1.1  mrg 	      /* Update the head pointer.  */
   2073  1.1  mrg 	      G.pages[order] = p;
   2074  1.1  mrg 
   2075  1.1  mrg 	      /* Are we moving the last element?  */
   2076  1.1  mrg 	      if (G.page_tails[order] == p)
   2077  1.1  mrg 	        G.page_tails[order] = previous;
   2078  1.1  mrg 	      p = previous;
   2079  1.1  mrg 	    }
   2080  1.1  mrg 
   2081  1.1  mrg 	  previous = p;
   2082  1.1  mrg 	  p = next;
   2083  1.1  mrg 	}
   2084  1.1  mrg       while (! done);
   2085  1.1  mrg 
   2086  1.1  mrg       /* Now, restore the in_use_p vectors for any pages from contexts
   2087  1.1  mrg          other than the current one.  */
   2088  1.1  mrg       for (p = G.pages[order]; p; p = p->next)
   2089  1.1  mrg 	if (p->context_depth != G.context_depth)
   2090  1.1  mrg 	  ggc_recalculate_in_use_p (p);
   2091  1.1  mrg     }
   2092  1.1  mrg }
   2093  1.1  mrg 
   2094  1.1  mrg #ifdef ENABLE_GC_CHECKING
   2095  1.1  mrg /* Clobber all free objects.  */
   2096  1.1  mrg 
   2097  1.1  mrg static void
   2098  1.1  mrg poison_pages (void)
   2099  1.1  mrg {
   2100  1.1  mrg   unsigned order;
   2101  1.1  mrg 
   2102  1.1  mrg   for (order = 2; order < NUM_ORDERS; order++)
   2103  1.1  mrg     {
   2104  1.1  mrg       size_t size = OBJECT_SIZE (order);
   2105  1.1  mrg       page_entry *p;
   2106  1.1  mrg 
   2107  1.1  mrg       for (p = G.pages[order]; p != NULL; p = p->next)
   2108  1.1  mrg 	{
   2109  1.1  mrg 	  size_t num_objects;
   2110  1.1  mrg 	  size_t i;
   2111  1.1  mrg 
   2112  1.1  mrg 	  if (p->context_depth != G.context_depth)
   2113  1.1  mrg 	    /* Since we don't do any collection for pages in pushed
   2114  1.1  mrg 	       contexts, there's no need to do any poisoning.  And
   2115  1.1  mrg 	       besides, the IN_USE_P array isn't valid until we pop
   2116  1.1  mrg 	       contexts.  */
   2117  1.1  mrg 	    continue;
   2118  1.1  mrg 
   2119  1.1  mrg 	  num_objects = OBJECTS_IN_PAGE (p);
   2120  1.1  mrg 	  for (i = 0; i < num_objects; i++)
   2121  1.1  mrg 	    {
   2122  1.1  mrg 	      size_t word, bit;
   2123  1.1  mrg 	      word = i / HOST_BITS_PER_LONG;
   2124  1.1  mrg 	      bit = i % HOST_BITS_PER_LONG;
   2125  1.1  mrg 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
   2126  1.1  mrg 		{
   2127  1.1  mrg 		  char *object = p->page + i * size;
   2128  1.1  mrg 
   2129  1.1  mrg 		  /* Keep poison-by-write when we expect to use Valgrind,
   2130  1.1  mrg 		     so the exact same memory semantics is kept, in case
   2131  1.1  mrg 		     there are memory errors.  We override this request
   2132  1.1  mrg 		     below.  */
   2133  1.1  mrg 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
   2134  1.1  mrg 								 size));
   2135  1.1  mrg 		  memset (object, 0xa5, size);
   2136  1.1  mrg 
   2137  1.1  mrg 		  /* Drop the handle to avoid handle leak.  */
   2138  1.1  mrg 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
   2139  1.1  mrg 		}
   2140  1.1  mrg 	    }
   2141  1.1  mrg 	}
   2142  1.1  mrg     }
   2143  1.1  mrg }
   2144  1.1  mrg #else
   2145  1.1  mrg #define poison_pages()
   2146  1.1  mrg #endif
   2147  1.1  mrg 
   2148  1.1  mrg #ifdef ENABLE_GC_ALWAYS_COLLECT
   2149  1.1  mrg /* Validate that the reportedly free objects actually are.  */
   2150  1.1  mrg 
   2151  1.1  mrg static void
   2152  1.1  mrg validate_free_objects (void)
   2153  1.1  mrg {
   2154  1.1  mrg   struct free_object *f, *next, *still_free = NULL;
   2155  1.1  mrg 
   2156  1.1  mrg   for (f = G.free_object_list; f ; f = next)
   2157  1.1  mrg     {
   2158  1.1  mrg       page_entry *pe = lookup_page_table_entry (f->object);
   2159  1.1  mrg       size_t bit, word;
   2160  1.1  mrg 
   2161  1.1  mrg       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
   2162  1.1  mrg       word = bit / HOST_BITS_PER_LONG;
   2163  1.1  mrg       bit = bit % HOST_BITS_PER_LONG;
   2164  1.1  mrg       next = f->next;
   2165  1.1  mrg 
   2166  1.1  mrg       /* Make certain it isn't visible from any root.  Notice that we
   2167  1.1  mrg 	 do this check before sweep_pages merges save_in_use_p.  */
   2168  1.1  mrg       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
   2169  1.1  mrg 
   2170  1.1  mrg       /* If the object comes from an outer context, then retain the
   2171  1.1  mrg 	 free_object entry, so that we can verify that the address
   2172  1.1  mrg 	 isn't live on the stack in some outer context.  */
   2173  1.1  mrg       if (pe->context_depth != G.context_depth)
   2174  1.1  mrg 	{
   2175  1.1  mrg 	  f->next = still_free;
   2176  1.1  mrg 	  still_free = f;
   2177  1.1  mrg 	}
   2178  1.1  mrg       else
   2179  1.1  mrg 	free (f);
   2180  1.1  mrg     }
   2181  1.1  mrg 
   2182  1.1  mrg   G.free_object_list = still_free;
   2183  1.1  mrg }
   2184  1.1  mrg #else
   2185  1.1  mrg #define validate_free_objects()
   2186  1.1  mrg #endif
   2187  1.1  mrg 
   2188  1.1  mrg /* Top level mark-and-sweep routine.  */
   2189  1.1  mrg 
   2190  1.1  mrg void
   2191  1.1  mrg ggc_collect (enum ggc_collect mode)
   2192  1.1  mrg {
   2193  1.1  mrg   /* Avoid frequent unnecessary work by skipping collection if the
   2194  1.1  mrg      total allocations haven't expanded much since the last
   2195  1.1  mrg      collection.  */
   2196  1.1  mrg   float allocated_last_gc =
   2197  1.1  mrg     MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K);
   2198  1.1  mrg 
    2199  1.1  mrg   /* It is also a good time to bring the memory block pool within its limits.  */
   2200  1.1  mrg   memory_block_pool::trim ();
   2201  1.1  mrg 
   2202  1.1  mrg   float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
   2203  1.1  mrg   if (mode == GGC_COLLECT_HEURISTIC
   2204  1.1  mrg       && G.allocated < allocated_last_gc + min_expand)
   2205  1.1  mrg     return;
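
  /* A worked example with the (assumed) default parameters
     --param ggc-min-heapsize=4096 (in KB) and --param ggc-min-expand=30:
     after a collection that left 8 MB live, heuristic collections are
     skipped until G.allocated exceeds 8 MB * 1.3 == 10.4 MB.  Both knobs
     are retuned at startup based on available memory.  */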
   2206  1.1  mrg 
   2207  1.1  mrg   timevar_push (TV_GC);
   2208  1.1  mrg   if (GGC_DEBUG_LEVEL >= 2)
   2209  1.1  mrg     fprintf (G.debug_file, "BEGIN COLLECTING\n");
   2210  1.1  mrg 
   2211  1.1  mrg   /* Zero the total allocated bytes.  This will be recalculated in the
   2212  1.1  mrg      sweep phase.  */
   2213  1.1  mrg   size_t allocated = G.allocated;
   2214  1.1  mrg   G.allocated = 0;
   2215  1.1  mrg 
   2216  1.1  mrg   /* Release the pages we freed the last time we collected, but didn't
   2217  1.1  mrg      reuse in the interim.  */
   2218  1.1  mrg   release_pages ();
   2219  1.1  mrg 
   2220  1.1  mrg   /* Output this later so we do not interfere with release_pages.  */
   2221  1.1  mrg   if (!quiet_flag)
   2222  1.1  mrg     fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated));
   2223  1.1  mrg 
   2224  1.1  mrg   /* Indicate that we've seen collections at this context depth.  */
   2225  1.1  mrg   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
   2226  1.1  mrg 
   2227  1.1  mrg   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
   2228  1.1  mrg 
   2229  1.1  mrg   in_gc = true;
   2230  1.1  mrg   clear_marks ();
   2231  1.1  mrg   ggc_mark_roots ();
   2232  1.1  mrg   ggc_handle_finalizers ();
   2233  1.1  mrg 
   2234  1.1  mrg   if (GATHER_STATISTICS)
   2235  1.1  mrg     ggc_prune_overhead_list ();
   2236  1.1  mrg 
   2237  1.1  mrg   poison_pages ();
   2238  1.1  mrg   validate_free_objects ();
   2239  1.1  mrg   sweep_pages ();
   2240  1.1  mrg 
   2241  1.1  mrg   in_gc = false;
   2242  1.1  mrg   G.allocated_last_gc = G.allocated;
   2243  1.1  mrg 
   2244  1.1  mrg   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
   2245  1.1  mrg 
   2246  1.1  mrg   timevar_pop (TV_GC);
   2247  1.1  mrg 
   2248  1.1  mrg   if (!quiet_flag)
   2249  1.1  mrg     fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated));
   2250  1.1  mrg   if (GGC_DEBUG_LEVEL >= 2)
   2251  1.1  mrg     fprintf (G.debug_file, "END COLLECTING\n");
   2252  1.1  mrg }
   2253  1.1  mrg 
   2254  1.1  mrg /* Return free pages to the system.  */
   2255  1.1  mrg 
   2256  1.1  mrg void
   2257  1.1  mrg ggc_trim ()
   2258  1.1  mrg {
   2259  1.1  mrg   timevar_push (TV_GC);
   2260  1.1  mrg   G.allocated = 0;
   2261  1.1  mrg   sweep_pages ();
   2262  1.1  mrg   release_pages ();
   2263  1.1  mrg   if (!quiet_flag)
   2264  1.1  mrg     fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}",
   2265  1.1  mrg 	     SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped));
   2266  1.1  mrg   timevar_pop (TV_GC);
   2267  1.1  mrg }
   2268  1.1  mrg 
   2269  1.1  mrg /* Assume that all GGC memory is reachable and grow the limits for next
   2270  1.1  mrg    collection.  With checking, trigger GGC so -Q compilation outputs how much
    2271  1.1  mrg    memory really is reachable.  */
   2272  1.1  mrg 
   2273  1.1  mrg void
   2274  1.1  mrg ggc_grow (void)
   2275  1.1  mrg {
   2276  1.1  mrg   if (!flag_checking)
   2277  1.1  mrg     G.allocated_last_gc = MAX (G.allocated_last_gc,
   2278  1.1  mrg 			       G.allocated);
   2279  1.1  mrg   else
   2280  1.1  mrg     ggc_collect ();
   2281  1.1  mrg   if (!quiet_flag)
   2282  1.1  mrg     fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated));
   2283  1.1  mrg }
   2284  1.1  mrg 
   2285  1.1  mrg void
   2286  1.1  mrg ggc_print_statistics (void)
   2287  1.1  mrg {
   2288  1.1  mrg   struct ggc_statistics stats;
   2289  1.1  mrg   unsigned int i;
   2290  1.1  mrg   size_t total_overhead = 0;
   2291  1.1  mrg 
   2292  1.1  mrg   /* Clear the statistics.  */
   2293  1.1  mrg   memset (&stats, 0, sizeof (stats));
   2294  1.1  mrg 
   2295  1.1  mrg   /* Make sure collection will really occur.  */
   2296  1.1  mrg   G.allocated_last_gc = 0;
   2297  1.1  mrg 
   2298  1.1  mrg   /* Collect and print the statistics common across collectors.  */
   2299  1.1  mrg   ggc_print_common_statistics (stderr, &stats);
   2300  1.1  mrg 
   2301  1.1  mrg   /* Release free pages so that we will not count the bytes allocated
   2302  1.1  mrg      there as part of the total allocated memory.  */
   2303  1.1  mrg   release_pages ();
   2304  1.1  mrg 
   2305  1.1  mrg   /* Collect some information about the various sizes of
   2306  1.1  mrg      allocation.  */
   2307  1.1  mrg   fprintf (stderr,
   2308  1.1  mrg            "Memory still allocated at the end of the compilation process\n");
   2309  1.1  mrg   fprintf (stderr, "%-8s %10s  %10s  %10s\n",
   2310  1.1  mrg 	   "Size", "Allocated", "Used", "Overhead");
   2311  1.1  mrg   for (i = 0; i < NUM_ORDERS; ++i)
   2312  1.1  mrg     {
   2313  1.1  mrg       page_entry *p;
   2314  1.1  mrg       size_t allocated;
   2315  1.1  mrg       size_t in_use;
   2316  1.1  mrg       size_t overhead;
   2317  1.1  mrg 
   2318  1.1  mrg       /* Skip empty entries.  */
   2319  1.1  mrg       if (!G.pages[i])
   2320  1.1  mrg 	continue;
   2321  1.1  mrg 
   2322  1.1  mrg       overhead = allocated = in_use = 0;
   2323  1.1  mrg 
   2324  1.1  mrg       /* Figure out the total number of bytes allocated for objects of
   2325  1.1  mrg 	 this size, and how many of them are actually in use.  Also figure
   2326  1.1  mrg 	 out how much memory the page table is using.  */
   2327  1.1  mrg       for (p = G.pages[i]; p; p = p->next)
   2328  1.1  mrg 	{
   2329  1.1  mrg 	  allocated += p->bytes;
   2330  1.1  mrg 	  in_use +=
   2331  1.1  mrg 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
   2332  1.1  mrg 
   2333  1.1  mrg 	  overhead += (sizeof (page_entry) - sizeof (long)
   2334  1.1  mrg 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
   2335  1.1  mrg 	}
   2336  1.1  mrg       fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
   2337  1.1  mrg 	       PRsa (10) "\n",
   2338  1.1  mrg 	       (uint64_t)OBJECT_SIZE (i),
   2339  1.1  mrg 	       SIZE_AMOUNT (allocated),
   2340  1.1  mrg 	       SIZE_AMOUNT (in_use),
   2341  1.1  mrg 	       SIZE_AMOUNT (overhead));
   2342  1.1  mrg       total_overhead += overhead;
   2343  1.1  mrg     }
   2344  1.1  mrg   fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
   2345  1.1  mrg 	   "Total",
   2346  1.1  mrg 	   SIZE_AMOUNT (G.bytes_mapped),
   2347  1.1  mrg 	   SIZE_AMOUNT (G.allocated),
   2348  1.1  mrg 	   SIZE_AMOUNT (total_overhead));
   2349  1.1  mrg 
   2350  1.1  mrg   if (GATHER_STATISTICS)
   2351  1.1  mrg     {
   2352  1.1  mrg       fprintf (stderr, "\nTotal allocations and overheads during "
   2353  1.1  mrg 	       "the compilation process\n");
   2354  1.1  mrg 
   2355  1.1  mrg       fprintf (stderr, "Total Overhead:                          "
   2356  1.1  mrg 	       PRsa (9) "\n",
   2357  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_overhead));
   2358  1.1  mrg       fprintf (stderr, "Total Allocated:                         "
   2359  1.1  mrg 	       PRsa (9) "\n",
   2360  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_allocated));
   2361  1.1  mrg 
   2362  1.1  mrg       fprintf (stderr, "Total Overhead  under  32B:              "
   2363  1.1  mrg 	       PRsa (9) "\n",
   2364  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_overhead_under32));
   2365  1.1  mrg       fprintf (stderr, "Total Allocated under  32B:              "
   2366  1.1  mrg 	       PRsa (9) "\n",
   2367  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_allocated_under32));
   2368  1.1  mrg       fprintf (stderr, "Total Overhead  under  64B:              "
   2369  1.1  mrg 	       PRsa (9) "\n",
   2370  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_overhead_under64));
   2371  1.1  mrg       fprintf (stderr, "Total Allocated under  64B:              "
   2372  1.1  mrg 	       PRsa (9) "\n",
   2373  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_allocated_under64));
   2374  1.1  mrg       fprintf (stderr, "Total Overhead  under 128B:              "
   2375  1.1  mrg 	       PRsa (9) "\n",
   2376  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_overhead_under128));
   2377  1.1  mrg       fprintf (stderr, "Total Allocated under 128B:              "
   2378  1.1  mrg 	       PRsa (9) "\n",
   2379  1.1  mrg 	       SIZE_AMOUNT (G.stats.total_allocated_under128));
   2380  1.1  mrg 
   2381  1.1  mrg       for (i = 0; i < NUM_ORDERS; i++)
   2382  1.1  mrg 	if (G.stats.total_allocated_per_order[i])
   2383  1.1  mrg 	  {
   2384  1.1  mrg 	    fprintf (stderr, "Total Overhead  page size %9" PRIu64 ":     "
   2385  1.1  mrg 		     PRsa (9) "\n",
   2386  1.1  mrg 		     (uint64_t)OBJECT_SIZE (i),
   2387  1.1  mrg 		     SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
   2388  1.1  mrg 	    fprintf (stderr, "Total Allocated page size %9" PRIu64 ":     "
   2389  1.1  mrg 		     PRsa (9) "\n",
   2390  1.1  mrg 		     (uint64_t)OBJECT_SIZE (i),
   2391  1.1  mrg 		     SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
   2392  1.1  mrg 	  }
    2393  1.1  mrg     }
   2394  1.1  mrg }
   2395  1.1  mrg 
   2396  1.1  mrg struct ggc_pch_ondisk
   2398  1.1  mrg {
   2399  1.1  mrg   unsigned totals[NUM_ORDERS];
   2400  1.1  mrg };
   2401  1.1  mrg 
   2402  1.1  mrg struct ggc_pch_data
   2403  1.1  mrg {
   2404  1.1  mrg   struct ggc_pch_ondisk d;
   2405  1.1  mrg   uintptr_t base[NUM_ORDERS];
   2406  1.1  mrg   size_t written[NUM_ORDERS];
   2407  1.1  mrg };
   2408  1.1  mrg 
   2409  1.1  mrg struct ggc_pch_data *
   2410  1.1  mrg init_ggc_pch (void)
   2411  1.1  mrg {
   2412  1.1  mrg   return XCNEW (struct ggc_pch_data);
   2413  1.1  mrg }
   2414  1.1  mrg 
   2415  1.1  mrg void
   2416  1.1  mrg ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
   2417  1.1  mrg 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
   2418  1.1  mrg {
   2419  1.1  mrg   unsigned order;
   2420  1.1  mrg 
   2421  1.1  mrg   if (size < NUM_SIZE_LOOKUP)
   2422  1.1  mrg     order = size_lookup[size];
   2423  1.1  mrg   else
   2424  1.1  mrg     {
   2425  1.1  mrg       order = 10;
   2426  1.1  mrg       while (size > OBJECT_SIZE (order))
   2427  1.1  mrg 	order++;
   2428  1.1  mrg     }
   2429  1.1  mrg 
   2430  1.1  mrg   d->d.totals[order]++;
   2431  1.1  mrg }
   2432  1.1  mrg 
   2433  1.1  mrg size_t
   2434  1.1  mrg ggc_pch_total_size (struct ggc_pch_data *d)
   2435  1.1  mrg {
   2436  1.1  mrg   size_t a = 0;
   2437  1.1  mrg   unsigned i;
   2438  1.1  mrg 
   2439  1.1  mrg   for (i = 0; i < NUM_ORDERS; i++)
   2440  1.1  mrg     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
   2441  1.1  mrg   return a;
   2442  1.1  mrg }
   2443  1.1  mrg 
   2444  1.1  mrg void
   2445  1.1  mrg ggc_pch_this_base (struct ggc_pch_data *d, void *base)
   2446  1.1  mrg {
   2447  1.1  mrg   uintptr_t a = (uintptr_t) base;
   2448  1.1  mrg   unsigned i;
   2449  1.1  mrg 
   2450  1.1  mrg   for (i = 0; i < NUM_ORDERS; i++)
   2451  1.1  mrg     {
   2452  1.1  mrg       d->base[i] = a;
   2453  1.1  mrg       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
   2454  1.1  mrg     }
   2455  1.1  mrg }
   2456  1.1  mrg 
   2457  1.1  mrg 
   2458  1.1  mrg char *
   2459  1.1  mrg ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
   2460  1.1  mrg 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
   2461  1.1  mrg {
   2462  1.1  mrg   unsigned order;
   2463  1.1  mrg   char *result;
   2464  1.1  mrg 
   2465  1.1  mrg   if (size < NUM_SIZE_LOOKUP)
   2466  1.1  mrg     order = size_lookup[size];
   2467  1.1  mrg   else
   2468  1.1  mrg     {
   2469  1.1  mrg       order = 10;
   2470  1.1  mrg       while (size > OBJECT_SIZE (order))
   2471  1.1  mrg 	order++;
   2472  1.1  mrg     }
   2473  1.1  mrg 
   2474  1.1  mrg   result = (char *) d->base[order];
   2475  1.1  mrg   d->base[order] += OBJECT_SIZE (order);
   2476  1.1  mrg   return result;
   2477  1.1  mrg }
   2478  1.1  mrg 
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

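/* Write the object X of SIZE bytes to the PCH file F, padding the
   entry out to the full object size of its order.  For instance, on a
   typical host a 600-byte object lands in a 1024-byte slot, leaving
   424 bytes of padding after it.  Once the last object of an order
   has been written, the file position is also advanced to the next
   page boundary.  */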
void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad
     the object out to OBJECT_SIZE (order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite () rather than fseek (),
	 and limits the chance the OS may try to flush any outstanding
	 writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error (input_location, "cannot write PCH file: %m");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error (input_location, "cannot write PCH file: %m");
	}
    }

  d->written[order]++;
  /* Once the last object of this order has been written, skip ahead
     to the next page boundary, matching the PAGE_ALIGN layout assumed
     by ggc_pch_total_size and ggc_pch_read.  */
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error (input_location, "cannot write PCH file: %m");
}

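/* All objects have been written; append the per-order totals so the
   reader can rebuild the layout, then release the PCH write state.  */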
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (unsigned i = G.by_depth_in_use; i--;)
    {
      page_entry *p = G.by_depth[i];
      p->index_by_depth = i;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old PTEs, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

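/* The PCH object data has just been brought back in at ADDR.  Read
   the per-order totals from F, discard every object allocated before
   the read, and build page-table entries covering the PCH pages.  */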
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error (input_location, "cannot read PCH file: %m");

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  /* Allocate space for the depth 1 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length () == 2);
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;
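      /* Mark every object on the page as in use.  The bitmap is
	 allocated with one extra trailing bit (BITMAP_SIZE with
	 num_objs + 1); setting that bit as well ensures nothing on a
	 PCH page ever appears free.  */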
      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *) addr;
}