
Lines Matching refs:chunk

91  *           in the associated arena chunk header maps.
431 * Chunk data structures.
437 /* Linkage for the chunk tree. */
441 * Pointer to the chunk that this tree node is responsible for. In some
443 * beginning of the corresponding chunk, so this field may point to this
446 void *chunk;
448 /* Total chunk size. */
485 /* Arena chunk header. */
488 /* Linkage for the arena's chunk tree. */
491 /* Arena that owns the chunk. */
516 * Map of pages within chunk that keeps track of free/large/small. For
620 * In order to avoid rapid chunk allocation/deallocation when an arena
621 * oscillates right on the cusp of needing a new chunk, cache the most
622 * recently freed chunk. This caching is disabled by opt_hint.
624 * There is one spare chunk per arena, rather than one spare total, in
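A hedged sketch of the spare-chunk cache described above: arena->spare, chunk_alloc, chunk_dealloc, and chunksize all appear later in this listing, but the control flow here is illustrative only, not the allocator's exact code.

    /* Illustrative sketch of the per-arena spare-chunk cache (assumed control flow). */
    static arena_chunk_t *
    arena_chunk_get_sketch(arena_t *arena)
    {
        arena_chunk_t *chunk;

        if (arena->spare != NULL) {
            /* Reuse the most recently freed chunk instead of mapping a new one. */
            chunk = arena->spare;
            arena->spare = NULL;
        } else
            chunk = (arena_chunk_t *)chunk_alloc(chunksize);
        return chunk;
    }

    static void
    arena_chunk_put_sketch(arena_t *arena, arena_chunk_t *chunk)
    {
        if (arena->spare == NULL)
            arena->spare = chunk;                   /* keep one spare per arena */
        else
            chunk_dealloc((void *)chunk, chunksize); /* already have a spare; unmap */
    }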
681 /* Various chunk-related settings. */
695 /* Protects chunk-related data structures. */
704 * Try to use brk for chunk-size allocations, due to address space constraints.
749 static chunk_node_t *base_chunk_nodes; /* LIFO cache of chunk nodes. */
772 /* Chunk statistics. */
841 static void chunk_dealloc(void *chunk, size_t size);
844 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
855 static void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
891 /* Return the chunk address for allocation address a. */
895 /* Return the chunk offset of address a. */
899 /* Return the smallest chunk multiple that is >= s. */
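With a power-of-two chunk size, the three helpers above reduce to mask arithmetic. A hedged sketch of how such macros are typically written; chunksize_mask (assumed to be chunksize - 1) is the only name not visible in this listing.

    /* Sketch only: chunksize_mask is assumed to equal chunksize - 1. */
    #define CHUNK_ADDR2BASE(a)   ((void *)((uintptr_t)(a) & ~chunksize_mask))
    #define CHUNK_ADDR2OFFSET(a) ((size_t)((uintptr_t)(a) & chunksize_mask))
    #define CHUNK_CEILING(s)     (((s) + chunksize_mask) & ~chunksize_mask)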
1000 * be chunk-aligned.
1016 * chunk-align the end of brk. Don't worry about
1017 * brk_cur not being chunk-aligned though.
1194 * Begin chunk management functions.
1205 return ptrcmp(a->chunk, b->chunk);
1269 void *ret, *chunk;
1287 chunk = tchunk->chunk;
1296 if ((uintptr_t)chunk >= (uintptr_t)brk_base
1297 && (uintptr_t)chunk < (uintptr_t)brk_max) {
1298 /* Re-use a previously freed brk chunk. */
1299 ret = chunk;
1303 if ((ret = pages_map(chunk, size)) != NULL) {
1341 * chunk-align the end of brk.
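A hedged sketch of the alignment step described above, not the allocator's exact brk code: the padding needed is simply the distance from the current break to the next chunk boundary, which is zero when brk is already aligned. The function name is illustrative; size is assumed to be a chunk multiple.

    #include <stdint.h>
    #include <unistd.h>

    /* Illustrative sketch: grow brk so the returned region is chunk-aligned. */
    static void *
    brk_chunk_alloc_sketch(size_t size, size_t chunksize)
    {
        size_t chunksize_mask = chunksize - 1;      /* chunksize is a power of two */
        void *brk_cur = sbrk(0);
        /* Padding from the current break to the next chunk boundary. */
        uintptr_t pad = (chunksize - ((uintptr_t)brk_cur & chunksize_mask)) &
            chunksize_mask;

        if (sbrk((intptr_t)(pad + size)) == (void *)-1)
            return NULL;                            /* brk could not grow */
        return (void *)((uintptr_t)brk_cur + pad);  /* chunk-aligned region */
    }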
1373 key.chunk = ret;
1376 && (uintptr_t)tchunk->chunk >= (uintptr_t)ret
1377 && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) {
1400 chunk_dealloc(void *chunk, size_t size)
1404 assert(chunk != NULL);
1405 assert(CHUNK_ADDR2BASE(chunk) == chunk);
1412 if ((uintptr_t)chunk >= (uintptr_t)brk_base
1413 && (uintptr_t)chunk < (uintptr_t)brk_max) {
1421 * Try to shrink the data segment if this chunk is at the end
1428 && (void *)((uintptr_t)chunk + size) == brk_max
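A hedged sketch of the shrink path above: when the freed chunk ends exactly at brk_max, the data segment can be shrunk with a negative sbrk() increment instead of recording the chunk for reuse. brk_max appears in this listing; the update here is illustrative only.

    /* Illustrative sketch of shrinking brk when the freed chunk is at its end. */
    if ((void *)((uintptr_t)chunk + size) == brk_max &&
        sbrk(-(intptr_t)size) != (void *)-1)
            brk_max = chunk;   /* the segment now ends where the chunk began */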
1441 madvise(chunk, size, MADV_FREE);
1444 * Iteratively create records of each chunk-sized
1445 * memory region that 'chunk' is comprised of, so that
1454 node->chunk = (void *)((uintptr_t)chunk
1462 pages_unmap(chunk, size);
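A hedged sketch of the record-keeping loop described a few lines above (the brk path). base_chunk_node_alloc and old_chunks are assumed names for the node cache (see the base_chunk_nodes line earlier) and the tree of reusable brk chunks; chunk_node_t, rb_tree_insert_node, chunk, and size appear in this listing.

    /* Illustrative sketch: record each chunk-sized piece of a multi-chunk brk region. */
    for (size_t offset = 0; offset < size; offset += chunksize) {
        chunk_node_t *node = base_chunk_node_alloc();

        if (node == NULL)
            break;
        /* One record per chunk-sized piece, so each can be reused independently. */
        node->chunk = (void *)((uintptr_t)chunk + offset);
        node->size = chunksize;
        rb_tree_insert_node(&old_chunks, node);
    }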
1465 * Make a record of the chunk's address, so that the address
1474 node->chunk = (void *)(uintptr_t)chunk;
1490 * End chunk management functions.
1718 arena_chunk_t *chunk;
1722 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1723 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
1725 total_pages = chunk->map[run_ind].npages;
1733 chunk->map[map_offset + i].npages = need_pages;
1734 chunk->map[map_offset + i].pos = i;
1741 chunk->map[map_offset].npages = rem_pages;
1742 chunk->map[map_offset].pos = POS_FREE;
1743 chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
1744 chunk->map[map_offset + rem_pages - 1].pos = POS_FREE;
1747 chunk->pages_used += need_pages;
1753 arena_chunk_t *chunk;
1756 chunk = arena->spare;
1759 rb_tree_insert_node(&arena->chunks, chunk);
1761 chunk = (arena_chunk_t *)chunk_alloc(chunksize);
1762 if (chunk == NULL)
1768 chunk->arena = arena;
1774 chunk->pages_used = 0;
1776 chunk->max_frun_npages = chunk_npages -
1778 chunk->min_frun_ind = arena_chunk_header_npages;
1783 chunk->map[arena_chunk_header_npages].npages = chunk_npages -
1785 chunk->map[arena_chunk_header_npages].pos = POS_FREE;
1786 chunk->map[chunk_npages - 1].npages = chunk_npages -
1788 chunk->map[chunk_npages - 1].pos = POS_FREE;
1790 rb_tree_insert_node(&arena->chunks, chunk);
1793 return (chunk);
1797 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
1801 * Remove chunk from the chunk tree, regardless of whether this chunk
1804 rb_tree_remove_node(&chunk->arena->chunks, chunk);
1813 arena->spare = chunk;
1816 chunk_dealloc((void *)chunk, chunksize);
1826 arena_chunk_t *chunk;
1835 * Search through the arena chunk tree for a large enough free run.
1843 chunk = NULL;
1848 chunk = chunk_tmp;
1855 chunk = chunk_tmp;
1858 if (chunk == NULL)
1861 * At this point, the chunk must have a cached run size large
1864 assert(need_npages <= chunk->max_frun_npages);
1871 assert(chunk->min_frun_ind >=
1873 for (i = chunk->min_frun_ind; i < chunk_npages;) {
1874 mapelm = &chunk->map[i];
1878 ((uintptr_t)chunk + (i <<
1892 if (i < chunk->min_frun_ind)
1893 chunk->min_frun_ind = i;
1899 * Search failure. Reset cached chunk->max_frun_npages.
1900 * chunk->min_frun_ind was already reset above (if
1903 rb_tree_remove_node(&arena->chunks, chunk);
1904 chunk->max_frun_npages = max_frun_npages;
1905 rb_tree_insert_node(&arena->chunks, chunk);
1910 * No usable runs. Create a new chunk from which to allocate the run.
1912 chunk = arena_chunk_alloc(arena);
1913 if (chunk == NULL)
1915 run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
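The lines above show the first-fit scan over a chunk's page map only piecewise. Below is a hedged, self-contained model of that scan; map_elm_t, POS_FREE, and the field names mirror the listing, but the exact struct layout and sentinel value are illustrative.

    #define POS_FREE ((unsigned)0xffffffffU)   /* assumed sentinel for a free run */

    typedef struct {
        unsigned npages;   /* run length stored at the run's first (and last) page */
        unsigned pos;      /* POS_FREE, or this page's position within its run */
    } map_elm_t;

    /*
     * Return the page index of the first free run of at least need_npages
     * pages, or -1 if none exists. Each run's first map entry stores its
     * length, so the scan can jump run by run instead of page by page.
     */
    static int
    first_fit_run(const map_elm_t *map, unsigned chunk_npages, unsigned need_npages)
    {
        unsigned i = 0;

        while (i < chunk_npages) {
            if (map[i].pos == POS_FREE && map[i].npages >= need_npages)
                return (int)i;
            i += map[i].npages;   /* skip over this run, free or allocated */
        }
        return -1;
    }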
1925 arena_chunk_t *chunk;
1928 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1930 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
1935 assert(run_pages == chunk->map[run_ind].npages);
1937 /* Subtract pages from count of pages used in chunk. */
1938 chunk->pages_used -= run_pages;
1941 assert(chunk->map[run_ind].npages == run_pages);
1942 chunk->map[run_ind].pos = POS_FREE;
1943 assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
1944 chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
1955 chunk->map[run_ind - 1].pos == POS_FREE) {
1959 prev_npages = chunk->map[run_ind - 1].npages;
1961 assert(chunk->map[run_ind].npages == prev_npages);
1962 assert(chunk->map[run_ind].pos == POS_FREE);
1965 chunk->map[run_ind].npages = run_pages;
1966 assert(chunk->map[run_ind].pos == POS_FREE);
1967 chunk->map[run_ind + run_pages - 1].npages = run_pages;
1968 assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
1972 chunk->map[run_ind + run_pages].pos == POS_FREE) {
1976 next_npages = chunk->map[run_ind + run_pages].npages;
1978 assert(chunk->map[run_ind + run_pages - 1].npages ==
1980 assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
1982 chunk->map[run_ind].npages = run_pages;
1983 chunk->map[run_ind].pos = POS_FREE;
1984 chunk->map[run_ind + run_pages - 1].npages = run_pages;
1985 assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
1988 if (chunk->map[run_ind].npages > chunk->max_frun_npages) {
1989 rb_tree_remove_node(&arena->chunks, chunk);
1990 chunk->max_frun_npages = chunk->map[run_ind].npages;
1991 rb_tree_insert_node(&arena->chunks, chunk);
1993 if (run_ind < chunk->min_frun_ind)
1994 chunk->min_frun_ind = run_ind;
1996 /* Deallocate chunk if it is now completely unused. */
1997 if (chunk->pages_used == 0)
1998 arena_chunk_dealloc(arena, chunk);
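A hedged model of the coalescing performed above, reusing map_elm_t and POS_FREE from the first-fit sketch earlier in this listing. Only the first and last map entries of a free run carry its length, so merging a newly freed run with free neighbours rewrites just those boundary entries, as the lines above do piecewise.

    /* Illustrative sketch: merge a newly freed run with adjacent free runs. */
    static void
    coalesce_free_run(map_elm_t *map, unsigned chunk_npages,
        unsigned run_ind, unsigned run_pages)
    {
        /* Merge with the previous run if it is free. */
        if (run_ind > 0 && map[run_ind - 1].pos == POS_FREE) {
            unsigned prev_npages = map[run_ind - 1].npages;

            run_ind -= prev_npages;
            run_pages += prev_npages;
        }
        /* Merge with the following run if it is free. */
        if (run_ind + run_pages < chunk_npages &&
            map[run_ind + run_pages].pos == POS_FREE)
                run_pages += map[run_ind + run_pages].npages;

        /* Only the first and last pages of the merged free run carry its size. */
        map[run_ind].npages = run_pages;
        map[run_ind].pos = POS_FREE;
        map[run_ind + run_pages - 1].npages = run_pages;
        map[run_ind + run_pages - 1].pos = POS_FREE;
    }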
2249 arena_palloc_trim(arena_t *arena, arena_chunk_t *chunk, unsigned pageind,
2261 chunk->map[pageind + i].npages = npages;
2262 chunk->map[pageind + i].pos = i;
2264 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)chunk + (pageind <<
2274 arena_chunk_t *chunk;
2289 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
2295 pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
2300 chunk->map[pageind + i].npages = npages;
2301 assert(chunk->map[pageind + i].pos == i);
2305 arena_palloc_trim(arena, chunk, pageind + npages,
2312 pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
2317 chunk->map[pageind + i].npages = npages;
2318 chunk->map[pageind + i].pos = i;
2322 arena_palloc_trim(arena, chunk,
2330 arena_palloc_trim(arena, chunk, pageind + npages,
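A hedged sketch of the alignment arithmetic behind the trimming above: given the start of an over-sized run, a power-of-two alignment, and log2 of the page size, compute the aligned result and how many whole leading pages can be handed back (the trailing trim is analogous). The function name and signature are illustrative.

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative sketch of locating the aligned result inside an over-sized run. */
    static void *
    palloc_align_sketch(uintptr_t run_addr, size_t alignment,
        unsigned pagesize_2pow, size_t *lead_pages)
    {
        uintptr_t aligned = (run_addr + (alignment - 1)) & ~((uintptr_t)alignment - 1);

        *lead_pages = (size_t)((aligned - run_addr) >> pagesize_2pow);
        return (void *)aligned;
    }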
2353 arena_chunk_t *chunk;
2364 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2365 pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
2367 mapelm = &chunk->map[pageind];
2368 if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
2374 run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
2434 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
2442 assert(chunk->arena == arena);
2446 pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
2448 mapelm = &chunk->map[pageind];
2449 if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
2458 run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
2656 /* Allocate a chunk node with which to track the chunk. */
2668 node->chunk = ret;
2687 /* Only handles large allocations that require more than chunk alignment. */
2696 * This allocation requires alignment that is even larger than chunk
2712 /* Allocate a chunk node with which to track the chunk. */
2748 node->chunk = ret;
2811 key.chunk = __UNCONST(ptr);
2814 assert(node->chunk == ptr);
2832 node->chunk = newptr;
2872 /* The old allocation is a chunk. */
2895 key.chunk = ptr;
2898 assert(node->chunk == ptr);
2908 /* Unmap chunk. */
2911 memset(node->chunk, 0x5a, node->size);
2913 chunk_dealloc(node->chunk, node->size);
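A hedged sketch pulling the scattered huge-deallocation lines above into one sequence. The tree name huge, opt_junk, and base_chunk_node_dealloc are assumed names; the lookup by key, the 0x5a junk fill, and the chunk_dealloc call mirror the listing.

    /* Illustrative sketch of freeing a huge (whole-chunk) allocation. */
    chunk_node_t key, *node;

    key.chunk = __UNCONST(ptr);
    node = rb_tree_find_node(&huge, &key);
    assert(node != NULL);
    assert(node->chunk == ptr);
    rb_tree_remove_node(&huge, node);

    if (opt_junk)
        memset(node->chunk, 0x5a, node->size);   /* poison before unmapping */
    chunk_dealloc(node->chunk, node->size);      /* unmap the whole mapping */
    base_chunk_node_dealloc(node);               /* recycle the tracking node */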
3055 * This may be a re-used brk chunk. Therefore, zero
3070 arena_chunk_t *chunk;
3074 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3075 if (chunk != ptr) {
3077 assert(chunk->arena->magic == ARENA_MAGIC);
3083 /* Chunk (huge allocation). */
3088 key.chunk = __UNCONST(ptr);
3122 arena_chunk_t *chunk;
3126 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3127 if (chunk != ptr) {
3129 arena_dalloc(chunk->arena, chunk, ptr);
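A hedged sketch of the dispatch shown above: a pointer that is not itself chunk-aligned lies inside an arena chunk (a small or large run), while a chunk-aligned pointer must be a huge, whole-chunk allocation, since arena allocations never start at the chunk header. huge_dalloc is an assumed name for the huge-deallocation path.

    /* Illustrative sketch of the free-path dispatch on chunk alignment. */
    static void
    idalloc_sketch(void *ptr)
    {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);

        if (chunk != ptr)
            arena_dalloc(chunk->arena, chunk, ptr);   /* run-based allocation */
        else
            huge_dalloc(ptr);                         /* whole-chunk allocation */
    }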
3165 _malloc_message("Chunk size: ", size_t2s(chunksize, s), "", "");
3202 /* Print chunk stats. */
3218 /* Print chunk stats. */
3532 * Allocate a base chunk here, since it doesn't actually have to be
3533 * chunk-aligned. Doing this before allocating any other chunks allows