Lines Matching defs:arena
91 * in the associated arena chunk header maps.
463 * Arena data structures.
485 /* Arena chunk header. */
488 /* Linkage for the arena's chunk tree. */
491 /* Arena that owns the chunk. */
492 arena_t *arena;
607 /* All operations on this arena require that mtx be locked. */
615 * Tree of chunks this arena manages.
620 * In order to avoid rapid chunk allocation/deallocation when an arena
624 * There is one spare chunk per arena, rather than one spare total, in
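The spare-chunk comment above (source lines 620-624) describes a one-element cache that damps chunk map/unmap churn. A minimal, self-contained sketch of that pattern; the struct layout, the arena_chunk_get/arena_chunk_put names, and the malloc()/free() stand-ins are illustrative assumptions, not the listing's real definitions:

    #include <stdlib.h>

    typedef struct chunk {
        unsigned char pages[1 << 20];     /* illustrative 1 MiB chunk body */
    } chunk_t;

    typedef struct arena {
        chunk_t *spare;                   /* at most one cached chunk */
    } arena_t;

    /* Allocation side: prefer the cached spare over mapping a new chunk
     * (cf. source lines 1755-1765). */
    static chunk_t *
    arena_chunk_get(arena_t *a)
    {
        if (a->spare != NULL) {
            chunk_t *c = a->spare;

            a->spare = NULL;
            return c;
        }
        return malloc(sizeof(chunk_t));   /* stand-in for chunk_alloc() */
    }

    /* Deallocation side: the most recently freed chunk becomes the spare,
     * and any previously cached one is released (cf. lines 1807-1813). */
    static void
    arena_chunk_put(arena_t *a, chunk_t *c)
    {
        if (a->spare != NULL)
            free(a->spare);               /* stand-in for chunk_dealloc() */
        a->spare = c;
    }

Caching per arena rather than globally keeps the spare lock-free under the arena's own mutex, at the cost of one idle chunk per arena.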
835 static void stats_print(arena_t *arena);
842 static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
843 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
844 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
845 static arena_run_t *arena_run_alloc(arena_t *arena, size_t size);
846 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size);
847 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
848 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
850 static void *arena_malloc(arena_t *arena, size_t size);
851 static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
855 static void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
856 static void arena_new(arena_t *arena);
1124 stats_print(arena_t *arena)
1133 arena->stats.allocated_small, "", arena->stats.nmalloc_small,
1134 arena->stats.ndalloc_small);
1136 arena->stats.allocated_large, "", arena->stats.nmalloc_large,
1137 arena->stats.ndalloc_large);
1139 arena->stats.allocated_small + arena->stats.allocated_large,
1140 arena->stats.mapped,
1141 arena->stats.nmalloc_small + arena->stats.nmalloc_large,
1142 arena->stats.ndalloc_small + arena->stats.ndalloc_large);
1147 if (arena->bins[i].stats.nrequests == 0) {
1167 arena->bins[i].reg_size,
1168 arena->bins[i].nregs,
1169 arena->bins[i].run_size >> pagesize_2pow,
1170 arena->bins[i].stats.nrequests,
1171 arena->bins[i].stats.nruns,
1172 arena->bins[i].stats.reruns,
1173 arena->bins[i].stats.highruns,
1174 arena->bins[i].stats.curruns);
1494 * Begin arena.
1498 * Choose a per-CPU arena.
1518 arena_t *arena;
1521 arena = arenas[thr_curcpu()];
1522 if (__predict_true(arena != NULL))
1523 return arena;
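The hits at source lines 1518-1523 are the fast path of the per-CPU arena chooser. A hedged sketch of the whole shape, fast path plus lazy slow path; ncpus, the stubbed thr_curcpu(), and the stub body of arenas_extend() are assumptions for illustration (the listing's real arenas_extend(), near line 2604, creates and publishes the arena under a lock):

    #include <stddef.h>

    #ifndef __predict_true
    #define __predict_true(e) __builtin_expect((e) != 0, 1)
    #endif

    typedef struct arena { unsigned magic; } arena_t;   /* stand-in type */

    enum { ncpus = 64 };                  /* illustrative CPU count */
    static arena_t *arenas[ncpus];        /* one slot per CPU */

    static unsigned thr_curcpu(void) { return 0; }      /* stub for the hook */

    /* Stub slow path: create the arena for this slot and publish it. */
    static arena_t *
    arenas_extend(unsigned ind)
    {
        static arena_t a;

        arenas[ind] = &a;
        return &a;
    }

    /* Per-CPU chooser: fast path indexes by CPU (cf. lines 1518-1523);
     * the slow path lazily creates the arena on first use. */
    static arena_t *
    choose_arena(void)
    {
        unsigned ind = thr_curcpu();
        arena_t *arena = arenas[ind];

        if (__predict_true(arena != NULL))
            return arena;
        return arenas_extend(ind);
    }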
1716 arena_run_split(arena_t *arena, arena_run_t *run, size_t size)
1751 arena_chunk_alloc(arena_t *arena)
1755 if (arena->spare != NULL) {
1756 chunk = arena->spare;
1757 arena->spare = NULL;
1759 rb_tree_insert_node(&arena->chunks, chunk);
1765 arena->stats.mapped += chunksize;
1768 chunk->arena = arena;
1790 rb_tree_insert_node(&arena->chunks, chunk);
1797 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
1802 * will be cached, so that the arena does not use it.
1804 rb_tree_remove_node(&chunk->arena->chunks, chunk);
1807 if (arena->spare != NULL) {
1808 chunk_dealloc((void *)arena->spare, chunksize);
1810 arena->stats.mapped -= chunksize;
1813 arena->spare = chunk;
1815 assert(arena->spare == NULL);
1818 arena->stats.mapped -= chunksize;
1824 arena_run_alloc(arena_t *arena, size_t size)
1835 * Search through the arena chunk tree for a large enough free run.
1842 rb_node_t *node = arena->chunks.rbt_root;
1881 arena_run_split(arena, run,
1903 rb_tree_remove_node(&arena->chunks, chunk);
1905 rb_tree_insert_node(&arena->chunks, chunk);
1912 chunk = arena_chunk_alloc(arena);
1918 arena_run_split(arena, run, size);
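arena_run_alloc() (source lines 1824-1918) searches the arena's chunk tree for a chunk holding enough contiguous free pages, splits a run out of the winner, and falls back to arena_chunk_alloc() when no chunk qualifies. A simplified stand-in for the per-chunk part of that search, with an array-backed bitmap in place of the chunk header map and no tree walk:

    #include <stdbool.h>
    #include <stddef.h>

    enum { chunk_pages = 256 };           /* illustrative chunk size in pages */

    typedef struct chunk {
        bool page_used[chunk_pages];      /* stand-in for the header map */
    } chunk_t;

    /* First-fit within one chunk: find npages (>= 1) contiguous free
     * pages, mark them allocated, and return the first page index, or
     * -1 if this chunk cannot satisfy the request. */
    static int
    run_alloc_in_chunk(chunk_t *c, size_t npages)
    {
        size_t run = 0;

        for (size_t i = 0; i < chunk_pages; i++) {
            run = c->page_used[i] ? 0 : run + 1;
            if (run == npages) {
                size_t first = i + 1 - npages;

                for (size_t j = first; j <= i; j++)
                    c->page_used[j] = true;   /* split the run out */
                return (int)first;
            }
        }
        return -1;
    }

The real code repeats this probe across the red-black tree of chunks (lines 1842-1903) and re-inserts a chunk after its bookkeeping changes, which is why remove/insert pairs on arena->chunks appear throughout the listing.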
1923 arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
1989 rb_tree_remove_node(&arena->chunks, chunk);
1991 rb_tree_insert_node(&arena->chunks, chunk);
1998 arena_chunk_dealloc(arena, chunk);
2002 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
2019 run = arena_run_alloc(arena, bin->run_size);
2053 arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
2069 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
2072 bin->runcur = arena_bin_nonfull_run_get(arena, bin);
2078 return (arena_bin_malloc_easy(arena, bin, bin->runcur));
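Source lines 2053-2078 split small allocation into an easy path that carves a region from the bin's current run and a hard path that first installs a non-full run as runcur. A simplified stand-in of that two-level shape; the bump-pointer region bookkeeping below replaces the listing's region bitmap, and the calloc()-based stub ignores reuse of existing non-full runs:

    #include <stdlib.h>

    enum { nregs = 64, reg_size = 32 };   /* illustrative run geometry */

    typedef struct run {
        unsigned next;                    /* bump index of next free region */
        unsigned char regs[nregs][reg_size];
    } run_t;

    typedef struct bin {
        run_t *runcur;                    /* current run; NULL forces hard path */
    } bin_t;

    /* Stub: the listing's arena_bin_nonfull_run_get() first reuses an
     * existing non-full run before allocating a fresh one. */
    static run_t *
    bin_nonfull_run_get(bin_t *bin)
    {
        (void)bin;
        return calloc(1, sizeof(run_t));
    }

    /* Easy path: runcur is known to have a free region; carve one out. */
    static void *
    bin_malloc_easy(bin_t *bin, run_t *run)
    {
        void *ret = run->regs[run->next++];

        if (run->next == nregs)
            bin->runcur = NULL;           /* run full: force hard path next */
        return ret;
    }

    /* Hard path: refresh runcur, then retry the easy path. */
    static void *
    bin_malloc_hard(bin_t *bin)
    {
        bin->runcur = bin_nonfull_run_get(bin);
        if (bin->runcur == NULL)
            return NULL;
        return bin_malloc_easy(bin, bin->runcur);
    }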
2166 arena_malloc(arena_t *arena, size_t size)
2170 assert(arena != NULL);
2171 assert(arena->magic == ARENA_MAGIC);
2184 bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
2198 bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
2203 bin = &arena->bins[ntbins + nqbins
2208 malloc_mutex_lock(&arena->mtx);
2210 ret = arena_bin_malloc_easy(arena, bin, run);
2212 ret = arena_bin_malloc_hard(arena, bin);
2215 malloc_mutex_unlock(&arena->mtx);
2221 arena->stats.nmalloc_small++;
2222 arena->stats.allocated_small += size;
2227 malloc_mutex_lock(&arena->mtx);
2228 ret = (void *)arena_run_alloc(arena, size);
2230 malloc_mutex_unlock(&arena->mtx);
2234 arena->stats.nmalloc_large++;
2235 arena->stats.allocated_large += size;
2239 malloc_mutex_unlock(&arena->mtx);
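arena_malloc() (source lines 2166-2239) picks a bin by size class in three bands: tiny powers of two below the quantum (line 2184), quantum-spaced sizes (line 2198), then sub-page powers of two (line 2203), taking the arena mutex only once a bin or a large run is chosen. A hedged sketch of the band-to-bin indexing; every constant here is an illustrative assumption, not the listing's build configuration:

    #include <stddef.h>
    #include <strings.h>                  /* ffs() */

    /* Illustrative parameters: tiny classes 2/4/8, quantum 16,
     * quantum-spaced classes up to 512, 4 KiB pages. */
    enum {
        tiny_min_2pow = 1,
        quantum_2pow  = 4,
        quantum       = 1 << quantum_2pow,
        small_max     = 512,
        ntbins        = quantum_2pow - tiny_min_2pow,  /* 3 tiny bins */
        nqbins        = small_max >> quantum_2pow,     /* 32 quantum bins */
    };

    /* Round up to the next power of two (sketch of pow2_ceil()). */
    static size_t
    pow2_ceil(size_t x)
    {
        size_t p = 1;

        while (p < x)
            p <<= 1;
        return p;
    }

    /* Map a small request (at most half a page; larger requests take
     * the large path) to a bin index, mirroring lines 2184-2203. */
    static unsigned
    size_to_bin(size_t size)
    {
        if (size <= quantum / 2) {
            /* Tiny band: power-of-two classes below the quantum. */
            size = pow2_ceil(size);
            return (unsigned)ffs((int)(size >> (tiny_min_2pow + 1)));
        }
        if (size <= small_max) {
            /* Quantum band: round up to a quantum multiple. */
            size = (size + quantum - 1) & ~(size_t)(quantum - 1);
            return ntbins + (unsigned)(size >> quantum_2pow) - 1;
        }
        /* Sub-page band: power-of-two classes above small_max. */
        size = pow2_ceil(size);
        return ntbins + nqbins +
            (unsigned)ffs((int)(size >> (ffs(small_max) + 1)));
    }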
2249 arena_palloc_trim(arena_t *arena, arena_chunk_t *chunk, unsigned pageind,
2264 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)chunk + (pageind <<
2270 arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
2282 malloc_mutex_lock(&arena->mtx);
2283 ret = (void *)arena_run_alloc(arena, alloc_size);
2285 malloc_mutex_unlock(&arena->mtx);
2305 arena_palloc_trim(arena, chunk, pageind + npages,
2322 arena_palloc_trim(arena, chunk,
2330 arena_palloc_trim(arena, chunk, pageind + npages,
2336 arena->stats.nmalloc_large++;
2337 arena->stats.allocated_large += size;
2339 malloc_mutex_unlock(&arena->mtx);
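arena_palloc() (source lines 2270-2339) implements aligned allocation by over-allocating: it grabs a run of alloc_size bytes, aligns the result pointer up, and hands the leading and trailing whole pages back through arena_palloc_trim(). A small arithmetic sketch of that plan, assuming size, alignment, and alloc_size are all page multiples and the run is page-aligned (the struct and function names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    enum { pagesize = 4096 };             /* illustrative page size */

    /* Trim plan for an aligned allocation carved from an oversized run. */
    struct palloc_plan {
        uintptr_t ret;                    /* aligned result address */
        size_t lead_pages;                /* pages trimmed before ret */
        size_t trail_pages;               /* pages trimmed after ret + size */
    };

    static struct palloc_plan
    palloc_plan(uintptr_t run, size_t size, size_t alignment,
        size_t alloc_size)
    {
        struct palloc_plan p;

        /* First alignment-multiple address at or above the run start. */
        p.ret = (run + (alignment - 1)) & ~(uintptr_t)(alignment - 1);
        p.lead_pages = (p.ret - run) / pagesize;
        p.trail_pages = (alloc_size - (p.ret - run) - size) / pagesize;
        return p;
    }

The two arena_palloc_trim() calls visible at lines 2305-2330 correspond to returning the lead_pages and trail_pages runs computed here.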
2361 * No arena data structures that we query here can change in a way that
2434 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
2440 assert(arena != NULL);
2441 assert(arena->magic == ARENA_MAGIC);
2442 assert(chunk->arena == arena);
2467 malloc_mutex_lock(&arena->mtx);
2487 arena_run_dalloc(arena, run, bin->run_size);
2510 arena->stats.allocated_small -= size;
2511 arena->stats.ndalloc_small++;
2522 malloc_mutex_lock(&arena->mtx);
2523 arena_run_dalloc(arena, (arena_run_t *)ptr, size);
2525 arena->stats.allocated_large -= size;
2526 arena->stats.ndalloc_large++;
2530 malloc_mutex_unlock(&arena->mtx);
2534 arena_new(arena_t *arena)
2540 malloc_mutex_init(&arena->mtx);
2543 memset(&arena->stats, 0, sizeof(arena_stats_t));
2547 rb_tree_init(&arena->chunks, &arena_chunk_tree_ops);
2548 arena->spare = NULL;
2555 bin = &arena->bins[i];
2569 bin = &arena->bins[i];
2586 bin = &arena->bins[i];
2600 arena->magic = ARENA_MAGIC;
2604 /* Create a new arena and insert it into the arenas array at index ind. */
2626 ": (malloc) Error initializing arena\n", "", "");
2634 * End arena.
3077 assert(chunk->arena->magic == ARENA_MAGIC);
3129 arena_dalloc(chunk->arena, chunk, ptr);
3173 arena_t *arena;
3226 /* Print stats for each arena. */
3228 arena = arenas[i];
3229 if (arena != NULL) {
3231 "\narenas[%u] @ %p\n", i, arena);
3232 malloc_mutex_lock(&arena->mtx);
3233 stats_print(arena);
3234 malloc_mutex_unlock(&arena->mtx);
3554 * Initialize one arena here. The rest are lazily created in