/src/external/gpl3/gdb.old/dist/gdb/testsuite/gdb.base/

print-internal-string.c
    23  static char arena[256];    variable
    26  will always return the same pointer (to arena). This does mean we can't
    28  simple test. On each malloc call the contents of arena are reset, which
    35  /* Reset the contents of arena, and ensure there's a null-character at
    37  memset (arena, 'X', sizeof (arena));
    38  arena [sizeof (arena) - 1] = '\0';
    39  if (size > sizeof (arena))
    41  return arena;
    [all...]
/src/external/gpl3/gdb/dist/gdb/testsuite/gdb.base/

print-internal-string.c
    23  static char arena[256];    variable
    26  will always return the same pointer (to arena). This does mean we can't
    28  simple test. On each malloc call the contents of arena are reset, which
    35  /* Reset the contents of arena, and ensure there's a null-character at
    37  memset (arena, 'X', sizeof (arena));
    38  arena [sizeof (arena) - 1] = '\0';
    39  if (size > sizeof (arena))
    41  return arena;
    [all...]
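Both gdb trees carry the same test fixture: a malloc replacement backed by one static buffer ("arena"), so every allocation returns the same pointer and the contents are predictable on each call. A minimal self-contained sketch of that idiom, reconstructed from the hit above; the function signature and the over-size branch are elided in the search hit, so those parts are assumptions:

    #include <stddef.h>
    #include <string.h>

    static char arena[256];

    /* Fake malloc: every call resets arena to a run of 'X' characters
       with a trailing null-character, then hands back the same static
       buffer, so the test always sees a known string.  */
    void *
    malloc (size_t size)
    {
      memset (arena, 'X', sizeof (arena));
      arena[sizeof (arena) - 1] = '\0';
      if (size > sizeof (arena))
        return NULL;  /* Assumed failure path; the hit elides line 40.  */
      return arena;
    }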
/src/external/bsd/jemalloc/dist/include/jemalloc/internal/

arena_inlines_a.h
     5  arena_ind_get(const arena_t *arena) {
     6          return arena->ind;
    10  arena_internal_add(arena_t *arena, size_t size) {
    11          atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    15  arena_internal_sub(arena_t *arena, size_t size) {
    16          atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    20  arena_internal_get(arena_t *arena) {
    21          return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
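These inline helpers maintain arena->stats.internal, a running count of allocator-internal metadata bytes, as an atomic updated with relaxed ordering: the counter is purely statistical, so it never needs to synchronize with other memory operations. atomic_fetch_add_zu and ATOMIC_RELAXED are jemalloc's size_t wrappers over C11 atomics; a standalone sketch of the same pattern using stdatomic.h directly:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct {
        /* Bytes of allocator-internal metadata charged to this arena. */
        atomic_size_t internal;
    } arena_stats_sketch_t;

    static inline void
    internal_add(arena_stats_sketch_t *stats, size_t size) {
        /* Relaxed ordering: the value is advisory, never a synchronizer. */
        atomic_fetch_add_explicit(&stats->internal, size,
            memory_order_relaxed);
    }

    static inline void
    internal_sub(arena_stats_sketch_t *stats, size_t size) {
        atomic_fetch_sub_explicit(&stats->internal, size,
            memory_order_relaxed);
    }

    static inline size_t
    internal_get(arena_stats_sketch_t *stats) {
        return atomic_load_explicit(&stats->internal, memory_order_relaxed);
    }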
arena_externs.h
    37  void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    40  void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    45  void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
    46  edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    48  void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    50  void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    52  void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    54  bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    56  ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
    57  void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread
    [all...]

jemalloc_internal_inlines_b.h
    18  /* Set new arena/tcache associations. */
    30  /* Choose an arena based on a per-thread value. */
    32  arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
    35          if (arena != NULL) {
    36                  return arena;
    39          /* During reentrancy, arena 0 is the safest bet. */
    51          if (tcache_slow->arena != NULL) {
    53                  assert(tcache_slow->arena ==
    55          if (tcache_slow->arena != ret) {
    67           * Note that for percpu arena, if the current arena is outside of th
    [all...]
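arena_choose_impl shows the selection order for an allocation's arena: an explicitly supplied arena always wins; during reentrancy (the allocator called from inside itself) arena 0 is used, since it is guaranteed to exist and be initialized; otherwise the thread's cached association is consulted. The hit elides most of the body, so the sketch below reproduces only that decision order, with simplified stand-in types:

    #include <stddef.h>

    /* Simplified stand-ins for jemalloc's types: tsd_t carries the
       per-thread state the snippet consults. */
    typedef struct arena_s {
        unsigned ind;
    } arena_t;

    typedef struct {
        int reentrancy_level;   /* >0 while the allocator re-enters itself */
        arena_t *assoc;         /* the thread's cached arena association */
    } tsd_t;

    static arena_t arena0;      /* arena 0: always initialized, always safe */

    /* Decision order from arena_choose_impl(): explicit argument first,
       arena 0 under reentrancy, otherwise the per-thread association. */
    static arena_t *
    arena_choose_sketch(tsd_t *tsd, arena_t *arena) {
        if (arena != NULL) {
            return arena;
        }
        if (tsd->reentrancy_level > 0) {
            return &arena0;
        }
        return tsd->assoc;
    }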
/src/external/bsd/jemalloc/include/jemalloc/internal/

arena_inlines_a.h
     5  arena_ind_get(const arena_t *arena) {
     6          return arena->ind;
    10  arena_internal_add(arena_t *arena, size_t size) {
    11          atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    15  arena_internal_sub(arena_t *arena, size_t size) {
    16          atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    20  arena_internal_get(arena_t *arena) {
    21          return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);

arena_externs.h
    37  void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    40  void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    45  void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
    46  edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    48  void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    50  void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    52  void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    54  bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    56  ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
    57  void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread
    [all...]

jemalloc_internal_inlines_b.h
    18  /* Set new arena/tcache associations. */
    30  /* Choose an arena based on a per-thread value. */
    32  arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
    35          if (arena != NULL) {
    36                  return arena;
    39          /* During reentrancy, arena 0 is the safest bet. */
    51          if (tcache_slow->arena != NULL) {
    53                  assert(tcache_slow->arena ==
    55          if (tcache_slow->arena != ret) {
    67           * Note that for percpu arena, if the current arena is outside of th
    [all...]

extent_externs.h
    15  extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
    16  void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
    18  extent_hooks_t *extent_hooks_get(arena_t *arena);
    19  extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
    34  extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
    38  void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
    40  extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
    45  extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    48  void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
    49  void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    [all...]
/src/external/bsd/jemalloc.old/dist/include/jemalloc/internal/

arena_inlines_a.h
     5  arena_ind_get(const arena_t *arena) {
     6          return arena->ind;
    10  arena_internal_add(arena_t *arena, size_t size) {
    11          atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    15  arena_internal_sub(arena_t *arena, size_t size) {
    16          atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    20  arena_internal_get(arena_t *arena) {
    21          return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);

arena_externs.h
    37  void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    40  void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    45  void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
    46  edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    48  void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    50  void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    52  void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    54  bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    56  ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
    57  void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread
    [all...]

jemalloc_internal_inlines_b.h
    18  /* Set new arena/tcache associations. */
    30  /* Choose an arena based on a per-thread value. */
    32  arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
    35          if (arena != NULL) {
    36                  return arena;
    39          /* During reentrancy, arena 0 is the safest bet. */
    51          if (tcache_slow->arena != NULL) {
    53                  assert(tcache_slow->arena ==
    55          if (tcache_slow->arena != ret) {
    67           * Note that for percpu arena, if the current arena is outside of th
    [all...]

extent_externs.h
    15  extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
    16  void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
    18  extent_hooks_t *extent_hooks_get(arena_t *arena);
    19  extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
    34  extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
    38  void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
    40  extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
    45  extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    48  void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
    49  void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    [all...]
/src/external/bsd/jemalloc.old/include/jemalloc/internal/

arena_inlines_a.h
     5  arena_ind_get(const arena_t *arena) {
     6          return arena->ind;
    10  arena_internal_add(arena_t *arena, size_t size) {
    11          atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    15  arena_internal_sub(arena_t *arena, size_t size) {
    16          atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
    20  arena_internal_get(arena_t *arena) {
    21          return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);

arena_externs.h
    37  void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    40  void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    45  void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
    46  edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    48  void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    50  void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    52  void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    54  bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    56  ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
    57  void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread
    [all...]

jemalloc_internal_inlines_b.h
    18  /* Set new arena/tcache associations. */
    30  /* Choose an arena based on a per-thread value. */
    32  arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
    35          if (arena != NULL) {
    36                  return arena;
    39          /* During reentrancy, arena 0 is the safest bet. */
    51          if (tcache_slow->arena != NULL) {
    53                  assert(tcache_slow->arena ==
    55          if (tcache_slow->arena != ret) {
    67           * Note that for percpu arena, if the current arena is outside of th
    [all...]

extent_externs.h
    15  extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
    16  void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
    18  extent_hooks_t *extent_hooks_get(arena_t *arena);
    19  extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
    34  extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
    38  void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
    40  extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
    45  extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    48  void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
    49  void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    [all...]
/src/external/bsd/jemalloc/dist/test/unit/

oversize_threshold.c
     6  arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp,
    10          malloc_snprintf(buf, sizeof(buf), mallctl_str, arena);
    22          unsigned arena;    local
    23          size_t arena_sz = sizeof(arena);
    24          err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
    25          expect_d_eq(0, err, "Arena creation failed");
    29          arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
    34          arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold
    75          unsigned arena;    local
    [all...]
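The test's arena_mallctl helper shows the standard way to drive per-arena controls: format the arena index into the mallctl name, then issue the call. The sketch below uses jemalloc's public mallctl interface with plain snprintf standing in for the internal malloc_snprintf, and assumes an unprefixed jemalloc build (on some platforms the entry point is je_mallctl):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    /* Format the arena index into a mallctl name such as
       "arena.3.oversize_threshold" and issue the control call. */
    static int
    arena_mallctl(const char *fmt, unsigned arena, void *oldp,
        size_t *oldlenp, void *newp, size_t newlen) {
        char buf[256];
        snprintf(buf, sizeof(buf), fmt, arena);
        return mallctl(buf, oldp, oldlenp, newp, newlen);
    }

    int
    main(void) {
        unsigned arena;
        size_t arena_sz = sizeof(arena);
        /* Create a fresh arena; its index is returned through oldp. */
        if (mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0)
            != 0) {
            return 1;
        }
        /* Read the new arena's oversize threshold. */
        size_t threshold;
        size_t sz = sizeof(threshold);
        if (arena_mallctl("arena.%u.oversize_threshold", arena,
            &threshold, &sz, NULL, 0) != 0) {
            return 1;
        }
        printf("arena %u oversize_threshold: %zu\n", arena, threshold);
        return 0;
    }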
/src/external/bsd/jemalloc.old/dist/test/unit/

oversize_threshold.c
     6  arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp,
    10          malloc_snprintf(buf, sizeof(buf), mallctl_str, arena);
    22          unsigned arena;    local
    23          size_t arena_sz = sizeof(arena);
    24          err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
    25          expect_d_eq(0, err, "Arena creation failed");
    29          arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
    34          arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold
    75          unsigned arena;    local
    [all...]
/src/external/bsd/jemalloc/dist/src/

arena.c
      63  static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
      65  static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
      68  arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
      74  arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
      77          *nthreads += arena_nthreads_get(arena, false);
      78          *dss = dss_prec_names[arena_dss_prec_get(arena)];
      79          *dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
      80          *muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
      81          pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
      85  arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads
     637          arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);    local
     642          arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);    local
     649          arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);    local
     779          arena_t *arena = arena_get(tsdn, i, false);    local
    1389          arena_t *arena = arena_get_from_edata(edata);    local
    1425          arena_t *arena = arena_get_from_edata(edata);    local
    1600          arena_t *arena;    local
    1695          pre_reentrancy(tsdn_tsd(tsdn), arena);    local
    [all...]
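arena_basic_stats_merge illustrates the reporting pattern used throughout arena.c: the arena writes its statistics into caller-owned out-parameters, accumulating counters such as nthreads so that one loop over all arenas yields process-wide totals, while per-arena settings like the decay times are simply copied out; page accounting is delegated to the pa_shard. A simplified stand-in sketch of that shape:

    #include <stddef.h>
    #include <sys/types.h>

    /* Simplified stand-in for the fields the snippet touches. */
    typedef struct {
        unsigned nthreads;
        ssize_t dirty_decay_ms;
        size_t nactive;
    } arena_sketch_t;

    static void
    basic_stats_merge(const arena_sketch_t *arena, unsigned *nthreads,
        ssize_t *dirty_decay_ms, size_t *nactive) {
        *nthreads += arena->nthreads;            /* accumulated across arenas */
        *dirty_decay_ms = arena->dirty_decay_ms; /* per-arena setting, copied */
        *nactive += arena->nactive;              /* accumulated page count */
    }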
large.c
    14  large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
    17          return large_palloc(tsdn, arena, usize, CACHELINE, zero);
    21  large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    27          assert(!tsdn_null(tsdn) || arena != NULL);
    35          arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
    37          if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
    38              arena, usize, alignment, zero)) == NULL) {
    43          if (!arena_is_auto(arena)) {
    45                  malloc_mutex_lock(tsdn, &arena->large_mtx)
    56          arena_t *arena = arena_get_from_edata(edata);    local
    85          arena_t *arena = arena_get_from_edata(edata);    local
   271          arena_t *arena = arena_get_from_edata(edata);    local
    [all...]
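large.c shows a two-level entry point: large_malloc is simply large_palloc with CACHELINE alignment, and large_palloc resolves a NULL arena through arena_choose_maybe_huge before allocating the extent. A self-contained sketch of that delegation, with libc stand-ins for the elided extent machinery:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    #define CACHELINE 64

    typedef struct { unsigned ind; } arena_t;

    static arena_t default_arena;

    /* Stand-in for arena_choose_maybe_huge(): resolve NULL to the
       thread's arena (here, a fixed default). */
    static arena_t *
    choose_arena(arena_t *arena, size_t usize) {
        (void)usize;
        return arena != NULL ? arena : &default_arena;
    }

    /* Stand-in for arena_extent_alloc_large(), backed by libc. */
    static void *
    extent_alloc_large(arena_t *arena, size_t usize, size_t alignment,
        bool zero) {
        void *p = NULL;
        (void)arena;
        if (posix_memalign(&p, alignment, usize) != 0) {
            return NULL;
        }
        if (zero) {
            memset(p, 0, usize);
        }
        return p;
    }

    static void *
    large_palloc_sketch(arena_t *arena, size_t usize, size_t alignment,
        bool zero) {
        arena = choose_arena(arena, usize);  /* resolve a NULL arena */
        if (arena == NULL) {
            return NULL;
        }
        return extent_alloc_large(arena, usize, alignment, zero);
    }

    /* large_malloc() is large_palloc() at cache-line alignment. */
    static void *
    large_malloc_sketch(arena_t *arena, size_t usize, bool zero) {
        return large_palloc_sketch(arena, usize, CACHELINE, zero);
    }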
extent_dss.c
    109  extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    125          gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
    157          edata_init(gap, arena_ind_get(arena),
    160              &arena->pa_shard.pac),
    192              arena);
    194              &arena->pa_shard.pac, ehooks, gap);
    197              &arena->pa_shard.edata_cache, gap);
    205              arena);
    208              arena_ind_get(arena), ret, size,
    233          edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap)
    [all...]
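extent_alloc_dss allocates by extending the DSS (the sbrk break). The fragment visible here is the gap handling within that technique: when alignment forces padding between the current break and the block actually returned, that padding is recorded in a gap edata_t taken from the arena's edata cache and given back to the allocator rather than leaked. A much-simplified sketch of the core idea, without jemalloc's retry loop or gap bookkeeping (the gap is merely reported to the caller):

    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Extend the DSS by (gap + size) and return an aligned block.
       alignment must be a power of two.  Real code must retry under
       contention and check for address wrap-around; this sketch
       does neither. */
    static void *
    dss_alloc_sketch(size_t size, size_t alignment,
        void **gap_addr, size_t *gap_size) {
        void *cur = sbrk(0);
        if (cur == (void *)-1) {
            return NULL;
        }
        uintptr_t base = (uintptr_t)cur;
        uintptr_t aligned =
            (base + (alignment - 1)) & ~(uintptr_t)(alignment - 1);
        *gap_addr = cur;
        *gap_size = (size_t)(aligned - base);  /* padding to recycle */
        if (sbrk((intptr_t)(*gap_size + size)) == (void *)-1) {
            return NULL;
        }
        return (void *)aligned;
    }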
/src/external/bsd/jemalloc.old/dist/src/

arena.c
      63  static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
      65  static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
      68  arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
      74  arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
      77          *nthreads += arena_nthreads_get(arena, false);
      78          *dss = dss_prec_names[arena_dss_prec_get(arena)];
      79          *dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
      80          *muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
      81          pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
      85  arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads
     637          arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);    local
     642          arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);    local
     649          arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);    local
     779          arena_t *arena = arena_get(tsdn, i, false);    local
    1389          arena_t *arena = arena_get_from_edata(edata);    local
    1425          arena_t *arena = arena_get_from_edata(edata);    local
    1600          arena_t *arena;    local
    1695          pre_reentrancy(tsdn_tsd(tsdn), arena);    local
    [all...]

large.c
    14  large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
    17          return large_palloc(tsdn, arena, usize, CACHELINE, zero);
    21  large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    27          assert(!tsdn_null(tsdn) || arena != NULL);
    35          arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
    37          if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
    38              arena, usize, alignment, zero)) == NULL) {
    43          if (!arena_is_auto(arena)) {
    45                  malloc_mutex_lock(tsdn, &arena->large_mtx)
    56          arena_t *arena = arena_get_from_edata(edata);    local
    85          arena_t *arena = arena_get_from_edata(edata);    local
   271          arena_t *arena = arena_get_from_edata(edata);    local
    [all...]

extent_dss.c
    109  extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    125          gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
    157          edata_init(gap, arena_ind_get(arena),
    160              &arena->pa_shard.pac),
    192              arena);
    194              &arena->pa_shard.pac, ehooks, gap);
    197              &arena->pa_shard.edata_cache, gap);
    205              arena);
    208              arena_ind_get(arena), ret, size,
    233          edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap)
    [all...]