| /src/external/bsd/jemalloc.old/dist/include/jemalloc/internal/ |
| arena_structs.h | 77 pa_shard_t pa_shard; member in struct:arena_s
|
| /src/external/bsd/jemalloc.old/include/jemalloc/internal/ |
| arena_structs.h | 77 pa_shard_t pa_shard; member in struct:arena_s
|
| /src/external/bsd/jemalloc/dist/include/jemalloc/internal/ |
| arena_structs.h | 78 pa_shard_t pa_shard; member in struct:arena_s
|
| arena_inlines_b.h | 46 &tsd_arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
|
| /src/external/bsd/jemalloc/include/jemalloc/internal/ |
| arena_structs.h | 78 pa_shard_t pa_shard; member in struct:arena_s
|
| arena_inlines_b.h | 46 &tsd_arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
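
Taken together, the field paths in the hits below (arena->pa_shard.pac.oversize_threshold, arena->pa_shard.edata_cache, arena->pa_shard.hpa_shard.opts.deferral_allowed, and so on) imply a containment hierarchy. The following is a minimal sketch of that shape only; the placeholder types, member subset, and ordering are assumptions reconstructed from the paths in this index, not jemalloc's real definitions (those live in pa.h, pac.h, hpa.h, and edata_cache.h):

    #include <stdbool.h>
    #include <stddef.h>

    /* jemalloc-style atomic wrapper; ctl.c's "&(arena->pa_shard.nactive.repr)"
     * below shows the .repr member of this shape. */
    typedef struct { size_t repr; } atomic_zu_t;

    /* Placeholder stand-ins so the sketch is self-contained. */
    typedef struct { int placeholder; } malloc_mutex_t;
    typedef struct { malloc_mutex_t mtx; } ecache_t;
    typedef struct { malloc_mutex_t mtx; } decay_t;
    typedef struct { malloc_mutex_t mtx; } edata_cache_t;

    typedef struct pac_s {
        ecache_t ecache_dirty;          /* ...pac.ecache_dirty.mtx in ctl.c */
        ecache_t ecache_muzzy;
        ecache_t ecache_retained;
        decay_t decay_dirty;            /* ...pac.decay_dirty in arena.c */
        decay_t decay_muzzy;
        atomic_zu_t oversize_threshold; /* atomic_load_zu(..., ATOMIC_RELAXED) */
    } pac_t;

    typedef struct hpa_shard_s {
        struct { bool deferral_allowed; } opts; /* hpa_background_thread.c */
    } hpa_shard_t;

    typedef struct pa_shard_s {
        pac_t pac;                  /* page allocator cache */
        edata_cache_t edata_cache;  /* extent-metadata cache, extent_dss.c */
        hpa_shard_t hpa_shard;      /* hugepage allocator shard */
        bool ever_used_hpa;         /* checked by pa_microbench.c */
        atomic_zu_t nactive;        /* active pages, read via pa_shard_nactive() */
    } pa_shard_t;

    struct arena_s {
        /* ... the arena's other members, elided ... */
        pa_shard_t pa_shard;        /* the member every hit in this index reaches through */
    };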
|
| /src/external/bsd/jemalloc.old/dist/src/ |
| extent_dss.c | 125 gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
|              | 160 &arena->pa_shard.pac),
|              | 194 &arena->pa_shard.pac, ehooks, gap);
|              | 197 &arena->pa_shard.edata_cache, gap);
|              | 233 edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
|
| arena.c | 81 pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
|         | 98 size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
|         | 146 pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
|         | 184 pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
|         | 209 &arena->pa_shard.pac.decay_dirty, 0);
|         | 220 if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
|         | 338 edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
|         | 412 return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
|         | 418 return pa_decay_ms_get(&arena->pa_shard, state);
|         | 427 pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats [all...]
| large.c | 68 bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
|         | 94 bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
|         | 253 pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
|
| background_thread.c | 212 tsdn, &arena->pa_shard);
|                     | 567 &arena->pa_shard, true);
|                     | 589 &arena->pa_shard, false);
|
| ctl.c | 2803 &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
|       | 2811 atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
|       | 3654 MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
|       | 3655 MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
|       | 3656 MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
|       | 3657 MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
|       | 3658 MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
|       | 3659 MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
|       | 4182 pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
|
| jemalloc.c | 1929 if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
|
| /src/external/bsd/jemalloc/dist/src/ |
| extent_dss.c | 123 gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
|              | 159 extent_sn_next(&arena->pa_shard.pac),
|              | 195 &arena->pa_shard.pac, ehooks, gap);
|              | 198 &arena->pa_shard.edata_cache, gap);
|              | 213 &arena->pa_shard.pac),
|              | 240 edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
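
The extent_dss.c hits trace one lifecycle: an edata_t is borrowed from the shard's edata_cache at line 123 and returned at line 240 on the failure path. A hedged sketch of that pattern follows; the prototypes are assumptions reconstructed only from the call sites shown, and the function name is illustrative:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct tsdn_s tsdn_t;
    typedef struct edata_s edata_t;
    typedef struct edata_cache_s edata_cache_t;

    /* Prototypes assumed from the call sites above (lines 123 and 240). */
    extern edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *cache);
    extern void edata_cache_put(tsdn_t *tsdn, edata_cache_t *cache,
        edata_t *edata);

    /* Borrow extent metadata up front; on any failure while growing the
     * dss, return it to the shard's cache instead of leaking it. */
    static edata_t *
    dss_gap_sketch(tsdn_t *tsdn, edata_cache_t *edata_cache, bool grow_failed)
    {
        edata_t *gap = edata_cache_get(tsdn, edata_cache);
        if (gap == NULL) {
            return NULL;    /* no metadata; bail before touching the dss */
        }
        /* ... attempt to grow the dss; on success, gap describes the
         * unusable alignment gap and is handed off to the pac ... */
        if (grow_failed) {
            edata_cache_put(tsdn, edata_cache, gap);
            return NULL;
        }
        return gap;
    }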
|
| arena.c | 82 pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
|         | 101 size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
|         | 159 pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
|         | 200 tsdn, &arena->pa_shard, astats->mutex_prof_data);
|         | 224 tsdn, arena, &arena->pa_shard.pac.decay_dirty, 0);
|         | 236 if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
|         | 317 edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
|         | 395 tsdn, &arena->pa_shard, state, decay_ms, eagerness);
|         | 400 return pa_decay_ms_get(&arena->pa_shard, state);
|         | 409 pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats [all...]
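
The arena.c hits at lines 395 and 400 show that the arena keeps no decay clock of its own: setting or reading a decay time is a straight forward to the embedded pa_shard. A sketch under stated assumptions; the state enum and the eagerness parameter's type are placeholders, since only the argument count is visible in this index:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/types.h>  /* ssize_t */

    typedef struct tsdn_s tsdn_t;
    typedef struct pa_shard_s pa_shard_t;
    /* Placeholder for jemalloc's dirty/muzzy extent-state selector. */
    typedef enum { sketch_state_dirty, sketch_state_muzzy } sketch_state_t;

    /* Prototypes assumed from the arena.c call sites above. */
    extern bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard,
        sketch_state_t state, ssize_t decay_ms, int eagerness);
    extern ssize_t pa_decay_ms_get(pa_shard_t *shard, sketch_state_t state);

    /* Forwarding shape of arena_decay_ms_set: pick the decay curve by
     * state, delegate everything else to the shard. */
    static bool
    arena_decay_ms_set_sketch(tsdn_t *tsdn, pa_shard_t *shard,
        ssize_t decay_ms)
    {
        return pa_decay_ms_set(tsdn, shard, sketch_state_dirty, decay_ms,
            /* eagerness */ 0);
    }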
| large.c | 70 bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
|         | 96 bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
|         | 255 pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
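
The large.c hits show the jemalloc convention used throughout the pa_* layer: the call returns true on failure, and a bool out-parameter reports whether it generated deferred work for the background thread (visible verbatim at line 255). A sketch; both resize calls are truncated in this index, so the trailing parameters of pa_shrink below are guesses:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct tsdn_s tsdn_t;
    typedef struct pa_shard_s pa_shard_t;
    typedef struct edata_s edata_t;

    /* Shapes assumed from the large.c call sites above. */
    extern bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
        size_t old_size, size_t new_size, bool *deferred_work_generated);

    static bool
    large_shrink_sketch(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
        size_t old_size, size_t new_size)
    {
        bool deferred_work_generated = false;
        bool err = pa_shrink(tsdn, shard, edata, old_size, new_size,
            &deferred_work_generated);
        if (!err && deferred_work_generated) {
            /* nudge the background thread; see background_thread.c below */
        }
        return err;
    }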
|
| background_thread.c | 283 tsdn, &arena->pa_shard);
|                     | 660 tsd_tsdn(tsd), &arena->pa_shard, true);
|                     | 682 tsd_tsdn(tsd), &arena->pa_shard, false);
|
| ctl.c | 2940 &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
|       | 2948 atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
|       | 3762 /* Accumulate nactive pages from each arena's pa_shard */
|       | 3763 approximate_nactive += pa_shard_nactive(&arena->pa_shard);
|       | 3957 MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
|       | 3958 MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
|       | 3959 MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
|       | 3960 MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
|       | 3961 MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
|       | 3962 MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx) [all...]
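
The ctl.c hits at lines 2940 and 2948 show mallctl reading and writing pac.oversize_threshold with jemalloc's relaxed atomics (atomic_load_zu / atomic_store_zu with ATOMIC_RELAXED). Plain C11 atomics express the same pattern; this self-contained sketch uses stdatomic.h in place of jemalloc's wrappers:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Stand-in for jemalloc's atomic_zu_t wrapper struct. */
    typedef struct { _Atomic size_t repr; } atomic_zu_sketch_t;

    static size_t
    oversize_threshold_read(atomic_zu_sketch_t *threshold)
    {
        /* Relaxed ordering: the threshold is a tuning knob read on the
         * allocation path; no ordering with other memory is needed. */
        return atomic_load_explicit(&threshold->repr, memory_order_relaxed);
    }

    static void
    oversize_threshold_write(atomic_zu_sketch_t *threshold, size_t value)
    {
        atomic_store_explicit(&threshold->repr, value, memory_order_relaxed);
    }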
| jemalloc.c | 1275 if (pa_shard_enable_hpa(tsd_tsdn(tsd), &xa0->pa_shard,
|
| /src/external/bsd/jemalloc.old/dist/test/unit/ |
| san_bump.c | 19 pac_t *pac = &arena->pa_shard.pac;
|            | 84 pac_t *pac = &arena->pa_shard.pac;
|
| retained.c | 149 arena->pa_shard.pac.exp_grow.next; pind++) {
|
| /src/external/bsd/jemalloc/dist/test/stress/pa/ |
| pa_microbench.c | 60 pa_shard_t pa_shard; /* PA shard */ member in struct:__anon63
|                 | 199 &g_shard_infra[i].pa_shard, &g_pa_central,
|                 | 225 &g_shard_infra[i].pa_shard, &hpa_opts, &sec_opts)) {
|                 | 245 tsd_tsdn(tsd_fetch()), &g_shard_infra[i].pa_shard);
|                 | 337 if (!g_shard_infra[shard_id].pa_shard.ever_used_hpa) {
|                 | 343 tsdn, &g_shard_infra[shard_id].pa_shard.hpa_shard, hpa_stats_out);
|                 | 443 &g_shard_infra[event->shard_ind].pa_shard, size,
|                 | 478 &g_shard_infra[event->shard_ind].pa_shard,
|                 | 538 &g_shard_infra[shard_ind].pa_shard,
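
Unlike every other hit in this index, the stress test embeds pa_shard_t in a struct of its own rather than reaching it through an arena, confirming the shard is usable standalone. A sketch of that shape; the test's real struct is anonymous (hence "struct:__anon63" above), so the names and shard count here are illustrative:

    #include <stddef.h>

    /* Placeholder; the real pa_shard_t is jemalloc's. */
    typedef struct pa_shard_s { int opaque; } pa_shard_t;

    typedef struct shard_infra_s {
        pa_shard_t pa_shard;    /* as at pa_microbench.c line 60 */
        /* ... per-shard counters and infrastructure, elided ... */
    } shard_infra_t;

    enum { NSHARDS_SKETCH = 16 };   /* illustrative; the test sizes this itself */
    static shard_infra_t g_shard_infra[NSHARDS_SKETCH];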
|
| /src/external/bsd/jemalloc/dist/test/unit/ |
| san_bump.c | 63 pac_t *pac = &arena->pa_shard.pac;
|            | 129 pac_t *pac = &arena->pa_shard.pac;
|            | 170 pac_t *pac = &arena->pa_shard.pac;
|
| hpa_background_thread.c | 23 bool deferral_allowed = a0->pa_shard.hpa_shard.opts.deferral_allowed;
|
| retained.c | 156 pind < arena->pa_shard.pac.exp_grow.next; pind++) {
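
The retained.c hit is the tail of a for loop that walks page-size-class indices strictly below the shard's next exponential-grow index (pac.exp_grow.next). A minimal sketch of that loop shape, with placeholder types standing in for jemalloc's pszind_t and exp_grow:

    #include <stddef.h>

    typedef unsigned pszind_sketch_t;               /* page-size-class index */
    typedef struct { pszind_sketch_t next; } exp_grow_sketch_t;

    static size_t
    count_grow_classes(const exp_grow_sketch_t *exp_grow)
    {
        size_t n = 0;
        for (pszind_sketch_t pind = 0; pind < exp_grow->next; pind++) {
            n++;    /* the test inspects retained extents per class here */
        }
        return n;
    }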
|
| stats.c | 466 size_t nactive_initial = pa_shard_nactive(&arena->pa_shard);
|         | 471 * arena's pa_shard.
|         | 478 size_t nactive_after_small = pa_shard_nactive(&arena->pa_shard);
|         | 500 size_t nactive_after_large = pa_shard_nactive(&arena->pa_shard);
|         | 521 size_t nactive_final = pa_shard_nactive(&arena->pa_shard);
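
The stats.c hits show the test's technique: snapshot the shard's active-page counter before and after each allocation phase (initial, after small, after large, final) and check it moved the right way. A sketch of one such check; the prototype is assumed from the call sites above, and malloc stands in for the test's actual allocation:

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>

    typedef struct pa_shard_s pa_shard_t;

    /* Prototype assumed from the stats.c call sites above. */
    extern size_t pa_shard_nactive(pa_shard_t *shard);

    static void
    check_nactive_grows(pa_shard_t *shard, size_t sz)
    {
        size_t nactive_initial = pa_shard_nactive(shard);
        void *p = malloc(sz);   /* stand-in for the test's allocation */
        assert(p != NULL);
        assert(pa_shard_nactive(shard) >= nactive_initial);
        free(p);
    }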
|