Home | Sort by: relevance | last modified time | path
    Searched refs:edata (Results 1 - 25 of 382) sorted by relevance

1 2 3 4 5 6 7 8 9 10 11 >>

  /src/external/bsd/jemalloc/dist/include/jemalloc/internal/
edata.h 66 * The information about a particular edata that lives in an emap. Space is
67 * more precious there (the information, plus the edata pointer, has to live in
208 * If this edata is a user allocation from an HPA, it comes out of some
257 edata_arena_ind_get(const edata_t *edata) {
258 unsigned arena_ind = (unsigned)((edata->e_bits &
266 edata_szind_get_maybe_invalid(const edata_t *edata) {
267 szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
274 edata_szind_get(const edata_t *edata) {
275 szind_t szind = edata_szind_get_maybe_invalid(edata);
281 edata_usize_get(const edata_t *edata) {
    [all...]
prof_recent.h 8 void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
9 void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
11 void edata_prof_recent_alloc_init(edata_t *edata);
17 prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
large_externs.h 9 bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
15 void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
16 void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
17 void large_dalloc(tsdn_t *tsdn, edata_t *edata);
18 size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
19 void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
21 void large_prof_tctx_reset(edata_t *edata);
22 void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
  /src/external/bsd/jemalloc/include/jemalloc/internal/
edata.h 66 * The information about a particular edata that lives in an emap. Space is
67 * more precious there (the information, plus the edata pointer, has to live in
208 * If this edata is a user allocation from an HPA, it comes out of some
257 edata_arena_ind_get(const edata_t *edata) {
258 unsigned arena_ind = (unsigned)((edata->e_bits &
266 edata_szind_get_maybe_invalid(const edata_t *edata) {
267 szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
274 edata_szind_get(const edata_t *edata) {
275 szind_t szind = edata_szind_get_maybe_invalid(edata);
281 edata_usize_get(const edata_t *edata) {
    [all...]
prof_recent.h 8 void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
9 void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
11 void edata_prof_recent_alloc_init(edata_t *edata);
17 prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
large_externs.h 9 bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
15 void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
16 void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
17 void large_dalloc(tsdn_t *tsdn, edata_t *edata);
18 size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
19 void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
21 void large_prof_tctx_reset(edata_t *edata);
22 void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
  /src/external/bsd/jemalloc.old/dist/include/jemalloc/internal/
edata.h 66 * The information about a particular edata that lives in an emap. Space is
67 * more precious there (the information, plus the edata pointer, has to live in
208 * If this edata is a user allocation from an HPA, it comes out of some
257 edata_arena_ind_get(const edata_t *edata) {
258 unsigned arena_ind = (unsigned)((edata->e_bits &
266 edata_szind_get_maybe_invalid(const edata_t *edata) {
267 szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
274 edata_szind_get(const edata_t *edata) {
275 szind_t szind = edata_szind_get_maybe_invalid(edata);
281 edata_usize_get(const edata_t *edata) {
    [all...]
prof_recent.h 8 void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
9 void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
11 void edata_prof_recent_alloc_init(edata_t *edata);
17 prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
  /src/external/bsd/jemalloc.old/include/jemalloc/internal/
edata.h 66 * The information about a particular edata that lives in an emap. Space is
67 * more precious there (the information, plus the edata pointer, has to live in
208 * If this edata is a user allocation from an HPA, it comes out of some
257 edata_arena_ind_get(const edata_t *edata) {
258 unsigned arena_ind = (unsigned)((edata->e_bits &
266 edata_szind_get_maybe_invalid(const edata_t *edata) {
267 szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
274 edata_szind_get(const edata_t *edata) {
275 szind_t szind = edata_szind_get_maybe_invalid(edata);
281 edata_usize_get(const edata_t *edata) {
    [all...]
prof_recent.h 8 void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
9 void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
11 void edata_prof_recent_alloc_init(edata_t *edata);
17 prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
  /src/external/bsd/jemalloc/dist/src/
extent.c 16 static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
19 edata_t *edata, size_t offset, size_t length, bool growing_retained);
21 edata_t *edata, size_t offset, size_t length, bool growing_retained);
23 edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
37 static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
42 ecache_t *ecache, edata_t *edata, bool *coalesced);
62 ecache_t *ecache, edata_t *edata) {
63 emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
66 edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
67 edata, &coalesced)
87 edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata, local
104 edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata, local
159 edata_t *edata; local
402 edata_t *edata; local
606 edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache, local
665 edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); local
782 edata_t *edata = extent_recycle(tsdn, pac, ehooks, local
998 edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); local
    [all...]
large.c 24 edata_t *edata; local
37 if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
44 /* Insert edata into large. */
46 edata_list_active_append(&arena->large, edata);
51 return edata_addr_get(edata);
55 large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
56 arena_t *arena = arena_get_from_edata(edata);
58 size_t old_size = edata_size_get(edata);
59 size_t old_usize = edata_usize_get(edata);
68 bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size
187 edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
    [all...]
emap.c 19 emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
24 edata_state_set(edata, state);
28 rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
31 rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
33 (uintptr_t)edata_last_get(edata), /* dependent */ true,
38 emap_assert_mapped(tsdn, emap, edata);
42 emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
47 assert(!edata_guarded_get(edata));
54 void *neighbor_addr = forward ? edata_past_get(edata) :
55 edata_before_get(edata);
    [all...]
pai.c 9 edata_t *edata = pai_alloc(tsdn, self, size, PAGE, local
13 if (edata == NULL) {
16 edata_list_active_append(results, edata);
24 edata_t *edata; local
25 while ((edata = edata_list_active_first(list)) != NULL) {
27 edata_list_active_remove(list, edata);
28 pai_dalloc(tsdn, self, edata, &deferred_by_dalloc);
edata_cache.c 24 edata_t *edata = edata_avail_first(&edata_cache->avail); local
25 if (edata == NULL) {
29 edata_avail_remove(&edata_cache->avail, edata);
32 return edata;
36 edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
38 edata_avail_insert(&edata_cache->avail, edata);
68 edata_t *edata; local
71 edata = edata_avail_remove_first(&ecs->fallback->avail);
72 if (edata == NULL) {
75 edata_list_inactive_append(&ecs->list, edata);
91 edata_t *edata = edata_list_inactive_first(&ecs->list); local
120 edata_t *edata; local
    [all...]
inspect.c 10 const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
11 if (unlikely(edata == NULL)) {
16 *size = edata_size_get(edata);
17 if (!edata_slab_get(edata)) {
21 *nfree = edata_nfree_get(edata);
22 *nregs = bin_infos[edata_szind_get(edata)].nregs;
24 assert(*nfree * edata_usize_get(edata) <= *size);
35 const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
36 if (unlikely(edata == NULL)) {
42 *size = edata_size_get(edata);
    [all...]
  /src/external/bsd/jemalloc.old/dist/src/
extent.c 16 static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
19 edata_t *edata, size_t offset, size_t length, bool growing_retained);
21 edata_t *edata, size_t offset, size_t length, bool growing_retained);
23 edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
37 static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
42 ecache_t *ecache, edata_t *edata, bool *coalesced);
62 ecache_t *ecache, edata_t *edata) {
63 emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
66 edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
67 edata, &coalesced)
87 edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata, local
104 edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata, local
159 edata_t *edata; local
402 edata_t *edata; local
606 edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache, local
665 edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); local
782 edata_t *edata = extent_recycle(tsdn, pac, ehooks, local
998 edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); local
    [all...]
large.c 24 edata_t *edata; local
37 if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
44 /* Insert edata into large. */
46 edata_list_active_append(&arena->large, edata);
51 return edata_addr_get(edata);
55 large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
56 arena_t *arena = arena_get_from_edata(edata);
58 size_t old_size = edata_size_get(edata);
59 size_t old_usize = edata_usize_get(edata);
68 bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size
187 edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
    [all...]
emap.c 19 emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
24 edata_state_set(edata, state);
28 rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
31 rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
33 (uintptr_t)edata_last_get(edata), /* dependent */ true,
38 emap_assert_mapped(tsdn, emap, edata);
42 emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
47 assert(!edata_guarded_get(edata));
54 void *neighbor_addr = forward ? edata_past_get(edata) :
55 edata_before_get(edata);
    [all...]
pai.c 9 edata_t *edata = pai_alloc(tsdn, self, size, PAGE, local
13 if (edata == NULL) {
16 edata_list_active_append(results, edata);
24 edata_t *edata; local
25 while ((edata = edata_list_active_first(list)) != NULL) {
27 edata_list_active_remove(list, edata);
28 pai_dalloc(tsdn, self, edata, &deferred_by_dalloc);
edata_cache.c 24 edata_t *edata = edata_avail_first(&edata_cache->avail); local
25 if (edata == NULL) {
29 edata_avail_remove(&edata_cache->avail, edata);
32 return edata;
36 edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
38 edata_avail_insert(&edata_cache->avail, edata);
68 edata_t *edata; local
71 edata = edata_avail_remove_first(&ecs->fallback->avail);
72 if (edata == NULL) {
75 edata_list_inactive_append(&ecs->list, edata);
91 edata_t *edata = edata_list_inactive_first(&ecs->list); local
120 edata_t *edata; local
    [all...]
inspect.c 10 const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
11 if (unlikely(edata == NULL)) {
16 *size = edata_size_get(edata);
17 if (!edata_slab_get(edata)) {
21 *nfree = edata_nfree_get(edata);
22 *nregs = bin_infos[edata_szind_get(edata)].nregs;
24 assert(*nfree * edata_usize_get(edata) <= *size);
35 const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
36 if (unlikely(edata == NULL)) {
42 *size = edata_size_get(edata);
    [all...]
  /src/external/bsd/jemalloc/dist/test/include/test/
san.h 11 edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
12 return edata_guarded_get(edata);
  /src/external/bsd/jemalloc.old/dist/test/include/test/
san.h 11 edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); local
12 return edata_guarded_get(edata);
  /src/sys/dev/sysmon/
sysmon_envsys_events.c 118 sme_event_register(prop_dictionary_t sdict, envsys_data_t *edata,
129 KASSERT(edata != NULL);
150 " edata-flags 0x%04x\n", __func__, edata->units, props,
151 edata->upropset, edata->value_max, edata->flags));
154 if (edata->units == ENVSYS_INDICATOR ||
155 edata->units == ENVSYS_BATTERY_CHARGE)
159 ((edata->value_max == 0) |
780 envsys_data_t *edata = see->see_edata; local
893 envsys_data_t *edata = see->see_edata; local
1066 envsys_data_t *edata; local
1113 envsys_data_t *edata; local
    [all...]

Completed in 23 milliseconds

1 2 3 4 5 6 7 8 9 10 11 >>