#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
#define JEMALLOC_INTERNAL_PROF_INLINES_H

#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/thread_event.h"

JEMALLOC_ALWAYS_INLINE void
prof_active_assert(void) {
	cassert(config_prof);
	/*
	 * If opt_prof is off, then prof_active must always be off, regardless
	 * of whether prof_active_mtx is in effect or not.
	 */
	assert(opt_prof || !prof_active_state);
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
	prof_active_assert();
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return prof_active_state;
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return prof_gdump_val;
}

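/*
 * Return the calling thread's prof_tdata_t.  If create is true, lazily
 * initialize it (or reinitialize it if it has expired); even then the result
 * may still be NULL, e.g. when tsd is not in nominal state.
 */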
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		assert(tsd_reentrancy_level_get(tsd) == 0);
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return tdata;
}

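/* Read the profiling info associated with ptr into *prof_info. */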
JEMALLOC_ALWAYS_INLINE void
prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
    prof_info_t *prof_info) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(prof_info != NULL);

	arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
}

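/*
 * Same as prof_info_get(), except that arena_prof_info_get() is also asked to
 * reset the recent-allocation record associated with ptr.
 */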
JEMALLOC_ALWAYS_INLINE void
prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
    emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(prof_info != NULL);

	arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
}

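/*
 * Mark the allocation at ptr as not sampled; thin wrapper around
 * arena_prof_tctx_reset().
 */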
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
}

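/*
 * Variant of prof_tctx_reset() for pointers known to refer to sampled
 * allocations; slightly cheaper (see the comment in prof_realloc()).
 */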
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset_sampled(tsd, ptr);
}

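/*
 * Record tctx and size in edata's profiling info; tctx must be a real
 * (sampled) tctx, as asserted below.
 */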
JEMALLOC_ALWAYS_INLINE void
prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
	cassert(config_prof);
	assert(edata != NULL);
	assert((uintptr_t)tctx > (uintptr_t)1U);

	arena_prof_info_set(tsd, edata, tctx, size);
}

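/*
 * Return true if the current allocation should not be sampled: when there is
 * no pending sample event, when no tdata can be obtained, or when the
 * thread's profiling is inactive.
 */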
JEMALLOC_ALWAYS_INLINE bool
prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
	cassert(config_prof);

	/* Fastpath: no need to load tdata */
	if (likely(!sample_event)) {
		return true;
	}

	/*
	 * sample_event is always obtained from the thread event module, and
	 * whenever it's true, it means that the thread event module has
	 * already checked the reentrancy level.
	 */
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
	if (unlikely(tdata == NULL)) {
		return true;
	}

	return !tdata->active;
}

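/*
 * Return the tctx to attribute an upcoming allocation to: a real tctx from
 * prof_tctx_create() if it is to be sampled, or the sentinel value
 * (prof_tctx_t *)(uintptr_t)1U otherwise.
 */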
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
	prof_tctx_t *ret;

	if (!prof_active ||
	    likely(prof_sample_should_skip(tsd, sample_event))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		ret = prof_tctx_create(tsd);
	}

	return ret;
}

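/*
 * Finish profiling bookkeeping for a new allocation: record it as a sampled
 * object if tctx is a real tctx, or mark it as not sampled otherwise.
 */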
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
    emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
	} else {
		prof_tctx_reset(tsd, ptr, alloc_ctx);
	}
}

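/*
 * Profiling bookkeeping for reallocation: possibly roll back the sampling
 * decision made in prof_alloc_prep(), record the new allocation as sampled or
 * not, and finally release the old allocation's sampled state if it had one.
 */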
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
    prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
    prof_info_t *old_prof_info, bool sample_event) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_should_skip(tsd, sample_event)) {
			/*
			 * Don't sample.  The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
	} else if (moved) {
		prof_tctx_reset(tsd, ptr, NULL);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_reset() would work for the !moved case as well,
		 * but prof_tctx_reset_sampled() is slightly cheaper, and is
		 * the proper thing to do here given explicit knowledge of the
		 * moved state.
		 */
		prof_tctx_reset_sampled(tsd, ptr);
	} else {
		prof_info_t prof_info;
		prof_info_get(tsd, ptr, NULL, &prof_info);
		assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the tctx
	 * to be prematurely destroyed as a side effect of momentarily zeroed
	 * counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, old_usize, old_prof_info);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
prof_sample_align(size_t orig_align) {
	/*
	 * Enforce page alignment, so that sampled allocations can be identified
	 * w/o metadata lookup.
	 */
	assert(opt_prof);
	return (opt_cache_oblivious && orig_align < PAGE) ? PAGE :
	    orig_align;
}

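/* True iff ptr has the page alignment that identifies sampled allocations. */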
JEMALLOC_ALWAYS_INLINE bool
prof_sample_aligned(const void *ptr) {
	return ((uintptr_t)ptr & PAGE_MASK) == 0;
}

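/* Return whether the allocation at ptr is currently sampled. */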
JEMALLOC_ALWAYS_INLINE bool
prof_sampled(tsd_t *tsd, const void *ptr) {
	prof_info_t prof_info;
	prof_info_get(tsd, ptr, NULL, &prof_info);
	bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
	if (sampled) {
		assert(prof_sample_aligned(ptr));
	}
	return sampled;
}

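/*
 * Profiling bookkeeping for deallocation: fetch (and reset the recent record
 * of) ptr's profiling info, and if the allocation was sampled, account for
 * its release.
 */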
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize,
    emap_alloc_ctx_t *alloc_ctx) {
	prof_info_t prof_info;
	prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
		assert(prof_sample_aligned(ptr));
		prof_free_sampled_object(tsd, usize, &prof_info);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */