#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
#define JEMALLOC_INTERNAL_LOCKEDINT_H

/*
 * On architectures that support 64-bit atomics, we use atomic updates for
 * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */

typedef struct locked_u64_s locked_u64_t;
#ifdef JEMALLOC_ATOMIC_U64
struct locked_u64_s {
	atomic_u64_t val;
};
#else
/* Must hold the associated mutex. */
struct locked_u64_s {
	uint64_t val;
};
#endif

typedef struct locked_zu_s locked_zu_t;
struct locked_zu_s {
	atomic_zu_t val;
};
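
/*
 * Usage sketch (hypothetical struct and field names, not jemalloc's real
 * stats types): a counter-holding struct embeds the optional mutex and the
 * locked types together.  Note that locked_zu_t is always backed by an
 * atomic, so it can also be read without the lock (see
 * locked_read_atomic_zu() below).
 *
 *	typedef struct foo_stats_s {
 *		LOCKEDINT_MTX_DECLARE(mtx)
 *		locked_u64_t nrequests;
 *		locked_zu_t curbytes;
 *	} foo_stats_t;
 */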

#ifndef JEMALLOC_ATOMIC_U64
#  define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
#  define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode)			\
    malloc_mutex_init(&(mu), name, rank, rank_mode)
#  define LOCKEDINT_MTX(mtx) (&(mtx))
#  define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
#  define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
#  define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
#  define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)			\
    malloc_mutex_postfork_parent(tsdn, &(mu))
#  define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)			\
    malloc_mutex_postfork_child(tsdn, &(mu))
#else
#  define LOCKEDINT_MTX_DECLARE(name)
#  define LOCKEDINT_MTX(mtx) NULL
#  define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
#  define LOCKEDINT_MTX_LOCK(tsdn, mu)
#  define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
#  define LOCKEDINT_MTX_PREFORK(tsdn, mu)
#  define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
#  define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
#endif

#ifdef JEMALLOC_ATOMIC_U64
#  define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
#else
#  define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx)			\
    malloc_mutex_assert_owner(tsdn, (mtx))
#endif
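
/*
 * Usage sketch (hypothetical caller; "stats" and "nrequests" are made-up
 * names): updates go through the LOCKEDINT_MTX macros, which expand to real
 * mutex operations only when 64-bit atomics are unavailable, and to nothing
 * (with a NULL mutex pointer) otherwise.
 *
 *	LOCKEDINT_MTX_LOCK(tsdn, stats->mtx);
 *	locked_inc_u64(tsdn, LOCKEDINT_MTX(stats->mtx), &stats->nrequests, 1);
 *	LOCKEDINT_MTX_UNLOCK(tsdn, stats->mtx);
 */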

static inline uint64_t
locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(&p->val, ATOMIC_RELAXED);
#else
	return p->val;
#endif
}

static inline void
locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
    uint64_t x) {
	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
#else
	p->val += x;
#endif
}

static inline void
locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
    uint64_t x) {
	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	p->val -= x;
	assert(p->val + x >= p->val);
#endif
}
/*
 * Increment and take the modulus.  Returns whether taking the modulus changed
 * the value (i.e. whether the counter wrapped around the modulus).
 */
static inline bool
locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
    const uint64_t x, const uint64_t modulus) {
	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
	uint64_t before, after;
	bool overflow;
#ifdef JEMALLOC_ATOMIC_U64
	before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
	do {
		after = before + x;
		assert(after >= before);
		overflow = (after >= modulus);
		if (overflow) {
			after %= modulus;
		}
	} while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
	    ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	before = p->val;
	after = before + x;
	overflow = (after >= modulus);
	if (overflow) {
		after %= modulus;
	}
	p->val = after;
#endif
	return overflow;
}
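
/*
 * Example (hypothetical; "stats", "accumbytes", "interval", and
 * trigger_periodic_action() are made-up names): an interval accumulator can
 * use locked_inc_mod_u64() to count bytes and learn when a threshold was
 * crossed, since the return value reports whether the counter wrapped.
 *
 *	if (locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(stats->mtx),
 *	    &stats->accumbytes, usize, interval)) {
 *		trigger_periodic_action();
 *	}
 */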

/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
	atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
#else
	dst->val += src;
#endif
}

static inline uint64_t
locked_read_u64_unsynchronized(locked_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(&p->val, ATOMIC_RELAXED);
#else
	return p->val;
#endif
}
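
/*
 * Usage sketch (hypothetical "src"/"dst" names): when merging one set of
 * counters into a destination that the caller already synchronizes, the
 * _unsynchronized variants avoid atomic read-modify-write operations.
 *
 *	uint64_t n = locked_read_u64_unsynchronized(&src->nrequests);
 *	locked_inc_u64_unsynchronized(&dst->nrequests, n);
 */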

static inline void
locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
#else
	p->val = x;
#endif
}

static inline size_t
locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
	/* The zu value is stored atomically in either configuration. */
	return atomic_load_zu(&p->val, ATOMIC_RELAXED);
}

static inline void
locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
    size_t x) {
	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
#else
	size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
	atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
#endif
}

static inline void
locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
    size_t x) {
	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
	size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
	atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
	atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
}

/*
 * Unlike the _u64 variant, this is safe to call unconditionally: the value is
 * always stored in an atomic, so no lock needs to be held for the read.
 */
static inline size_t
locked_read_atomic_zu(locked_zu_t *p) {
	return atomic_load_zu(&p->val, ATOMIC_RELAXED);
}
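
/*
 * Example (hypothetical "stats"/"curbytes" names): a read-only query of a
 * size_t counter can fetch the current value without touching the mutex.
 *
 *	size_t cur = locked_read_atomic_zu(&stats->curbytes);
 */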

#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */