#ifndef JEMALLOC_INTERNAL_SEC_H
#define JEMALLOC_INTERNAL_SEC_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/sec_opts.h"	/* Declares sec_opts_t, used below. */

/*
 * Small extent cache.
 *
 * This includes some utilities to cache small extents.  We have a per-pszind
 * bin with its own list of extents of that size.  We don't try to do any
 * coalescing of extents (since it would in general require cross-shard locks
 * or knowledge of the underlying PAI implementation).
 */
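
/*
 * Illustrative sketch (an assumption about usage, not part of this header):
 * because the cache is keyed by pszind, finding the bin for a request is a
 * size-class computation plus an array index.  sz_psz2ind() is the
 * size-to-pszind helper from sz.h; "shard" stands in for whichever shard the
 * calling thread maps to.
 *
 *	pszind_t pszind = sz_psz2ind(size);
 *	bool cacheable = (pszind < sec->npsizes);
 *	sec_bin_t *bin = cacheable ? &shard->bins[pszind] : NULL;
 */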

/*
 * For now, this is just one field; eventually, we'll probably want to get more
 * fine-grained data out (like per-size class statistics).
 */
typedef struct sec_stats_s sec_stats_t;
struct sec_stats_s {
	/* Sum of bytes_cur across all shards. */
	size_t bytes;
};

static inline void
sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) {
	dst->bytes += src->bytes;
}
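
/*
 * Hypothetical usage sketch: callers total cache usage by merging each
 * shard's stats into one accumulator (sec_stats_merge(), declared below,
 * does this for an entire SEC).
 *
 *	sec_stats_t total = {0};
 *	sec_stats_t shard_stats = {128 * 1024};	// one shard's bytes
 *	sec_stats_accum(&total, &shard_stats);	// total.bytes == 128 * 1024
 */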

/* A collection of free extents, all of the same size. */
typedef struct sec_bin_s sec_bin_t;
struct sec_bin_s {
	/*
	 * When we fail to fulfill an allocation, we do a batch-alloc on the
	 * underlying allocator to fill extra items as well.  We drop the SEC
	 * lock while doing so, to allow operations on other bins to succeed.
	 * That introduces the possibility of other threads also trying to
	 * allocate out of this bin, failing, and likewise going to the backing
	 * allocator.  To avoid a thundering herd problem in which lots of
	 * threads do batch allocs and overfill this bin as a result, we only
	 * allow one batch allocation at a time per bin.  This bool tracks
	 * whether or not some thread is already batch allocating.
	 *
	 * Eventually, the right answer may be a smarter sharding policy for
	 * the bins (e.g. a mutex per bin, which would also be more scalable
	 * generally; the batch-allocating thread could hold it while
	 * batch-allocating).
	 */
	bool being_batch_filled;

	/*
	 * Number of bytes in this particular bin (as opposed to the
	 * sec_shard_t's bytes_cur).  This isn't user-visible or reported in
	 * stats; rather, it allows us to quickly determine the change to the
	 * centralized counter when flushing.
	 */
	size_t bytes_cur;
	edata_list_active_t freelist;
};
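
/*
 * Simplified sketch of the batch-fill protocol described above (the
 * authoritative logic lives in sec.c; "do_fill" is illustrative, and "shard"
 * refers to the sec_shard_t defined just below):
 *
 *	bool do_fill = false;
 *	malloc_mutex_lock(tsdn, &shard->mtx);
 *	if (!bin->being_batch_filled) {
 *		bin->being_batch_filled = true;	// claim the fill
 *		do_fill = true;
 *	}
 *	malloc_mutex_unlock(tsdn, &shard->mtx);
 *	if (do_fill) {
 *		// Batch-alloc from sec->fallback without holding the lock,
 *		// then retake it, stash the extra extents in bin->freelist,
 *		// and clear being_batch_filled.
 *	}
 */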

typedef struct sec_shard_s sec_shard_t;
struct sec_shard_s {
	/*
	 * We don't keep per-bin mutexes, even though that would allow more
	 * sharding; this allows global cache-eviction, which in turn allows
	 * for better balancing across free lists.
	 */
	malloc_mutex_t mtx;
	/*
	 * A SEC may need to be shut down (i.e. flushed of its contents and
	 * prevented from further caching).  To avoid tricky synchronization
	 * issues, we just track enabled-status in each shard, guarded by a
	 * mutex.  In practice, this is only ever checked during brief races,
	 * since the arena-level atomic boolean tracking HPA enabled-ness means
	 * that we won't go down these pathways very often after custom extent
	 * hooks are installed.
	 */
	bool enabled;
	sec_bin_t *bins;
	/* Number of bytes in all bins in the shard. */
	size_t bytes_cur;
	/* The next pszind to flush in the flush-some pathways. */
	pszind_t to_flush_next;
};
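
/*
 * Sketch of how to_flush_next supports incremental eviction (an assumption
 * about the flush-some pathway; see sec.c for the real logic).  Bins are
 * visited round-robin so that no single size class absorbs every eviction;
 * "target" is an illustrative watermark.
 *
 *	while (shard->bytes_cur > target) {
 *		sec_bin_t *bin = &shard->bins[shard->to_flush_next];
 *		// Flush bin->freelist to the fallback; update byte counters.
 *		shard->to_flush_next = (shard->to_flush_next + 1)
 *		    % sec->npsizes;
 *	}
 */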

typedef struct sec_s sec_t;
struct sec_s {
	pai_t pai;
	pai_t *fallback;

	sec_opts_t opts;
	sec_shard_t *shards;
	pszind_t npsizes;
};
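
/*
 * The pai_t is the first member, so a pointer to a sec_t can be handed out
 * as a pai_t * and recovered with a cast inside the SEC's PAI methods.  A
 * minimal sketch of the idiom:
 *
 *	// Inside any of the SEC's PAI methods, which receive a pai_t *self:
 *	sec_t *sec = (sec_t *)self;	// valid because pai is the first member
 *
 * This is what lets a SEC be stacked transparently in front of any other
 * PAI implementation, including the fallback it caches for.
 */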

bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
    const sec_opts_t *opts);
void sec_flush(tsdn_t *tsdn, sec_t *sec);
void sec_disable(tsdn_t *tsdn, sec_t *sec);
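
/*
 * Hypothetical lifecycle sketch ("base", "fallback", and "opts" are assumed
 * to be set up elsewhere).  Per jemalloc convention, sec_init() returns true
 * on failure.
 *
 *	sec_t sec;
 *	if (sec_init(tsdn, &sec, base, fallback, &opts)) {
 *		// Initialization failed.
 *	}
 *	// ... serve allocations through &sec.pai ...
 *	sec_flush(tsdn, &sec);		// return cached extents to the fallback
 *	sec_disable(tsdn, &sec);	// flush and prevent further caching
 */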

/*
 * Morally, these two stats methods probably ought to be a single one (and the
 * mutex_prof_data ought to live in the sec_stats_t).  But splitting them apart
 * lets them fit easily into the pa_shard stats framework (which also has this
 * split), which simplifies the stats management.
 */
void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
    mutex_prof_data_t *mutex_prof_data);
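
/*
 * Usage sketch: the two calls are typically paired when gathering stats
 * (variable names here are illustrative).
 *
 *	sec_stats_t stats = {0};
 *	mutex_prof_data_t mtx_data = {0};
 *	sec_stats_merge(tsdn, &sec, &stats);	// sums bytes across shards
 *	sec_mutex_stats_read(tsdn, &sec, &mtx_data);
 */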

/*
 * We use the arena lock ordering; these are acquired in phase 2 of forking,
 * but should be acquired before the underlying allocator mutexes.
 */
void sec_prefork2(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_child(tsdn_t *tsdn, sec_t *sec);
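
/*
 * Sketch of the ordering constraint above (an illustration; in jemalloc the
 * arena's prefork machinery drives these calls):
 *
 *	// Fork phase 2: acquire the SEC mutexes...
 *	sec_prefork2(tsdn, &sec);
 *	// ...before any later phase acquires the underlying allocator's
 *	// mutexes, keeping SEC locks above the fallback's in the lock order.
 */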

#endif /* JEMALLOC_INTERNAL_SEC_H */