/* jemalloc unit test: HPA shard / SEC (small extent cache) interaction. */
      1 #include "test/jemalloc_test.h"
      2 
      3 #include "jemalloc/internal/hpa.h"
      4 #include "jemalloc/internal/nstime.h"
      5 
      6 #define SHARD_IND 111
      7 
      8 #define ALLOC_MAX (HUGEPAGE)
      9 
typedef struct test_data_s test_data_t;
struct test_data_s {
	/*
	 * Must be the first member -- we convert back and forth between the
	 * test_data_t and the hpa_shard_t;
	 */
	hpa_shard_t   shard;
	/* Central allocator the shard draws hugepages from. */
	hpa_central_t central;
	/* Backing base allocator; owns the metadata for everything below. */
	base_t       *base;
	/* Cache of edata_t structs handed to the shard at init time. */
	edata_cache_t shard_edata_cache;

	/* Extent map the shard registers its allocations in. */
	emap_t emap;
};
     23 
/*
 * Baseline shard options for these tests.  NOTE(review): this is a
 * positional initializer -- each comment presumably names the matching
 * hpa_shard_opts_t field, but the order must track the struct declaration
 * exactly; verify against hpa.h if fields are added or reordered.
 */
static hpa_shard_opts_t test_hpa_shard_opts = {
    /* slab_max_alloc */
    HUGEPAGE,
    /* hugification_threshold */
    0.9 * HUGEPAGE,
    /* dirty_mult */
    FXP_INIT_PERCENT(10),
    /* deferral_allowed */
    true,
    /* hugify_delay_ms */
    0,
    /* hugify_sync */
    false,
    /* min_purge_interval_ms */
    5,
    /* experimental_max_purge_nhp */
    -1,
    /* purge_threshold */
    PAGE,
    /* min_purge_delay_ms */
    10,
    /* hugify_style */
    hpa_hugify_style_lazy};
     47 
     48 static hpa_shard_t *
     49 create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts,
     50     const sec_opts_t *sec_opts) {
     51 	bool    err;
     52 	base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
     53 	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
     54 	assert_ptr_not_null(base, "");
     55 
     56 	test_data_t *test_data = malloc(sizeof(test_data_t));
     57 	assert_ptr_not_null(test_data, "");
     58 
     59 	test_data->base = base;
     60 
     61 	err = edata_cache_init(&test_data->shard_edata_cache, base);
     62 	assert_false(err, "");
     63 
     64 	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
     65 	assert_false(err, "");
     66 
     67 	err = hpa_central_init(&test_data->central, test_data->base, hooks);
     68 	assert_false(err, "");
     69 	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
     70 	err = hpa_shard_init(tsdn, &test_data->shard, &test_data->central,
     71 	    &test_data->emap, test_data->base, &test_data->shard_edata_cache,
     72 	    SHARD_IND, opts, sec_opts);
     73 	assert_false(err, "");
     74 
     75 	return (hpa_shard_t *)test_data;
     76 }
     77 
     78 static void
     79 destroy_test_data(hpa_shard_t *shard) {
     80 	test_data_t *test_data = (test_data_t *)shard;
     81 	base_delete(TSDN_NULL, test_data->base);
     82 	free(test_data);
     83 }
     84 
     85 static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
     86 static void *
     87 defer_test_map(size_t size) {
     88 	void *result = (void *)defer_bump_ptr;
     89 	defer_bump_ptr += size;
     90 	return result;
     91 }
     92 
/* Fake unmap hook: nothing to tear down for bump-pointer "mappings". */
static void
defer_test_unmap(void *ptr, size_t size) {
	(void)size;
	(void)ptr;
}
     98 
/* Fake purge hook: count invocations and remember the last purge size. */
static size_t ndefer_purge_calls = 0;
static size_t npurge_size = 0;
static void
defer_test_purge(void *ptr, size_t size) {
	(void)ptr;
	ndefer_purge_calls++;
	npurge_size = size;
}
    107 
    108 static bool defer_vectorized_purge_called = false;
    109 static bool
    110 defer_vectorized_purge(void *vec, size_t vlen, size_t nbytes) {
    111 	(void)vec;
    112 	(void)nbytes;
    113 	++ndefer_purge_calls;
    114 	defer_vectorized_purge_called = true;
    115 	return false;
    116 }
    117 
/*
 * Fake hugify hook: counts invocations and always reports success.
 * Fix: silence all unused parameters with (void) casts, consistent with
 * the other hook stubs in this file.
 */
static size_t ndefer_hugify_calls = 0;
static bool
defer_test_hugify(void *ptr, size_t size, bool sync) {
	(void)ptr;
	(void)size;
	(void)sync;
	++ndefer_hugify_calls;
	return false;
}
    124 
/*
 * Fake dehugify hook: counts invocations.
 * Fix: silence unused parameters with (void) casts, consistent with the
 * other hook stubs in this file.
 */
static size_t ndefer_dehugify_calls = 0;
static void
defer_test_dehugify(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	++ndefer_dehugify_calls;
}
    130 
    131 static nstime_t defer_curtime;
    132 static void
    133 defer_test_curtime(nstime_t *r_time, bool first_reading) {
    134 	*r_time = defer_curtime;
    135 }
    136 
    137 static uint64_t
    138 defer_test_ms_since(nstime_t *past_time) {
    139 	return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
    140 }
    141 
/*
 * Test that freed pages stay cached in the SEC while the HPA still counts
 * them as active.
 */
    143 
TEST_BEGIN(test_hpa_sec) {
	test_skip_if(!hpa_supported());

	/* Stub out every platform hook so the shard never touches the OS. */
	hpa_hooks_t hooks;
	hooks.map = &defer_test_map;
	hooks.unmap = &defer_test_unmap;
	hooks.purge = &defer_test_purge;
	hooks.hugify = &defer_test_hugify;
	hooks.dehugify = &defer_test_dehugify;
	hooks.curtime = &defer_test_curtime;
	hooks.ms_since = &defer_test_ms_since;
	hooks.vectorized_purge = &defer_vectorized_purge;

	hpa_shard_opts_t opts = test_hpa_shard_opts;

	/*
	 * SEC sized to hold NALLOCS pages; on a miss it pulls
	 * batch_fill_extra additional pages from the shard in one fill.
	 */
	enum { NALLOCS = 8 };
	sec_opts_t sec_opts;
	sec_opts.nshards = 1;
	sec_opts.max_alloc = 2 * PAGE;
	sec_opts.max_bytes = NALLOCS * PAGE;
	sec_opts.batch_fill_extra = 4;

	hpa_shard_t *shard = create_test_data(&hooks, &opts, &sec_opts);
	bool         deferred_work_generated = false;
	tsdn_t      *tsdn = tsd_tsdn(tsd_fetch());

	/* alloc 1 PAGE, confirm sec has fill_extra bytes. */
	edata_t *edata1 = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
	    false, &deferred_work_generated);
	expect_ptr_not_null(edata1, "Unexpected null edata");
	hpa_shard_stats_t hpa_stats;
	memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
	hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
	/* Pages cached in the SEC are still counted active by the shard. */
	expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
	    1 + sec_opts.batch_fill_extra, "");
	expect_zu_eq(hpa_stats.secstats.bytes, PAGE * sec_opts.batch_fill_extra,
	    "sec should have fill extra pages");

	/* Alloc/dealloc NALLOCS times and confirm extents are in sec. */
	edata_t *edatas[NALLOCS];
	for (int i = 0; i < NALLOCS; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    false, false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
	hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
	/* 9 allocs total; two batch fills of 4 extra leave 1 page cached. */
	expect_zu_eq(hpa_stats.psset_stats.merged.nactive, 2 + NALLOCS, "");
	expect_zu_eq(hpa_stats.secstats.bytes, PAGE, "2 refills (at 0 and 4)");

	/* Free all but one page; they should land in the SEC, not the HPA. */
	for (int i = 0; i < NALLOCS - 1; i++) {
		pai_dalloc(
		    tsdn, &shard->pai, edatas[i], &deferred_work_generated);
	}
	memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
	hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
	/* nactive is unchanged: the freed pages are cached, not returned. */
	expect_zu_eq(hpa_stats.psset_stats.merged.nactive, (2 + NALLOCS), "");
	expect_zu_eq(
	    hpa_stats.secstats.bytes, sec_opts.max_bytes, "sec should be full");

	/* this one should flush 1 + 0.25 * 8 = 3 extents */
	pai_dalloc(
	    tsdn, &shard->pai, edatas[NALLOCS - 1], &deferred_work_generated);
	memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
	hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
	/* The 3 flushed pages finally show up as dirty in the shard. */
	expect_zu_eq(hpa_stats.psset_stats.merged.nactive, (NALLOCS - 1), "");
	expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, 3, "");
	expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes,
	    "sec should be full");

	/* Next allocation should come from SEC and not increase active */
	edata_t *edata2 = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
	    false, &deferred_work_generated);
	expect_ptr_not_null(edata2, "Unexpected null edata");
	memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
	hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
	expect_zu_eq(hpa_stats.psset_stats.merged.nactive, NALLOCS - 1, "");
	expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes - PAGE,
	    "sec should have max_bytes minus one page that just came from it");

	/* We return this one and it stays in the cache */
	pai_dalloc(tsdn, &shard->pai, edata2, &deferred_work_generated);
	memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
	hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
	expect_zu_eq(hpa_stats.psset_stats.merged.nactive, NALLOCS - 1, "");
	expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, 3, "");
	expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes, "");

	destroy_test_data(shard);
}
TEST_END
    235 
int
main(void) {
	/* Run the single test through the no-reentrancy harness. */
	return test_no_reentrancy(test_hpa_sec);
}
    240