/* Unit tests for HPA (huge page allocator) deferred/vectorized purge hooks. */
      1 #include "test/jemalloc_test.h"
      2 
      3 #include "jemalloc/internal/hpa.h"
      4 #include "jemalloc/internal/hpa_utils.h"
      5 #include "jemalloc/internal/nstime.h"
      6 
/* Arena/shard index handed to base_new() and hpa_shard_init(); arbitrary. */
#define SHARD_IND 111

/* Largest slab allocation the test shard serves (slab_max_alloc opt). */
#define ALLOC_MAX (HUGEPAGE)
     10 
typedef struct test_data_s test_data_t;
/*
 * Bundle of everything a test shard needs, allocated as one unit by
 * create_test_data() and torn down by destroy_test_data().
 */
struct test_data_s {
	/*
	 * Must be the first member -- we convert back and forth between the
	 * test_data_t and the hpa_shard_t;
	 */
	hpa_shard_t   shard;
	/* Initialized with the hooks under test; passed to hpa_shard_init(). */
	hpa_central_t central;
	/* Metadata allocator backing the cache, emap, and central below. */
	base_t       *base;
	/* Per-shard edata_t cache, initialized from base. */
	edata_cache_t shard_edata_cache;

	/* Extent map given to the shard at init time. */
	emap_t emap;
};
     24 
     25 static hpa_shard_opts_t test_hpa_shard_opts_default = {
     26     /* slab_max_alloc */
     27     ALLOC_MAX,
     28     /* hugification_threshold */
     29     HUGEPAGE,
     30     /* dirty_mult */
     31     FXP_INIT_PERCENT(25),
     32     /* deferral_allowed */
     33     false,
     34     /* hugify_delay_ms */
     35     10 * 1000,
     36     /* hugify_sync */
     37     false,
     38     /* min_purge_interval_ms */
     39     5 * 1000,
     40     /* experimental_max_purge_nhp */
     41     -1,
     42     /* purge_threshold */
     43     1,
     44     /* min_purge_delay_ms */
     45     0,
     46     /* hugify_style */
     47     hpa_hugify_style_lazy};
     48 
     49 static hpa_shard_t *
     50 create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
     51 	bool    err;
     52 	base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
     53 	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
     54 	assert_ptr_not_null(base, "");
     55 
     56 	test_data_t *test_data = malloc(sizeof(test_data_t));
     57 	assert_ptr_not_null(test_data, "");
     58 
     59 	test_data->base = base;
     60 
     61 	err = edata_cache_init(&test_data->shard_edata_cache, base);
     62 	assert_false(err, "");
     63 
     64 	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
     65 	assert_false(err, "");
     66 
     67 	err = hpa_central_init(&test_data->central, test_data->base, hooks);
     68 	assert_false(err, "");
     69 	sec_opts_t sec_opts;
     70 	sec_opts.nshards = 0;
     71 	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
     72 	err = hpa_shard_init(tsdn, &test_data->shard, &test_data->central,
     73 	    &test_data->emap, test_data->base, &test_data->shard_edata_cache,
     74 	    SHARD_IND, opts, &sec_opts);
     75 	assert_false(err, "");
     76 
     77 	return (hpa_shard_t *)test_data;
     78 }
     79 
     80 static void
     81 destroy_test_data(hpa_shard_t *shard) {
     82 	test_data_t *test_data = (test_data_t *)shard;
     83 	base_delete(TSDN_NULL, test_data->base);
     84 	free(test_data);
     85 }
     86 
     87 static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
     88 static void *
     89 defer_test_map(size_t size) {
     90 	void *result = (void *)defer_bump_ptr;
     91 	defer_bump_ptr += size;
     92 	return result;
     93 }
     94 
/* Fake unmap hook: the bump "mapping" cannot be undone, so do nothing. */
static void
defer_test_unmap(void *ptr, size_t size) {
	(void)size;
	(void)ptr;
}
    100 
/* Number of non-vectorized purge-hook invocations; tests reset this. */
static size_t ndefer_purge_calls = 0;
/* Fake purge hook: record the call, touch nothing. */
static void
defer_test_purge(void *ptr, size_t size) {
	(void)size;
	(void)ptr;
	ndefer_purge_calls++;
}
    108 
/* Number of vectorized purge-hook invocations; tests reset this. */
static size_t ndefer_vec_purge_calls = 0;
/*
 * Fake vectorized purge hook: counts calls and reports success (false).
 */
static bool
defer_vectorized_purge(void *vec, size_t vlen, size_t nbytes) {
	(void)vec;
	(void)vlen; /* was missing its cast; silences -Wunused-parameter */
	(void)nbytes;
	++ndefer_vec_purge_calls;
	return false;
}
    117 
/* Number of hugify-hook invocations; tests reset this. */
static size_t ndefer_hugify_calls = 0;
/*
 * Fake hugify hook: counts calls and reports success (false).  The unused
 * parameters are now explicitly voided, matching the other hooks in this
 * file and avoiding -Wunused-parameter.
 */
static bool
defer_test_hugify(void *ptr, size_t size, bool sync) {
	(void)ptr;
	(void)size;
	(void)sync;
	++ndefer_hugify_calls;
	return false;
}
    124 
/* Number of dehugify-hook invocations; tests reset this. */
static size_t ndefer_dehugify_calls = 0;
/*
 * Fake dehugify hook: counts calls.  Unused parameters are explicitly
 * voided, matching the other hooks and avoiding -Wunused-parameter.
 */
static void
defer_test_dehugify(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	++ndefer_dehugify_calls;
}
    130 
    131 static nstime_t defer_curtime;
    132 static void
    133 defer_test_curtime(nstime_t *r_time, bool first_reading) {
    134 	*r_time = defer_curtime;
    135 }
    136 
    137 static uint64_t
    138 defer_test_ms_since(nstime_t *past_time) {
    139 	return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
    140 }
    141 
/*
 * Several discontiguous dirty ranges spread across multiple hugepages should
 * be flushed through the vectorized purge hook as a single batched call.
 */
TEST_BEGIN(test_vectorized_purge) {
	test_skip_if(!hpa_supported() || opt_process_madvise_max_batch == 0
	    || HUGEPAGE_PAGES <= 4);
	/*
	 * NOTE(review): the single-call expectation below is computed for a
	 * batch size of exactly 64; other non-zero settings abort here rather
	 * than skip -- confirm whether test_skip_if would be more appropriate.
	 */
	assert(opt_process_madvise_max_batch == 64);

	/* Stub out all OS interaction so hook invocations can be counted. */
	hpa_hooks_t hooks;
	hooks.map = &defer_test_map;
	hooks.unmap = &defer_test_unmap;
	hooks.purge = &defer_test_purge;
	hooks.hugify = &defer_test_hugify;
	hooks.dehugify = &defer_test_dehugify;
	hooks.curtime = &defer_test_curtime;
	hooks.ms_since = &defer_test_ms_since;
	hooks.vectorized_purge = &defer_vectorized_purge;

	/* Defer purging so it only happens in do_deferred_work below. */
	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
	opts.deferral_allowed = true;
	opts.min_purge_interval_ms = 0;
	/* Counters are shared across tests in this file; reset them. */
	ndefer_vec_purge_calls = 0;
	ndefer_purge_calls = 0;

	hpa_shard_t *shard = create_test_data(&hooks, &opts);

	bool deferred_work_generated = false;

	nstime_init(&defer_curtime, 0);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	/* Fill 8 hugepages with single-page allocations. */
	enum { NALLOCS = 8 * HUGEPAGE_PAGES };
	edata_t *edatas[NALLOCS];
	for (int i = 0; i < NALLOCS; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    false, false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	/* Deallocate almost 3 hugepages out of 8, and to force batching
	 * leave the 2nd and 4th PAGE in the first 3 hugepages.
	 */
	for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) {
		int j = i % HUGEPAGE_PAGES;
		if (j != 1 && j != 3) {
			pai_dalloc(tsdn, &shard->pai, edatas[i],
			    &deferred_work_generated);
		}
	}

	hpa_shard_do_deferred_work(tsdn, shard);

	/*
	 * We purge from 2 huge pages, each one 3 dirty contiguous segments.
	 * For opt_process_madvise_max_batch = 64, that is all just one call
	 */
	expect_zu_eq(1, ndefer_vec_purge_calls, "Expect single purge");
	ndefer_vec_purge_calls = 0;

	destroy_test_data(shard);
}
TEST_END
    200 
/*
 * When more dirty pages exist than fit in one purge batch, deferred work
 * should issue multiple vectorized purge calls until the dirty-page target
 * is reached.
 */
TEST_BEGIN(test_purge_more_than_one_batch_pages) {
	test_skip_if(!hpa_supported()
	    || (opt_process_madvise_max_batch < HPA_PURGE_BATCH_MAX)
	    || HUGEPAGE_PAGES <= 4);

	/* Stub out all OS interaction so hook invocations can be counted. */
	hpa_hooks_t hooks;
	hooks.map = &defer_test_map;
	hooks.unmap = &defer_test_unmap;
	hooks.purge = &defer_test_purge;
	hooks.hugify = &defer_test_hugify;
	hooks.dehugify = &defer_test_dehugify;
	hooks.curtime = &defer_test_curtime;
	hooks.ms_since = &defer_test_ms_since;
	hooks.vectorized_purge = &defer_vectorized_purge;

	/* Aggressive 1% dirty target forces purging of most freed pages. */
	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
	opts.deferral_allowed = true;
	opts.min_purge_interval_ms = 0;
	opts.dirty_mult = FXP_INIT_PERCENT(1);
	/* Counters are shared across tests in this file; reset them. */
	ndefer_vec_purge_calls = 0;
	ndefer_purge_calls = 0;
	ndefer_hugify_calls = 0;
	ndefer_dehugify_calls = 0;

	hpa_shard_t *shard = create_test_data(&hooks, &opts);

	bool deferred_work_generated = false;

	nstime_init(&defer_curtime, 0);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	/* 3 * HPA_PURGE_BATCH_MAX hugepages' worth of single-page allocs. */
	enum { NALLOCS = HPA_PURGE_BATCH_MAX * 3 * HUGEPAGE_PAGES };
	edata_t *edatas[NALLOCS];
	for (int i = 0; i < NALLOCS; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    false, false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	/* Free the first two thirds; with deferral on they stay dirty. */
	for (int i = 0; i < HPA_PURGE_BATCH_MAX * 2 * (int)HUGEPAGE_PAGES;
	    i++) {
		pai_dalloc(
		    tsdn, &shard->pai, edatas[i], &deferred_work_generated);
	}

	hpa_shard_do_deferred_work(tsdn, shard);

	/*
	 * Strict minimum purge interval is not set, we should purge as long as
	 * we have dirty pages.
	 */
	expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early");
	expect_zu_eq(0, ndefer_dehugify_calls, "Dehugified too early");

	/* We have page batch size = HPA_PURGE_BATCH_MAX.  We have
	 * HPA_PURGE_BATCH_MAX active pages, 2 * HPA_PURGE_BATCH_MAX dirty.
	 * To achieve the balance of 1% max dirty we need to purge more than one
	 * batch.
	 */
	size_t nexpected = 2;
	expect_zu_eq(nexpected, ndefer_vec_purge_calls, "Expect purge");
	expect_zu_eq(0, ndefer_purge_calls, "Expect no non-vec purge");
	ndefer_vec_purge_calls = 0;

	destroy_test_data(shard);
}
TEST_END
    267 
    268 int
    269 main(void) {
    270 	return test_no_reentrancy(
    271 	    test_vectorized_purge, test_purge_more_than_one_batch_pages);
    272 }
    273