#include "test/jemalloc_test.h"

#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/nstime.h"

#define SHARD_IND 111

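/* Largest allocation the test shard will serve (used as slab_max_alloc). */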
#define ALLOC_MAX (HUGEPAGE / 4)

typedef struct test_data_s test_data_t;
struct test_data_s {
	/*
	 * Must be the first member -- we convert back and forth between the
	 * test_data_t and the hpa_shard_t.
	 */
	hpa_shard_t shard;
	hpa_central_t central;
	base_t *base;
	edata_cache_t shard_edata_cache;

	emap_t emap;
};

static hpa_shard_opts_t test_hpa_shard_opts_default = {
	/* slab_max_alloc */
	ALLOC_MAX,
	/* hugification_threshold */
	HUGEPAGE,
	/* dirty_mult */
	FXP_INIT_PERCENT(25),
	/* deferral_allowed */
	false,
	/* hugify_delay_ms */
	10 * 1000,
};

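/*
 * Set up a fully initialized shard: a fresh base for metadata, an edata
 * cache, an emap, and an hpa_central, all embedded in the test_data_t that
 * encloses the returned shard.
 */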
static hpa_shard_t *
create_test_data(hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
	bool err;
	base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
	assert_ptr_not_null(base, "");

	test_data_t *test_data = malloc(sizeof(test_data_t));
	assert_ptr_not_null(test_data, "");

	test_data->base = base;

	err = edata_cache_init(&test_data->shard_edata_cache, base);
	assert_false(err, "");

	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
	assert_false(err, "");

	err = hpa_central_init(&test_data->central, test_data->base, hooks);
	assert_false(err, "");

	err = hpa_shard_init(&test_data->shard, &test_data->central,
	    &test_data->emap, test_data->base, &test_data->shard_edata_cache,
	    SHARD_IND, opts);
	assert_false(err, "");

	return (hpa_shard_t *)test_data;
}

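/*
 * Tear down everything create_test_data() built; deleting the base releases
 * the metadata allocated from it.
 */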
static void
destroy_test_data(hpa_shard_t *shard) {
	test_data_t *test_data = (test_data_t *)shard;
	base_delete(TSDN_NULL, test_data->base);
	free(test_data);
}

TEST_BEGIN(test_alloc_max) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	edata_t *edata;

	/* Small max */
	bool deferred_work_generated = false;
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Allocation of small max failed");
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_null(edata, "Allocation of larger than small max succeeded");

	destroy_test_data(shard);
}
TEST_END

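/*
 * The stress test shadows every live allocation with a mem_contents_t stored
 * in the allocation itself, kept in a red-black tree ordered by address; this
 * lets us check that extents never overlap and that each edata still maps to
 * the memory it claims to own.
 */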
typedef struct mem_contents_s mem_contents_t;
struct mem_contents_s {
	uintptr_t my_addr;
	size_t size;
	edata_t *my_edata;
	rb_node(mem_contents_t) link;
};

static int
mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) {
	return (a->my_addr > b->my_addr) - (a->my_addr < b->my_addr);
}

typedef rb_tree(mem_contents_t) mem_tree_t;
rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link,
    mem_contents_cmp);

static void
node_assert_ordered(mem_contents_t *a, mem_contents_t *b) {
	assert_zu_lt(a->my_addr, a->my_addr + a->size, "Overflow");
	assert_zu_le(a->my_addr + a->size, b->my_addr, "");
}

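/*
 * Verify that a node's self-describing fields still match its edata, and that
 * it does not overlap either of its tree neighbors.
 */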
static void
node_check(mem_tree_t *tree, mem_contents_t *contents) {
	edata_t *edata = contents->my_edata;
	assert_ptr_eq(contents, (void *)contents->my_addr, "");
	assert_ptr_eq(contents, edata_base_get(edata), "");
	assert_zu_eq(contents->size, edata_size_get(edata), "");
	assert_ptr_eq(contents->my_edata, edata, "");

	mem_contents_t *next = mem_tree_next(tree, contents);
	if (next != NULL) {
		node_assert_ordered(contents, next);
	}
	mem_contents_t *prev = mem_tree_prev(tree, contents);
	if (prev != NULL) {
		node_assert_ordered(prev, contents);
	}
}

static void
node_insert(mem_tree_t *tree, edata_t *edata, size_t npages) {
	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
	contents->my_addr = (uintptr_t)edata_base_get(edata);
	contents->size = edata_size_get(edata);
	contents->my_edata = edata;
	mem_tree_insert(tree, contents);
	node_check(tree, contents);
}

static void
node_remove(mem_tree_t *tree, edata_t *edata) {
	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
	node_check(tree, contents);
	mem_tree_remove(tree, contents);
}

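/*
 * Randomly interleave allocations and frees, mirroring every live extent in
 * the address-ordered tree so that overlap or metadata corruption is caught
 * as soon as it happens.
 */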
TEST_BEGIN(test_stress) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	const size_t nlive_edatas_max = 500;
	size_t nlive_edatas = 0;
	edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *));
	/*
	 * Nothing special about this constant; we're only fixing it for
	 * consistency across runs.
	 */
	size_t prng_state = (size_t)0x76999ffb014df07c;

	mem_tree_t tree;
	mem_tree_new(&tree);

	bool deferred_work_generated = false;

	for (size_t i = 0; i < 100 * 1000; i++) {
		size_t operation = prng_range_zu(&prng_state, 2);
		if (operation == 0) {
			/* Alloc */
			if (nlive_edatas == nlive_edatas_max) {
				continue;
			}

			/*
			 * We make sure to get an even balance of small and
			 * large allocations.
			 */
			size_t npages_min = 1;
			size_t npages_max = ALLOC_MAX / PAGE;
			size_t npages = npages_min + prng_range_zu(&prng_state,
			    npages_max - npages_min);
			edata_t *edata = pai_alloc(tsdn, &shard->pai,
			    npages * PAGE, PAGE, /* zero */ false,
			    /* guarded */ false, /* frequent_reuse */ false,
			    &deferred_work_generated);
			assert_ptr_not_null(edata,
			    "Unexpected allocation failure");
			live_edatas[nlive_edatas] = edata;
			nlive_edatas++;
			node_insert(&tree, edata, npages);
		} else {
			/* Free. */
			if (nlive_edatas == 0) {
				continue;
			}
			size_t victim = prng_range_zu(&prng_state,
			    nlive_edatas);
			edata_t *to_free = live_edatas[victim];
			live_edatas[victim] = live_edatas[nlive_edatas - 1];
			nlive_edatas--;
			node_remove(&tree, to_free);
			pai_dalloc(tsdn, &shard->pai, to_free,
			    &deferred_work_generated);
		}
	}

	size_t ntreenodes = 0;
	for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL;
	    contents = mem_tree_next(&tree, contents)) {
		ntreenodes++;
		node_check(&tree, contents);
	}
	expect_zu_eq(ntreenodes, nlive_edatas, "");

	/*
	 * Test hpa_shard_destroy, which requires as a precondition that all its
	 * extents have been deallocated.
	 */
	for (size_t i = 0; i < nlive_edatas; i++) {
		edata_t *to_free = live_edatas[i];
		node_remove(&tree, to_free);
		pai_dalloc(tsdn, &shard->pai, to_free,
		    &deferred_work_generated);
	}
	hpa_shard_destroy(tsdn, shard);

	free(live_edatas);
	destroy_test_data(shard);
}
TEST_END

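/*
 * Expect nedatas PAGE-sized allocations laid out back to back, starting at
 * the base of edatas[0].
 */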
static void
expect_contiguous(edata_t **edatas, size_t nedatas) {
	for (size_t i = 0; i < nedatas; i++) {
		size_t expected = (size_t)edata_base_get(edatas[0])
		    + i * PAGE;
		expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]),
		    "Mismatch at index %zu", i);
	}
}

TEST_BEGIN(test_alloc_dalloc_batch) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	bool deferred_work_generated = false;

	enum {NALLOCS = 8};

	edata_t *allocs[NALLOCS];
	/*
	 * Allocate a mix of ways; first half from regular alloc, second half
	 * from alloc_batch.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	edata_list_active_t allocs_list;
	edata_list_active_init(&allocs_list);
	size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
	    &allocs_list, &deferred_work_generated);
	expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		allocs[i] = edata_list_active_first(&allocs_list);
		edata_list_active_remove(&allocs_list, allocs[i]);
	}

	/*
	 * Should have allocated them contiguously, despite the differing
	 * methods used.
	 */
	void *orig_base = edata_base_get(allocs[0]);
	expect_contiguous(allocs, NALLOCS);

	/*
	 * Batch dalloc the first half, individually deallocate the second
	 * half.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		edata_list_active_append(&allocs_list, allocs[i]);
	}
	pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
	    &deferred_work_generated);
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		pai_dalloc(tsdn, &shard->pai, allocs[i],
		    &deferred_work_generated);
	}

	/* Reallocate (individually), and ensure reuse and contiguity. */
	for (size_t i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
	}
	void *new_base = edata_base_get(allocs[0]);
	expect_ptr_eq(orig_base, new_base,
	    "Failed to reuse the allocated memory.");
	expect_contiguous(allocs, NALLOCS);

	destroy_test_data(shard);
}
TEST_END

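/*
 * Stub hooks for the deferred-work test: a bump-pointer map starting at an
 * arbitrary (unbacked) address, a no-op unmap, and purge/hugify/dehugify
 * hooks that merely record that they were called.  Time is virtual, driven
 * by defer_curtime, so the test controls the clock.
 */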
static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
static void *
defer_test_map(size_t size) {
	void *result = (void *)defer_bump_ptr;
	defer_bump_ptr += size;
	return result;
}

static void
defer_test_unmap(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
}

static bool defer_purge_called = false;
static void
defer_test_purge(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	defer_purge_called = true;
}

static bool defer_hugify_called = false;
static void
defer_test_hugify(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	defer_hugify_called = true;
}

static bool defer_dehugify_called = false;
static void
defer_test_dehugify(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	defer_dehugify_called = true;
}

static nstime_t defer_curtime;
static void
defer_test_curtime(nstime_t *r_time, bool first_reading) {
	(void)first_reading;
	*r_time = defer_curtime;
}

static uint64_t
defer_test_ms_since(nstime_t *past_time) {
	return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
}

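/*
 * Allocate a hugepage's worth of pages, then advance the virtual clock past
 * the hugify_delay_ms and dirty_mult thresholds, checking that deferred
 * hugify, dehugify, and purge fire exactly when expected.
 */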
TEST_BEGIN(test_defer_time) {
	test_skip_if(!hpa_supported());

	hpa_hooks_t hooks;
	hooks.map = &defer_test_map;
	hooks.unmap = &defer_test_unmap;
	hooks.purge = &defer_test_purge;
	hooks.hugify = &defer_test_hugify;
	hooks.dehugify = &defer_test_dehugify;
	hooks.curtime = &defer_test_curtime;
	hooks.ms_since = &defer_test_ms_since;

	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
	opts.deferral_allowed = true;

	hpa_shard_t *shard = create_test_data(&hooks, &opts);

	bool deferred_work_generated = false;

	nstime_init(&defer_curtime, 0);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	edata_t *edatas[HUGEPAGE_PAGES];
	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	hpa_shard_do_deferred_work(tsdn, shard);
	expect_false(defer_hugify_called, "Hugified too early");

	/* Hugification delay is set to 10 seconds in options. */
	nstime_init2(&defer_curtime, 11, 0);
	hpa_shard_do_deferred_work(tsdn, shard);
	expect_true(defer_hugify_called, "Failed to hugify");

	defer_hugify_called = false;

	/* Purge.  Recall that dirty_mult is .25. */
	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
		pai_dalloc(tsdn, &shard->pai, edatas[i],
		    &deferred_work_generated);
	}

	hpa_shard_do_deferred_work(tsdn, shard);

	expect_false(defer_hugify_called, "Hugified too early");
	expect_true(defer_dehugify_called, "Should have dehugified");
	expect_true(defer_purge_called, "Should have purged");
	defer_hugify_called = false;
	defer_dehugify_called = false;
	defer_purge_called = false;

	/*
	 * Refill the page.  We now meet the hugification threshold; we should
	 * be marked for pending hugify.
	 */
	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	/*
	 * We would be ineligible for hugification, had we not already met the
	 * threshold before dipping below it.
	 */
	pai_dalloc(tsdn, &shard->pai, edatas[0],
	    &deferred_work_generated);
	/* Wait for the threshold again. */
	nstime_init2(&defer_curtime, 22, 0);
	hpa_shard_do_deferred_work(tsdn, shard);
	expect_true(defer_hugify_called, "Failed to hugify");
	expect_false(defer_dehugify_called, "Unexpected dehugify");
	expect_false(defer_purge_called, "Unexpected purge");

	destroy_test_data(shard);
}
TEST_END

int
main(void) {
	/*
	 * These trigger unused-function warnings on CI runs, even if declared
	 * with static inline.
	 */
	(void)mem_tree_empty;
	(void)mem_tree_last;
	(void)mem_tree_search;
	(void)mem_tree_nsearch;
	(void)mem_tree_psearch;
	(void)mem_tree_iter;
	(void)mem_tree_reverse_iter;
	(void)mem_tree_destroy;
	return test_no_reentrancy(
	    test_alloc_max,
	    test_stress,
	    test_alloc_dalloc_batch,
	    test_defer_time);
}