Home | History | Annotate | Line # | Download | only in unit
      1  1.1  christos #include "test/jemalloc_test.h"
      2  1.1  christos 
      3  1.1  christos #include "jemalloc/internal/hpa.h"
      4  1.1  christos #include "jemalloc/internal/nstime.h"
      5  1.1  christos 
/* Arbitrary arena/shard index used for the test shard and its base. */
#define SHARD_IND 111

/* Largest allocation the test shard is configured to serve from slabs. */
#define ALLOC_MAX (HUGEPAGE / 4)
      9  1.1  christos 
typedef struct test_data_s test_data_t;
/*
 * Bundle of everything needed to stand up an hpa_shard_t in isolation:
 * the shard itself plus all the allocator machinery it depends on.
 */
struct test_data_s {
	/*
	 * Must be the first member -- we convert back and forth between the
	 * test_data_t and the hpa_shard_t;
	 */
	hpa_shard_t shard;
	/* Central HPA state the shard is attached to. */
	hpa_central_t central;
	/* Metadata base allocator backing the components below. */
	base_t *base;
	/* Source of edata_t structures for the shard. */
	edata_cache_t shard_edata_cache;

	/* Extent map; tracks extents by address. */
	emap_t emap;
};
     23  1.1  christos 
/*
 * Default shard options for the tests.  Initialization is positional; the
 * comment before each value names the hpa_shard_opts_t field it fills.
 */
static hpa_shard_opts_t test_hpa_shard_opts_default = {
	/* slab_max_alloc */
	ALLOC_MAX,
	/* hugification threshold */
	HUGEPAGE,
	/* dirty_mult */
	FXP_INIT_PERCENT(25),
	/* deferral_allowed */
	false,
	/* hugify_delay_ms */
	10 * 1000,
};
     36  1.1  christos 
     37  1.1  christos static hpa_shard_t *
     38  1.1  christos create_test_data(hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
     39  1.1  christos 	bool err;
     40  1.1  christos 	base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
     41  1.1  christos 	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
     42  1.1  christos 	assert_ptr_not_null(base, "");
     43  1.1  christos 
     44  1.1  christos 	test_data_t *test_data = malloc(sizeof(test_data_t));
     45  1.1  christos 	assert_ptr_not_null(test_data, "");
     46  1.1  christos 
     47  1.1  christos 	test_data->base = base;
     48  1.1  christos 
     49  1.1  christos 	err = edata_cache_init(&test_data->shard_edata_cache, base);
     50  1.1  christos 	assert_false(err, "");
     51  1.1  christos 
     52  1.1  christos 	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
     53  1.1  christos 	assert_false(err, "");
     54  1.1  christos 
     55  1.1  christos 	err = hpa_central_init(&test_data->central, test_data->base, hooks);
     56  1.1  christos 	assert_false(err, "");
     57  1.1  christos 
     58  1.1  christos 	err = hpa_shard_init(&test_data->shard, &test_data->central,
     59  1.1  christos 	    &test_data->emap, test_data->base, &test_data->shard_edata_cache,
     60  1.1  christos 	    SHARD_IND, opts);
     61  1.1  christos 	assert_false(err, "");
     62  1.1  christos 
     63  1.1  christos 	return (hpa_shard_t *)test_data;
     64  1.1  christos }
     65  1.1  christos 
     66  1.1  christos static void
     67  1.1  christos destroy_test_data(hpa_shard_t *shard) {
     68  1.1  christos 	test_data_t *test_data = (test_data_t *)shard;
     69  1.1  christos 	base_delete(TSDN_NULL, test_data->base);
     70  1.1  christos 	free(test_data);
     71  1.1  christos }
     72  1.1  christos 
     73  1.1  christos TEST_BEGIN(test_alloc_max) {
     74  1.1  christos 	test_skip_if(!hpa_supported());
     75  1.1  christos 
     76  1.1  christos 	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
     77  1.1  christos 	    &test_hpa_shard_opts_default);
     78  1.1  christos 	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
     79  1.1  christos 
     80  1.1  christos 	edata_t *edata;
     81  1.1  christos 
     82  1.1  christos 	/* Small max */
     83  1.1  christos 	bool deferred_work_generated = false;
     84  1.1  christos 	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
     85  1.1  christos 	    false, &deferred_work_generated);
     86  1.1  christos 	expect_ptr_not_null(edata, "Allocation of small max failed");
     87  1.1  christos 	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
     88  1.1  christos 	    false, false, &deferred_work_generated);
     89  1.1  christos 	expect_ptr_null(edata, "Allocation of larger than small max succeeded");
     90  1.1  christos 
     91  1.1  christos 	destroy_test_data(shard);
     92  1.1  christos }
     93  1.1  christos TEST_END
     94  1.1  christos 
typedef struct mem_contents_s mem_contents_t;
/*
 * Self-describing record written into the start of each live allocation by
 * the stress test, so overlapping or corrupted extents can be detected.
 */
struct mem_contents_s {
	/* The record's own address; checked against where it actually lives. */
	uintptr_t my_addr;
	/* Size of the backing allocation, in bytes. */
	size_t size;
	/* The edata_t that backs this allocation. */
	edata_t *my_edata;
	/* Intrusive red-black-tree linkage (tree is keyed by address). */
	rb_node(mem_contents_t) link;
};
    102  1.1  christos 
    103  1.1  christos static int
    104  1.1  christos mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) {
    105  1.1  christos 	return (a->my_addr > b->my_addr) - (a->my_addr < b->my_addr);
    106  1.1  christos }
    107  1.1  christos 
/* Address-ordered red-black tree of all live allocations' content records. */
typedef rb_tree(mem_contents_t) mem_tree_t;
rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link,
    mem_contents_cmp);
    111  1.1  christos 
    112  1.1  christos static void
    113  1.1  christos node_assert_ordered(mem_contents_t *a, mem_contents_t *b) {
    114  1.1  christos 	assert_zu_lt(a->my_addr, a->my_addr + a->size, "Overflow");
    115  1.1  christos 	assert_zu_le(a->my_addr + a->size, b->my_addr, "");
    116  1.1  christos }
    117  1.1  christos 
    118  1.1  christos static void
    119  1.1  christos node_check(mem_tree_t *tree, mem_contents_t *contents) {
    120  1.1  christos 	edata_t *edata = contents->my_edata;
    121  1.1  christos 	assert_ptr_eq(contents, (void *)contents->my_addr, "");
    122  1.1  christos 	assert_ptr_eq(contents, edata_base_get(edata), "");
    123  1.1  christos 	assert_zu_eq(contents->size, edata_size_get(edata), "");
    124  1.1  christos 	assert_ptr_eq(contents->my_edata, edata, "");
    125  1.1  christos 
    126  1.1  christos 	mem_contents_t *next = mem_tree_next(tree, contents);
    127  1.1  christos 	if (next != NULL) {
    128  1.1  christos 		node_assert_ordered(contents, next);
    129  1.1  christos 	}
    130  1.1  christos 	mem_contents_t *prev = mem_tree_prev(tree, contents);
    131  1.1  christos 	if (prev != NULL) {
    132  1.1  christos 		node_assert_ordered(prev, contents);
    133  1.1  christos 	}
    134  1.1  christos }
    135  1.1  christos 
    136  1.1  christos static void
    137  1.1  christos node_insert(mem_tree_t *tree, edata_t *edata, size_t npages) {
    138  1.1  christos 	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
    139  1.1  christos 	contents->my_addr = (uintptr_t)edata_base_get(edata);
    140  1.1  christos 	contents->size = edata_size_get(edata);
    141  1.1  christos 	contents->my_edata = edata;
    142  1.1  christos 	mem_tree_insert(tree, contents);
    143  1.1  christos 	node_check(tree, contents);
    144  1.1  christos }
    145  1.1  christos 
    146  1.1  christos static void
    147  1.1  christos node_remove(mem_tree_t *tree, edata_t *edata) {
    148  1.1  christos 	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
    149  1.1  christos 	node_check(tree, contents);
    150  1.1  christos 	mem_tree_remove(tree, contents);
    151  1.1  christos }
    152  1.1  christos 
/*
 * Randomized stress test: perform 100k random alloc/dalloc operations against
 * the shard, mirroring every live allocation in an address-ordered shadow
 * tree to detect overlapping or corrupted extents.  Finishes by draining all
 * allocations and destroying the shard.
 */
TEST_BEGIN(test_stress) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	const size_t nlive_edatas_max = 500;
	size_t nlive_edatas = 0;
	edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *));
	/*
	 * Nothing special about this constant; we're only fixing it for
	 * consistency across runs.
	 */
	size_t prng_state = (size_t)0x76999ffb014df07c;

	/* Shadow structure tracking every live allocation by address. */
	mem_tree_t tree;
	mem_tree_new(&tree);

	bool deferred_work_generated = false;

	for (size_t i = 0; i < 100 * 1000; i++) {
		/* Coin flip: 0 allocates, 1 frees. */
		size_t operation = prng_range_zu(&prng_state, 2);
		if (operation == 0) {
			/* Alloc */
			if (nlive_edatas == nlive_edatas_max) {
				continue;
			}

			/*
			 * We make sure to get an even balance of small and
			 * large allocations.
			 */
			size_t npages_min = 1;
			size_t npages_max = ALLOC_MAX / PAGE;
			size_t npages = npages_min + prng_range_zu(&prng_state,
			    npages_max - npages_min);
			edata_t *edata = pai_alloc(tsdn, &shard->pai,
			    npages * PAGE, PAGE, false, false, false,
			    &deferred_work_generated);
			assert_ptr_not_null(edata,
			    "Unexpected allocation failure");
			live_edatas[nlive_edatas] = edata;
			nlive_edatas++;
			node_insert(&tree, edata, npages);
		} else {
			/* Free. */
			if (nlive_edatas == 0) {
				continue;
			}
			/* Swap-remove a random victim from the live array. */
			size_t victim = prng_range_zu(&prng_state, nlive_edatas);
			edata_t *to_free = live_edatas[victim];
			live_edatas[victim] = live_edatas[nlive_edatas - 1];
			nlive_edatas--;
			node_remove(&tree, to_free);
			pai_dalloc(tsdn, &shard->pai, to_free,
			    &deferred_work_generated);
		}
	}

	/* The shadow tree and the live array must agree on what's alive. */
	size_t ntreenodes = 0;
	for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL;
	    contents = mem_tree_next(&tree, contents)) {
		ntreenodes++;
		node_check(&tree, contents);
	}
	expect_zu_eq(ntreenodes, nlive_edatas, "");

	/*
	 * Test hpa_shard_destroy, which requires as a precondition that all its
	 * extents have been deallocated.
	 */
	for (size_t i = 0; i < nlive_edatas; i++) {
		edata_t *to_free = live_edatas[i];
		node_remove(&tree, to_free);
		pai_dalloc(tsdn, &shard->pai, to_free,
		    &deferred_work_generated);
	}
	hpa_shard_destroy(tsdn, shard);

	free(live_edatas);
	destroy_test_data(shard);
}
TEST_END
    238  1.1  christos 
    239  1.1  christos static void
    240  1.1  christos expect_contiguous(edata_t **edatas, size_t nedatas) {
    241  1.1  christos 	for (size_t i = 0; i < nedatas; i++) {
    242  1.1  christos 		size_t expected = (size_t)edata_base_get(edatas[0])
    243  1.1  christos 		    + i * PAGE;
    244  1.1  christos 		expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]),
    245  1.1  christos 		    "Mismatch at index %zu", i);
    246  1.1  christos 	}
    247  1.1  christos }
    248  1.1  christos 
/*
 * Exercise the batch alloc/dalloc paths and check that they interoperate with
 * the single-allocation paths: mixed-method allocations should still come out
 * contiguous, and freed memory should be reused at the same addresses.
 */
TEST_BEGIN(test_alloc_dalloc_batch) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	bool deferred_work_generated = false;

	enum {NALLOCS = 8};

	edata_t *allocs[NALLOCS];
	/*
	 * Allocate a mix of ways; first half from regular alloc, second half
	 * from alloc_batch.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	edata_list_active_t allocs_list;
	edata_list_active_init(&allocs_list);
	size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
	    &allocs_list, &deferred_work_generated);
	expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
	/* Drain the batch results into the tail of the allocs array. */
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		allocs[i] = edata_list_active_first(&allocs_list);
		edata_list_active_remove(&allocs_list, allocs[i]);
	}

	/*
	 * Should have allocated them contiguously, despite the differing
	 * methods used.
	 */
	void *orig_base = edata_base_get(allocs[0]);
	expect_contiguous(allocs, NALLOCS);

	/*
	 * Batch dalloc the first half, individually deallocate the second half.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		edata_list_active_append(&allocs_list, allocs[i]);
	}
	pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
	    &deferred_work_generated);
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		pai_dalloc(tsdn, &shard->pai, allocs[i],
		    &deferred_work_generated);
	}

	/* Reallocate (individually), and ensure reuse and contiguity. */
	for (size_t i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
	}
	void *new_base = edata_base_get(allocs[0]);
	expect_ptr_eq(orig_base, new_base,
	    "Failed to reuse the allocated memory.");
	expect_contiguous(allocs, NALLOCS);

	destroy_test_data(shard);
}
TEST_END
    316  1.1  christos 
    317  1.1  christos static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
    318  1.1  christos static void *
    319  1.1  christos defer_test_map(size_t size) {
    320  1.1  christos 	void *result = (void *)defer_bump_ptr;
    321  1.1  christos 	defer_bump_ptr += size;
    322  1.1  christos 	return result;
    323  1.1  christos }
    324  1.1  christos 
/* Fake unmap hook: nothing was really mapped, so nothing to undo. */
static void
defer_test_unmap(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
}
    330  1.1  christos 
/* Set when the purge hook fires; test_defer_time checks and resets it. */
static bool defer_purge_called = false;
/* Fake purge hook: only records that it was invoked. */
static void
defer_test_purge(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	defer_purge_called = true;
}
    338  1.1  christos 
/* Set when the hugify hook fires; test_defer_time checks and resets it. */
static bool defer_hugify_called = false;
/* Fake hugify hook: only records that it was invoked. */
static void
defer_test_hugify(void *ptr, size_t size) {
	/* Void the unused params, matching defer_test_purge/unmap. */
	(void)ptr;
	(void)size;
	defer_hugify_called = true;
}
    344  1.1  christos 
/* Set when the dehugify hook fires; test_defer_time checks and resets it. */
static bool defer_dehugify_called = false;
/* Fake dehugify hook: only records that it was invoked. */
static void
defer_test_dehugify(void *ptr, size_t size) {
	/* Void the unused params, matching defer_test_purge/unmap. */
	(void)ptr;
	(void)size;
	defer_dehugify_called = true;
}
    350  1.1  christos 
    351  1.1  christos static nstime_t defer_curtime;
    352  1.1  christos static void
    353  1.1  christos defer_test_curtime(nstime_t *r_time, bool first_reading) {
    354  1.1  christos 	*r_time = defer_curtime;
    355  1.1  christos }
    356  1.1  christos 
    357  1.1  christos static uint64_t
    358  1.1  christos defer_test_ms_since(nstime_t *past_time) {
    359  1.1  christos 	return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
    360  1.1  christos }
    361  1.1  christos 
    362  1.1  christos TEST_BEGIN(test_defer_time) {
    363  1.1  christos 	test_skip_if(!hpa_supported());
    364  1.1  christos 
    365  1.1  christos 	hpa_hooks_t hooks;
    366  1.1  christos 	hooks.map = &defer_test_map;
    367  1.1  christos 	hooks.unmap = &defer_test_unmap;
    368  1.1  christos 	hooks.purge = &defer_test_purge;
    369  1.1  christos 	hooks.hugify = &defer_test_hugify;
    370  1.1  christos 	hooks.dehugify = &defer_test_dehugify;
    371  1.1  christos 	hooks.curtime = &defer_test_curtime;
    372  1.1  christos 	hooks.ms_since = &defer_test_ms_since;
    373  1.1  christos 
    374  1.1  christos 	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
    375  1.1  christos 	opts.deferral_allowed = true;
    376  1.1  christos 
    377  1.1  christos 	hpa_shard_t *shard = create_test_data(&hooks, &opts);
    378  1.1  christos 
    379  1.1  christos 	bool deferred_work_generated = false;
    380  1.1  christos 
    381  1.1  christos 	nstime_init(&defer_curtime, 0);
    382  1.1  christos 	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
    383  1.1  christos 	edata_t *edatas[HUGEPAGE_PAGES];
    384  1.1  christos 	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
    385  1.1  christos 		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
    386  1.1  christos 		    false, false, &deferred_work_generated);
    387  1.1  christos 		expect_ptr_not_null(edatas[i], "Unexpected null edata");
    388  1.1  christos 	}
    389  1.1  christos 	hpa_shard_do_deferred_work(tsdn, shard);
    390  1.1  christos 	expect_false(defer_hugify_called, "Hugified too early");
    391  1.1  christos 
    392  1.1  christos 	/* Hugification delay is set to 10 seconds in options. */
    393  1.1  christos 	nstime_init2(&defer_curtime, 11, 0);
    394  1.1  christos 	hpa_shard_do_deferred_work(tsdn, shard);
    395  1.1  christos 	expect_true(defer_hugify_called, "Failed to hugify");
    396  1.1  christos 
    397  1.1  christos 	defer_hugify_called = false;
    398  1.1  christos 
    399  1.1  christos 	/* Purge.  Recall that dirty_mult is .25. */
    400  1.1  christos 	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
    401  1.1  christos 		pai_dalloc(tsdn, &shard->pai, edatas[i],
    402  1.1  christos 		    &deferred_work_generated);
    403  1.1  christos 	}
    404  1.1  christos 
    405  1.1  christos 	hpa_shard_do_deferred_work(tsdn, shard);
    406  1.1  christos 
    407  1.1  christos 	expect_false(defer_hugify_called, "Hugified too early");
    408  1.1  christos 	expect_true(defer_dehugify_called, "Should have dehugified");
    409  1.1  christos 	expect_true(defer_purge_called, "Should have purged");
    410  1.1  christos 	defer_hugify_called = false;
    411  1.1  christos 	defer_dehugify_called = false;
    412  1.1  christos 	defer_purge_called = false;
    413  1.1  christos 
    414  1.1  christos 	/*
    415  1.1  christos 	 * Refill the page.  We now meet the hugification threshold; we should
    416  1.1  christos 	 * be marked for pending hugify.
    417  1.1  christos 	 */
    418  1.1  christos 	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
    419  1.1  christos 		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
    420  1.1  christos 		    false, false, &deferred_work_generated);
    421  1.1  christos 		expect_ptr_not_null(edatas[i], "Unexpected null edata");
    422  1.1  christos 	}
    423  1.1  christos 	/*
    424  1.1  christos 	 * We would be ineligible for hugification, had we not already met the
    425  1.1  christos 	 * threshold before dipping below it.
    426  1.1  christos 	 */
    427  1.1  christos 	pai_dalloc(tsdn, &shard->pai, edatas[0],
    428  1.1  christos 	    &deferred_work_generated);
    429  1.1  christos 	/* Wait for the threshold again. */
    430  1.1  christos 	nstime_init2(&defer_curtime, 22, 0);
    431  1.1  christos 	hpa_shard_do_deferred_work(tsdn, shard);
    432  1.1  christos 	expect_true(defer_hugify_called, "Hugified too early");
    433  1.1  christos 	expect_false(defer_dehugify_called, "Unexpected dehugify");
    434  1.1  christos 	expect_false(defer_purge_called, "Unexpected purge");
    435  1.1  christos 
    436  1.1  christos 	destroy_test_data(shard);
    437  1.1  christos }
    438  1.1  christos TEST_END
    439  1.1  christos 
int
main(void) {
	/*
	 * These trigger unused-function warnings on CI runs, even if declared
	 * with static inline.
	 */
	(void)mem_tree_empty;
	(void)mem_tree_last;
	(void)mem_tree_search;
	(void)mem_tree_nsearch;
	(void)mem_tree_psearch;
	(void)mem_tree_iter;
	(void)mem_tree_reverse_iter;
	(void)mem_tree_destroy;
	/* Run every HPA test; the runner returns nonzero on any failure. */
	return test_no_reentrancy(
	    test_alloc_max,
	    test_stress,
	    test_alloc_dalloc_batch,
	    test_defer_time);
}
    460