/* sec.c revision 1.1.1.1.4.2 */
#include "test/jemalloc_test.h"

#include "jemalloc/internal/sec.h"
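
/*
 * Unit tests for the SEC (small extent cache).  These drive a sec_t through
 * the pai_t interface, with a counting test allocator installed as the
 * fallback PAI, so each test can observe exactly when the SEC falls through
 * to its backing allocator.
 */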

typedef struct pai_test_allocator_s pai_test_allocator_t;
struct pai_test_allocator_s {
	pai_t pai;
	bool alloc_fail;
	size_t alloc_count;
	size_t alloc_batch_count;
	size_t dalloc_count;
	size_t dalloc_batch_count;
	/*
	 * We use a simple bump allocator as the implementation.  This isn't
	 * *really* correct, since we may allow expansion into a subsequent
	 * allocation, but it's not like the SEC is really examining the
	 * pointers it gets back; this is mostly just helpful for debugging.
	 */
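	/*
	 * For example, with next_ptr at 10 * PAGE, a PAGE-aligned two-page
	 * allocation returns 10 * PAGE as its base and bumps next_ptr to
	 * 12 * PAGE.
	 */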
	uintptr_t next_ptr;
	size_t expand_count;
	bool expand_return_value;
	size_t shrink_count;
	bool shrink_return_value;
};

static void
test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
    size_t max_bytes) {
	sec_opts_t opts;
	opts.nshards = nshards;
	opts.max_alloc = max_alloc;
	opts.max_bytes = max_bytes;
	/*
	 * Just choose reasonable defaults for these; most tests don't care so
	 * long as they're something reasonable.
	 */
	opts.bytes_after_flush = max_bytes / 2;
	opts.batch_fill_extra = 4;

	/*
	 * We end up leaking this base, but that's fine; this test is
	 * short-running, and SECs are arena-scoped in reality.
	 */
	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);

	bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
	assert_false(err, "Unexpected initialization failure");
	assert_u_gt(sec->npsizes, 0, "Zero size classes allowed for caching");
}

static inline edata_t *
pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
    bool *deferred_work_generated) {
	assert(!guarded);
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	if (ta->alloc_fail) {
		return NULL;
	}
	edata_t *edata = malloc(sizeof(edata_t));
	assert_ptr_not_null(edata, "");
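	/*
	 * Round the bump pointer up to the requested alignment: adding
	 * (alignment - 1) and masking with ~(alignment - 1) yields the next
	 * multiple of alignment (assumed to be a power of two).
	 */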
	ta->next_ptr += alignment - 1;
	edata_init(edata, /* arena_ind */ 0,
	    (void *)(ta->next_ptr & ~(alignment - 1)), size,
	    /* slab */ false,
	    /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
	    /* committed */ true, /* ranged */ false, EXTENT_NOT_HEAD);
	ta->next_ptr += size;
	ta->alloc_count++;
	return edata;
}

static inline size_t
pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	if (ta->alloc_fail) {
		return 0;
	}
	for (size_t i = 0; i < nallocs; i++) {
		edata_t *edata = malloc(sizeof(edata_t));
		assert_ptr_not_null(edata, "");
		edata_init(edata, /* arena_ind */ 0,
		    (void *)ta->next_ptr, size,
		    /* slab */ false, /* szind */ 0, /* sn */ 1,
		    extent_state_active, /* zero */ false, /* committed */ true,
		    /* ranged */ false, EXTENT_NOT_HEAD);
		ta->next_ptr += size;
		ta->alloc_batch_count++;
		edata_list_active_append(results, edata);
	}
	return nallocs;
}

static bool
pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool zero,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->expand_count++;
	return ta->expand_return_value;
}

static bool
pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->shrink_count++;
	return ta->shrink_return_value;
}

static void
pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->dalloc_count++;
	free(edata);
}

static void
pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
    edata_list_active_t *list, bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;

	edata_t *edata;
	while ((edata = edata_list_active_first(list)) != NULL) {
		edata_list_active_remove(list, edata);
		ta->dalloc_batch_count++;
		free(edata);
	}
}

static inline void
pai_test_allocator_init(pai_test_allocator_t *ta) {
	ta->alloc_fail = false;
	ta->alloc_count = 0;
	ta->alloc_batch_count = 0;
	ta->dalloc_count = 0;
	ta->dalloc_batch_count = 0;
	/* Just don't start the edata at 0. */
	ta->next_ptr = 10 * PAGE;
	ta->expand_count = 0;
	ta->expand_return_value = false;
	ta->shrink_count = 0;
	ta->shrink_return_value = false;
	ta->pai.alloc = &pai_test_allocator_alloc;
	ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
	ta->pai.expand = &pai_test_allocator_expand;
	ta->pai.shrink = &pai_test_allocator_shrink;
	ta->pai.dalloc = &pai_test_allocator_dalloc;
	ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
}
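
/*
 * Note: pai_alloc() and friends dispatch through the pai_t vtable, so the
 * calls on &sec.pai below go to the SEC, which in turn calls back into the
 * hooks above whenever it needs to hit its fallback.
 */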

TEST_BEGIN(test_reuse) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/*
	 * We can't use the "real" tsd, since we malloc within the test
	 * allocator hooks; we'd get lock inversion crashes.  Eventually, we
	 * should have a way to mock tsds, but for now just don't do any
	 * lock-order checking.
	 */
	tsdn_t *tsdn = TSDN_NULL;
	/*
	 * 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
	 * able to get to 33 pages in the cache before triggering a flush.  We
	 * set the flush limit to twice this amount (66 pages), so that the
	 * extra extents brought in by batch allocation down the cache-fill
	 * pathway can't accidentally trigger a flush and disrupt ordering.
	 */
	enum { NALLOCS = 11 };
	edata_t *one_page[NALLOCS];
	edata_t *two_page[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
	for (int i = 0; i < NALLOCS; i++) {
		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(two_page[i], "Unexpected alloc failure");
	}
	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(2 * NALLOCS, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Free in a different order than we allocated, to make sure free-list
	 * separation works correctly.
	 */
	for (int i = NALLOCS - 1; i >= 0; i--) {
		pai_dalloc(tsdn, &sec.pai, one_page[i],
		    &deferred_work_generated);
	}
	for (int i = NALLOCS - 1; i >= 0; i--) {
		pai_dalloc(tsdn, &sec.pai, two_page[i],
		    &deferred_work_generated);
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Check that the n'th most recent deallocated extent is returned for
	 * the n'th alloc request of a given size.
	 */
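	/*
	 * (The frees above ran in reverse index order, so LIFO reuse should
	 * hand back one_page[0] and two_page[0] first.)
	 */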
	for (int i = 0; i < NALLOCS; i++) {
		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_eq(one_page[i], alloc1,
		    "Got unexpected allocation");
		expect_ptr_eq(two_page[i], alloc2,
		    "Got unexpected allocation");
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
}
TEST_END

TEST_BEGIN(test_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	/*
	 * 10 allocs of 1-PAGE objects fill the cache limit (max_bytes is 10
	 * pages) exactly, so we can free them all back without triggering a
	 * flush.  NALLOCS is also chosen to match the batch allocation default
	 * (4 extra + 1 == 5; so 10 allocations leave the cache exactly empty,
	 * even in the presence of batch allocation on fill).  Eventually, once
	 * our allocation batching strategies become smarter, this should
	 * change.
	 */
	enum { NALLOCS = 10 };
	edata_t *extra_alloc;
	edata_t *allocs[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(NALLOCS + 1, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/* Free until the SEC is full, but should not have flushed yet. */
	for (int i = 0; i < NALLOCS; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Free the extra allocation; this should trigger a flush.  The internal
	 * flushing logic is allowed to get complicated; for now, we rely on our
	 * whitebox knowledge of the fact that the SEC flushes bins in their
	 * entirety when it decides to do so, and it has only one bin active
	 * right now.
	 */
	pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
}
TEST_END

/*
 * A disable and a flush are *almost* equivalent; the only difference is what
 * happens afterwards: disabling disallows all future caching as well.
 */
static void
do_disable_flush_test(bool is_disable) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum { NALLOCS = 11 };
	edata_t *allocs[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	/* Free all but the last alloc. */
	for (int i = 0; i < NALLOCS - 1; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;

	expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");

	if (is_disable) {
		sec_disable(tsdn, &sec);
	} else {
		sec_flush(tsdn, &sec);
	}

	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
	size_t old_dalloc_batch_count = ta.dalloc_batch_count;

	/*
	 * If we free into a disabled SEC, it should forward to the fallback.
	 * Otherwise, the SEC should accept the allocation.
	 */
	pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
	    &deferred_work_generated);

	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
}

TEST_BEGIN(test_disable) {
	do_disable_flush_test(/* is_disable */ true);
}
TEST_END

TEST_BEGIN(test_flush) {
	do_disable_flush_test(/* is_disable */ false);
}
TEST_END

TEST_BEGIN(test_max_alloc_respected) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	size_t max_alloc = 2 * PAGE;
	size_t attempted_alloc = 3 * PAGE;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
	    /* max_bytes */ 1000 * PAGE);
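
	/*
	 * Each 3-page request exceeds max_alloc, so every alloc should go
	 * straight through to the fallback, and every dalloc straight back to
	 * it, leaving the cache untouched.
	 */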

	for (size_t i = 0; i < 100; i++) {
		expect_zu_eq(i, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
		    PAGE, /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(edata, "Unexpected alloc failure");
		expect_zu_eq(i + 1, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
	}
}
TEST_END

TEST_BEGIN(test_expand_shrink_delegate) {
	/*
	 * Expand and shrink shouldn't affect SEC state; they should just
	 * delegate to the fallback PAI.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
	    /* max_bytes */ 1000 * PAGE);
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");

	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_false(err, "Unexpected expand failure");
	expect_zu_eq(1, ta.expand_count, "");
	ta.expand_return_value = true;
	err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 8 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_true(err, "Unexpected expand success");
	expect_zu_eq(2, ta.expand_count, "");

	err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
	    &deferred_work_generated);
	expect_false(err, "Unexpected shrink failure");
	expect_zu_eq(1, ta.shrink_count, "");
	ta.shrink_return_value = true;
	err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
	    &deferred_work_generated);
	expect_true(err, "Unexpected shrink success");
	expect_zu_eq(2, ta.shrink_count, "");
}
TEST_END

TEST_BEGIN(test_nshards_0) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);

	sec_opts_t opts = SEC_OPTS_DEFAULT;
	opts.nshards = 0;
	bool err = sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);
	expect_false(err, "Unexpected initialization failure");

	bool deferred_work_generated = false;
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

	/* Both operations should have gone directly to the fallback. */
	expect_zu_eq(1, ta.alloc_count, "");
	expect_zu_eq(1, ta.dalloc_count, "");
}
TEST_END

static void
expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
	sec_stats_t stats;
	/*
	 * Check that the stats merging accumulates rather than overwrites by
	 * putting some (made up) data there to begin with.
	 */
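	/*
	 * We assert with "le" rather than "eq" because batch fills may leave
	 * extra extents cached beyond the allocations a test tracks
	 * explicitly.
	 */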
	stats.bytes = 123;
	sec_stats_merge(tsdn, sec, &stats);
	assert_zu_le(npages * PAGE + 123, stats.bytes, "");
}

TEST_BEGIN(test_stats_simple) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		NITERS = 100,
		FLUSH_PAGES = 20,
	};

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Increase and decrease, without flushing. */
	for (size_t i = 0; i < NITERS; i++) {
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			pai_dalloc(tsdn, &sec.pai, allocs[j],
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, j + 1);
		}
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
			    /* zero */ false, /* guarded */ false,
			    /* frequent_reuse */ false,
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
		}
	}
}
TEST_END

TEST_BEGIN(test_stats_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *extra_alloc0;
	edata_t *extra_alloc1;
	edata_t *allocs[2 * FLUSH_PAGES];

	bool deferred_work_generated = false;

	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);

	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
	}

	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);

	/* Flush the remaining pages; stats should still work. */
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
		    &deferred_work_generated);
	}

	pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);

	expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
	    - ta.dalloc_count - ta.dalloc_batch_count);
}
TEST_END

TEST_BEGIN(test_stats_manual_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	bool deferred_work_generated = false;
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Dalloc the first half of the allocations. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}

	sec_flush(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);

	/* Free the remaining pages; disabling the SEC flushes them. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}
	sec_disable(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);
}
TEST_END

int
main(void) {
	return test(
	    test_reuse,
	    test_auto_flush,
	    test_disable,
	    test_flush,
	    test_max_alloc_respected,
	    test_expand_shrink_delegate,
	    test_nshards_0,
	    test_stats_simple,
	    test_stats_auto_flush,
	    test_stats_manual_flush);
}