/* test/unit/decay.c -- jemalloc decay (dirty/muzzy page purging) unit tests. */
      1 #include "test/jemalloc_test.h"
      2 #include "test/arena_util.h"
      3 
      4 #include "jemalloc/internal/ticker.h"
      5 
      6 static nstime_monotonic_t *nstime_monotonic_orig;
      7 static nstime_update_t *nstime_update_orig;
      8 
      9 static unsigned nupdates_mock;
     10 static nstime_t time_mock;
     11 static bool monotonic_mock;
     12 
     13 static bool
     14 nstime_monotonic_mock(void) {
     15 	return monotonic_mock;
     16 }
     17 
     18 static void
     19 nstime_update_mock(nstime_t *time) {
     20 	nupdates_mock++;
     21 	if (monotonic_mock) {
     22 		nstime_copy(time, &time_mock);
     23 	}
     24 }
     25 
TEST_BEGIN(test_decay_ticks) {
	/*
	 * Verify that the per-arena decay ticker advances (ticks) on every
	 * public allocation/deallocation entry point: malloc, free, calloc,
	 * posix_memalign, aligned_alloc, realloc, the *allocx() family, and
	 * tcache fill/flush.  Skipped when background threads or HPA are
	 * enabled, since those configurations drive decay differently.
	 */
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);

	ticker_geom_t *decay_ticker;
	unsigned tick0, tick1, arena_ind;
	size_t sz, large0;
	void *p;

	/* Look up the smallest large size class. */
	sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Set up a manually managed arena for test. */
	arena_ind = do_arena_create(0, 0);

	/* Migrate to the new arena, and get the ticker. */
	unsigned old_arena_ind;
	size_t sz_arena_ind = sizeof(old_arena_ind);
	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
	    &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
	    "Unexpected mallctl() failure");
	decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
	expect_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

	/*
	 * Test the standard APIs using a large size class, since we can't
	 * control tcache interactions for small size classes (except by
	 * completely disabling tcache for the entire test program).
	 */

	/* malloc().  Read the ticker before and after; it must have moved. */
	tick0 = ticker_geom_read(decay_ticker);
	p = malloc(large0);
	expect_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
	/* free(). */
	tick0 = ticker_geom_read(decay_ticker);
	free(p);
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = calloc(1, large0);
	expect_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_geom_read(decay_ticker);
	expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), large0);
	expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate (realloc with a NULL pointer). */
	tick0 = ticker_geom_read(decay_ticker);
	p = realloc(NULL, large0);
	expect_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_geom_read(decay_ticker);
	p = realloc(p, large0);
	expect_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate (realloc with size 0). */
	tick0 = ticker_geom_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using large and small size classes, with
	 * tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[2];
		allocx_sizes[0] = large0;
		allocx_sizes[1] = 1;

		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);
			/* rallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);
			/* xallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);
			/* dallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);
			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_geom_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size classes,
	 * using an explicit tcache.
	 */
	unsigned tcache_ind, i;
	size_t tcache_sizes[2];
	tcache_sizes[0] = large0;
	tcache_sizes[1] = 1;

	size_t tcache_max, sz_tcache_max;
	sz_tcache_max = sizeof(tcache_max);
	expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
	    &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");

	/* Create a private tcache for the fill/flush checks. */
	sz = sizeof(unsigned);
	expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
	    NULL, 0), 0, "Unexpected mallctl failure");

	for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
		sz = tcache_sizes[i];

		/* tcache fill. */
		tick0 = ticker_geom_read(decay_ticker);
		p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
		expect_ptr_not_null(p, "Unexpected mallocx() failure");
		tick1 = ticker_geom_read(decay_ticker);
		expect_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache fill "
		    "(sz=%zu)", sz);
		/* tcache flush. */
		dallocx(p, MALLOCX_TCACHE(tcache_ind));
		tick0 = ticker_geom_read(decay_ticker);
		expect_d_eq(mallctl("tcache.flush", NULL, NULL,
		    (void *)&tcache_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl failure");
		tick1 = ticker_geom_read(decay_ticker);

		/* Will only tick if it's in tcache. */
		expect_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache flush (sz=%zu)", sz);
	}
}
TEST_END
    213 
/*
 * Churn tiny allocations while advancing the mocked clock, until either
 * purging is observed or the decay deadline passes.
 *
 * arena_ind/flags: arena under test and the mallocx() flags targeting it.
 * dt: decay time; used both for the deadline (start + nstime_init2(dt, 0))
 *     and for the per-iteration mocked-time step (dt / NINTERVALS).
 * dirty_npurge0/muzzy_npurge0: purge-counter baselines taken by the caller.
 * terminate_asap: if true, stop as soon as either purge counter advances
 *     past its baseline; otherwise loop until the deadline.
 *
 * NOTE(review): the `dirty` parameter is not read anywhere in this body;
 * presumably kept for call-site symmetry -- confirm before removing.
 */
static void
decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
    uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
#define NINTERVALS 101
	nstime_t time, update_interval, decay_ms, deadline;

	nstime_init_update(&time);

	/* deadline = current (mocked) time + dt. */
	nstime_init2(&decay_ms, dt, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_ms);

	/* Advance the mocked clock in NINTERVALS slices of dt. */
	nstime_init2(&update_interval, dt, 0);
	nstime_idivide(&update_interval, NINTERVALS);

	/*
	 * Keep q's slab from being deallocated during the looping below.  If a
	 * cached slab were to repeatedly come and go during looping, it could
	 * prevent the decay backlog ever becoming empty.
	 */
	void *p = do_mallocx(1, flags);
	uint64_t dirty_npurge1, muzzy_npurge1;
	do {
		/* Burn ticks so the arena's decay epoch gets updated. */
		for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
		    i++) {
			void *q = do_mallocx(1, flags);
			dallocx(q, flags);
		}
		dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
		muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);

		/* Step the mocked time source, then re-read it. */
		nstime_add(&time_mock, &update_interval);
		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
	    dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
	    !terminate_asap));
	dallocx(p, flags);

	if (config_stats) {
		expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
		    muzzy_npurge0, "Expected purging to occur");
	}
#undef NINTERVALS
}
    258 
TEST_BEGIN(test_decay_ticker) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);
#define NPS 2048
	ssize_t ddt = opt_dirty_decay_ms;
	ssize_t mdt = opt_muzzy_decay_ms;
	unsigned arena_ind = do_arena_create(ddt, mdt);
	int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate every
	 * other object (to fragment virtual memory), restore the clock, then
	 * [md]allocx() in a tight loop while advancing time rapidly to verify
	 * the ticker triggers purging.
	 */
	size_t large;
	size_t sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Purge, then snapshot the purge-counter baselines. */
	do_purge(arena_ind);
	uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
	uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);

	for (unsigned i = 0; i < NPS; i++) {
		ps[i] = do_mallocx(large, flags);
	}

	/* Install a monotonic mocked time source under test control. */
	nupdates_mock = 0;
	nstime_init_update(&time_mock);
	monotonic_mock = true;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	/* Free every other object; each decay pass must consult the clock. */
	for (unsigned i = 0; i < NPS; i += 2) {
		dallocx(ps[i], flags);
		unsigned nupdates0 = nupdates_mock;
		do_decay(arena_ind);
		expect_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	/* Dirty decay should purge quickly; muzzy gets the combined window. */
	decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
	    muzzy_npurge0, true);
	decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
	    muzzy_npurge0, false);

	do_arena_destroy(arena_ind);

	/* Restore the real time source. */
	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
    317 
TEST_BEGIN(test_decay_nonmonotonic) {
	/*
	 * With a mocked non-monotonic time source (the update hook counts
	 * calls but leaves the time unchanged), decay must make no progress:
	 * no purging should occur even after freeing enough objects to span
	 * the whole smoothstep backlog.
	 */
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);
#define NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Flush pending purges and snapshot the purge-counter baseline. */
	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	do_epoch();
	sz = sizeof(uint64_t);
	npurge0 = get_arena_npurge(0);

	/* Install the non-monotonic mocked time source. */
	nupdates_mock = 0;
	nstime_init_update(&time_mock);
	monotonic_mock = false;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	/* Each decay pass must still consult the (frozen) clock. */
	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		expect_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	do_epoch();
	sz = sizeof(uint64_t);
	npurge1 = get_arena_npurge(0);

	if (config_stats) {
		expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
	}

	/* Restore the real time source. */
	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
    375 
    376 TEST_BEGIN(test_decay_now) {
    377 	test_skip_if(is_background_thread_enabled());
    378 	test_skip_if(opt_hpa);
    379 
    380 	unsigned arena_ind = do_arena_create(0, 0);
    381 	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
    382 	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
    383 	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
    384 	/* Verify that dirty/muzzy pages never linger after deallocation. */
    385 	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
    386 		size_t size = sizes[i];
    387 		generate_dirty(arena_ind, size);
    388 		expect_zu_eq(get_arena_pdirty(arena_ind), 0,
    389 		    "Unexpected dirty pages");
    390 		expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
    391 		    "Unexpected muzzy pages");
    392 	}
    393 	do_arena_destroy(arena_ind);
    394 }
    395 TEST_END
    396 
    397 TEST_BEGIN(test_decay_never) {
    398 	test_skip_if(is_background_thread_enabled() || !config_stats);
    399 	test_skip_if(opt_hpa);
    400 
    401 	unsigned arena_ind = do_arena_create(-1, -1);
    402 	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
    403 	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
    404 	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
    405 	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
    406 	void *ptrs[sizeof(sizes)/sizeof(size_t)];
    407 	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
    408 		ptrs[i] = do_mallocx(sizes[i], flags);
    409 	}
    410 	/* Verify that each deallocation generates additional dirty pages. */
    411 	size_t pdirty_prev = get_arena_pdirty(arena_ind);
    412 	size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
    413 	expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
    414 	expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
    415 	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
    416 		dallocx(ptrs[i], flags);
    417 		size_t pdirty = get_arena_pdirty(arena_ind);
    418 		size_t pmuzzy = get_arena_pmuzzy(arena_ind);
    419 		expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
    420 		    pdirty_prev, "Expected dirty pages to increase.");
    421 		expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
    422 		pdirty_prev = pdirty;
    423 	}
    424 	do_arena_destroy(arena_ind);
    425 }
    426 TEST_END
    427 
int
main(void) {
	/*
	 * Register and run all decay tests.  Each test skips itself when
	 * background threads or HPA are enabled (see the test_skip_if calls
	 * at the top of every TEST_BEGIN body).
	 */
	return test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic,
	    test_decay_now,
	    test_decay_never);
}
    437