#include "test/jemalloc_test.h"
#include "test/arena_util.h"
#include "test/san.h"

#include "jemalloc/internal/san.h"

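/*
 * Helper: assert that the extent backing ptr carries guard pages.  Every
 * allocation made in these tests is expected to come from a guarded extent.
 */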
static void
verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
	expect_true(extent_is_guarded(tsdn, ptr),
	    "All extents should be guarded.");
}

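/* Backing array for the small-allocation test below. */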
#define MAX_SMALL_ALLOCATIONS 4096
void *small_alloc[MAX_SMALL_ALLOCATIONS];

/*
 * This test allocates page-sized slabs and checks that any two slabs are
 * separated by at least one page.  That page is expected to be the guard
 * page.
 */
TEST_BEGIN(test_guarded_small) {
	test_skip_if(opt_prof);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned npages = 16, pages_found = 0, ends_found = 0;
	VARIABLE_ARRAY(uintptr_t, pages, npages);

	/* Allocate to get sanitized pointers. */
	size_t slab_sz = PAGE;
	size_t sz = slab_sz / 8;
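	/*
	 * Eight regions fit in each page-sized slab: the first region of a
	 * slab starts page-aligned (adjacent to the leading guard), and the
	 * last region ends on a page boundary (adjacent to the trailing
	 * guard).
	 */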
	unsigned n_alloc = 0;
	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
		void *ptr = malloc(sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		small_alloc[n_alloc] = ptr;
		verify_extent_guarded(tsdn, ptr);
		if ((uintptr_t)ptr % PAGE == 0) {
			assert_u_lt(pages_found, npages,
			    "Unexpectedly large number of page-aligned allocs");
			pages[pages_found++] = (uintptr_t)ptr;
		}
		if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
			ends_found++;
		}
		n_alloc++;
		if (pages_found == npages && ends_found == npages) {
			break;
		}
	}
	/* Should have found the ptrs to check for overflow and underflow. */
	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
	expect_u_eq(ends_found, npages, "Could not find the expected page ends.");

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < npages - 1; i++) {
		for (unsigned j = i + 1; j < npages; j++) {
			uintptr_t ptr_diff = pages[i] > pages[j] ?
			    pages[i] - pages[j] : pages[j] - pages[i];
			expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
			    "There should be at least one page between "
			    "guarded slabs");
		}
	}

	/* Only n_alloc slots were populated; freeing past them is an error. */
	for (unsigned i = 0; i < n_alloc; i++) {
		free(small_alloc[i]);
	}
}
TEST_END

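/*
 * This test allocates guarded large extents and checks that any two of them
 * are separated by at least two pages, i.e. the guard pages on either side
 * of each extent.
 */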
TEST_BEGIN(test_guarded_large) {
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned nlarge = 32;
	VARIABLE_ARRAY(uintptr_t, large, nlarge);

	/* Allocate to get sanitized pointers. */
	size_t large_sz = SC_LARGE_MINCLASS;
	for (unsigned i = 0; i < nlarge; i++) {
		void *ptr = malloc(large_sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		verify_extent_guarded(tsdn, ptr);
		large[i] = (uintptr_t)ptr;
	}

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < nlarge; i++) {
		for (unsigned j = i + 1; j < nlarge; j++) {
			uintptr_t ptr_diff = large[i] > large[j] ?
			    large[i] - large[j] : large[j] - large[i];
			expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
			    "There should be at least two pages between "
			    "guarded large allocations");
		}
	}

	for (unsigned i = 0; i < nlarge; i++) {
		free((void *)large[i]);
	}
}
TEST_END

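/*
 * Stats helpers: expected is in bytes, while the arena pdirty/pmuzzy
 * counters are in pages, hence the division by PAGE.
 */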
static void
verify_pdirty(unsigned arena_ind, uint64_t expected) {
	uint64_t pdirty = get_arena_pdirty(arena_ind);
	expect_u64_eq(pdirty, expected / PAGE,
	    "Unexpected dirty page amount.");
}

static void
verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
	uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
	expect_u64_eq(pmuzzy, expected / PAGE,
	    "Unexpected muzzy page amount.");
}

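/*
 * This test checks that guard pages are accounted for in the dirty page
 * stats, that dirty guarded extents are reused rather than newly mapped,
 * and that purging flushes them along with their guards.
 */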
TEST_BEGIN(test_guarded_decay) {
	unsigned arena_ind = do_arena_create(-1, -1);
	do_decay(arena_ind);
	do_purge(arena_ind);

	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	/* Verify that guarded extents are counted as dirty. */
	size_t sz1 = PAGE, sz2 = PAGE * 2;
	/* W/o maps_coalesce, guarded extents are unguarded eagerly. */
	size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE;
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should reuse the first extent. */
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should not reuse; expect new dirty pages. */
	generate_dirty(arena_ind, sz2);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
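	/* Bypass the tcache so frees go straight back to the arena. */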
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

	/* Should reuse dirty extents for the two mallocx. */
	void *p1 = do_mallocx(sz1, flags);
	verify_extent_guarded(tsdn, p1);
	verify_pdirty(arena_ind, sz2 + add_guard_size);

	void *p2 = do_mallocx(sz2, flags);
	verify_extent_guarded(tsdn, p2);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	dallocx(p1, flags);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	dallocx(p2, flags);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

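	/* Purging should flush all dirty pages, including any counted guards. */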
	do_purge(arena_ind);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	if (config_stats) {
		expect_u64_eq(get_arena_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_purged(arena_ind),
		    (sz1 + sz2 + 2 * add_guard_size) / PAGE,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
		    "Expected no muzzy purging");
	}

	if (opt_retain) {
		/*
		 * With retain, guarded extents are not mergeable and will be
		 * cached in ecache_retained.  They should be reused.
		 */
		void *new_p1 = do_mallocx(sz1, flags);
		verify_extent_guarded(tsdn, new_p1);
		expect_ptr_eq(p1, new_p1, "Expect to reuse p1");

		void *new_p2 = do_mallocx(sz2, flags);
		verify_extent_guarded(tsdn, new_p2);
		expect_ptr_eq(p2, new_p2, "Expect to reuse p2");

		dallocx(new_p1, flags);
		verify_pdirty(arena_ind, sz1 + add_guard_size);
		dallocx(new_p2, flags);
		verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
		verify_pmuzzy(arena_ind, 0);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_guarded_small,
	    test_guarded_large,
	    test_guarded_decay);
}