/* Unit tests for jemalloc's "prof.reset" mallctl (heap-profiling reset). */
      1 #include "test/jemalloc_test.h"
      2 
/*
 * Intercept for prof_dump_open: discard profile dumps by directing them to
 * /dev/null instead of creating real files during the tests.  The signature
 * must match prof_dump_open_t, so the arguments cannot be removed even
 * though this implementation ignores them.
 */
static int
prof_dump_open_intercept(bool propagate_err, const char *filename) {
	int fd;

	/* Unused: this intercept neither propagates errors nor honors the
	 * requested filename. */
	(void)propagate_err;
	(void)filename;

	fd = open("/dev/null", O_WRONLY);
	assert_d_ne(fd, -1, "Unexpected open() failure");

	return fd;
}
     12 
     13 static void
     14 set_prof_active(bool active) {
     15 	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
     16 	    sizeof(active)), 0, "Unexpected mallctl failure");
     17 }
     18 
/* Read the current lg_prof_sample value via the "prof.lg_sample" mallctl. */
static size_t
get_lg_prof_sample(void) {
	size_t result;
	size_t result_sz = sizeof(result);

	assert_d_eq(mallctl("prof.lg_sample", (void *)&result, &result_sz,
	    NULL, 0), 0,
	    "Unexpected mallctl failure while reading profiling sample rate");
	return result;
}
     29 
/*
 * Reset all profile data via "prof.reset", simultaneously installing a new
 * lg_prof_sample rate, then verify the new rate actually took effect.
 */
static void
do_prof_reset(size_t lg_prof_sample) {
	size_t sample = lg_prof_sample;

	assert_d_eq(mallctl("prof.reset", NULL, NULL,
	    (void *)&sample, sizeof(sample)), 0,
	    "Unexpected mallctl failure while resetting profile data");
	assert_zu_eq(sample, get_lg_prof_sample(),
	    "Expected profile sample rate change");
}
     38 
     39 TEST_BEGIN(test_prof_reset_basic) {
     40 	size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
     41 	size_t sz;
     42 	unsigned i;
     43 
     44 	test_skip_if(!config_prof);
     45 
     46 	sz = sizeof(size_t);
     47 	assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
     48 	    &sz, NULL, 0), 0,
     49 	    "Unexpected mallctl failure while reading profiling sample rate");
     50 	assert_zu_eq(lg_prof_sample_orig, 0,
     51 	    "Unexpected profiling sample rate");
     52 	lg_prof_sample = get_lg_prof_sample();
     53 	assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
     54 	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
     55 	    "\"prof.lg_sample\"");
     56 
     57 	/* Test simple resets. */
     58 	for (i = 0; i < 2; i++) {
     59 		assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
     60 		    "Unexpected mallctl failure while resetting profile data");
     61 		lg_prof_sample = get_lg_prof_sample();
     62 		assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
     63 		    "Unexpected profile sample rate change");
     64 	}
     65 
     66 	/* Test resets with prof.lg_sample changes. */
     67 	lg_prof_sample_next = 1;
     68 	for (i = 0; i < 2; i++) {
     69 		do_prof_reset(lg_prof_sample_next);
     70 		lg_prof_sample = get_lg_prof_sample();
     71 		assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
     72 		    "Expected profile sample rate change");
     73 		lg_prof_sample_next = lg_prof_sample_orig;
     74 	}
     75 
     76 	/* Make sure the test code restored prof.lg_sample. */
     77 	lg_prof_sample = get_lg_prof_sample();
     78 	assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
     79 	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
     80 	    "\"prof.lg_sample\"");
     81 }
     82 TEST_END
     83 
     84 bool prof_dump_header_intercepted = false;
     85 prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
     86 static bool
     87 prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
     88     const prof_cnt_t *cnt_all) {
     89 	prof_dump_header_intercepted = true;
     90 	memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
     91 
     92 	return false;
     93 }
     94 
/*
 * Verify that "prof.reset" discards accumulated allocation counts (via an
 * intercepted dump header) while leaving live backtraces in place until
 * their objects are actually deallocated.
 */
TEST_BEGIN(test_prof_reset_cleanup) {
	void *p;
	prof_dump_header_t *prof_dump_header_orig;

	test_skip_if(!config_prof);

	set_prof_active(true);

	/* Establish exactly one live sampled allocation. */
	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
	p = mallocx(1, 0);
	assert_ptr_not_null(p, "Unexpected mallocx() failure");
	assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	/* Swap in the header intercept so dump counters can be inspected. */
	prof_dump_header_orig = prof_dump_header;
	prof_dump_header = prof_dump_header_intercept;
	assert_false(prof_dump_header_intercepted, "Unexpected intercept");

	/* Before reset: the dump should report the live allocation. */
	assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
	    0, "Unexpected error while dumping heap profile");
	assert_true(prof_dump_header_intercepted, "Expected intercept");
	assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");

	/*
	 * After reset: counts are zeroed, but the backtrace survives because
	 * the object is still allocated.
	 */
	assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
	    "Unexpected error while resetting heap profile data");
	assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
	    0, "Unexpected error while dumping heap profile");
	assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
	assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	/* Restore the real dump-header implementation. */
	prof_dump_header = prof_dump_header_orig;

	/* Freeing the object finally releases its backtrace. */
	dallocx(p, 0);
	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");

	set_prof_active(false);
}
TEST_END
    132 
#define NTHREADS		4		/* concurrent mutator threads */
#define NALLOCS_PER_THREAD	(1U << 13)	/* allocations per thread */
#define OBJ_RING_BUF_COUNT	1531		/* live-object ring size */
#define RESET_INTERVAL		(1U << 10)	/* allocs between prof.reset */
#define DUMP_INTERVAL		3677		/* allocs between prof.dump */
/*
 * Thread body for test_prof_reset: repeatedly allocate via btalloc() (each
 * thread uses distinct backtrace seeds) while periodically resetting and
 * dumping profile data, keeping at most OBJ_RING_BUF_COUNT objects live in
 * a ring buffer.  Returns NULL; varg points to this thread's index.
 */
static void *
thd_start(void *varg) {
	unsigned thd_ind = *(unsigned *)varg;
	unsigned i;
	void *objs[OBJ_RING_BUF_COUNT];

	memset(objs, 0, sizeof(objs));

	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
		/* Ring slot recycled by this iteration. */
		void **pp = &objs[i % OBJ_RING_BUF_COUNT];

		if (i % RESET_INTERVAL == 0) {
			assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
			    0, "Unexpected error while resetting heap profile "
			    "data");
		}

		if (i % DUMP_INTERVAL == 0) {
			assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
			    0, "Unexpected error while dumping heap profile");
		}

		/* Free any old occupant, then refill the slot. */
		if (*pp != NULL) {
			dallocx(*pp, 0);
		}
		*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
		assert_ptr_not_null(*pp,
		    "Unexpected btalloc() failure");
	}

	/* Clean up any remaining objects. */
	for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
		if (objs[i] != NULL) {
			dallocx(objs[i], 0);
			objs[i] = NULL;
		}
	}

	return NULL;
}
    181 
    182 TEST_BEGIN(test_prof_reset) {
    183 	size_t lg_prof_sample_orig;
    184 	thd_t thds[NTHREADS];
    185 	unsigned thd_args[NTHREADS];
    186 	unsigned i;
    187 	size_t bt_count, tdata_count;
    188 
    189 	test_skip_if(!config_prof);
    190 
    191 	bt_count = prof_bt_count();
    192 	assert_zu_eq(bt_count, 0,
    193 	    "Unexpected pre-existing tdata structures");
    194 	tdata_count = prof_tdata_count();
    195 
    196 	lg_prof_sample_orig = get_lg_prof_sample();
    197 	do_prof_reset(5);
    198 
    199 	set_prof_active(true);
    200 
    201 	for (i = 0; i < NTHREADS; i++) {
    202 		thd_args[i] = i;
    203 		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
    204 	}
    205 	for (i = 0; i < NTHREADS; i++) {
    206 		thd_join(thds[i], NULL);
    207 	}
    208 
    209 	assert_zu_eq(prof_bt_count(), bt_count,
    210 	    "Unexpected bactrace count change");
    211 	assert_zu_eq(prof_tdata_count(), tdata_count,
    212 	    "Unexpected remaining tdata structures");
    213 
    214 	set_prof_active(false);
    215 
    216 	do_prof_reset(lg_prof_sample_orig);
    217 }
    218 TEST_END
    219 #undef NTHREADS
    220 #undef NALLOCS_PER_THREAD
    221 #undef OBJ_RING_BUF_COUNT
    222 #undef RESET_INTERVAL
    223 #undef DUMP_INTERVAL
    224 
    225 /* Test sampling at the same allocation site across resets. */
    226 #define NITER 10
    227 TEST_BEGIN(test_xallocx) {
    228 	size_t lg_prof_sample_orig;
    229 	unsigned i;
    230 	void *ptrs[NITER];
    231 
    232 	test_skip_if(!config_prof);
    233 
    234 	lg_prof_sample_orig = get_lg_prof_sample();
    235 	set_prof_active(true);
    236 
    237 	/* Reset profiling. */
    238 	do_prof_reset(0);
    239 
    240 	for (i = 0; i < NITER; i++) {
    241 		void *p;
    242 		size_t sz, nsz;
    243 
    244 		/* Reset profiling. */
    245 		do_prof_reset(0);
    246 
    247 		/* Allocate small object (which will be promoted). */
    248 		p = ptrs[i] = mallocx(1, 0);
    249 		assert_ptr_not_null(p, "Unexpected mallocx() failure");
    250 
    251 		/* Reset profiling. */
    252 		do_prof_reset(0);
    253 
    254 		/* Perform successful xallocx(). */
    255 		sz = sallocx(p, 0);
    256 		assert_zu_eq(xallocx(p, sz, 0, 0), sz,
    257 		    "Unexpected xallocx() failure");
    258 
    259 		/* Perform unsuccessful xallocx(). */
    260 		nsz = nallocx(sz+1, 0);
    261 		assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
    262 		    "Unexpected xallocx() success");
    263 	}
    264 
    265 	for (i = 0; i < NITER; i++) {
    266 		/* dallocx. */
    267 		dallocx(ptrs[i], 0);
    268 	}
    269 
    270 	set_prof_active(false);
    271 	do_prof_reset(lg_prof_sample_orig);
    272 }
    273 TEST_END
    274 #undef NITER
    275 
/* Test driver: install the dump-open intercept, then run all tests. */
int
main(void) {
	/* Intercept dumping prior to running any tests. */
	prof_dump_open = prof_dump_open_intercept;

	return test_no_reentrancy(
	    test_prof_reset_basic,
	    test_prof_reset_cleanup,
	    test_prof_reset,
	    test_xallocx);
}
    287