sec.c revision 1.1 1 1.1 christos #include "test/jemalloc_test.h"
2 1.1 christos
3 1.1 christos #include "jemalloc/internal/sec.h"
4 1.1 christos
typedef struct pai_test_allocator_s pai_test_allocator_t;
/*
 * A mock PAI (page allocator interface) implementation that records how many
 * times each hook is invoked, so the tests below can verify exactly which
 * operations the SEC forwards to its fallback allocator.
 */
struct pai_test_allocator_s {
	/*
	 * Embedded vtable; the hooks cast the pai_t* they receive back to
	 * pai_test_allocator_t*, so this member is placed first.
	 */
	pai_t pai;
	/* When true, alloc/alloc_batch fail on purpose (return NULL / 0). */
	bool alloc_fail;
	size_t alloc_count;
	size_t alloc_batch_count;
	size_t dalloc_count;
	size_t dalloc_batch_count;
	/*
	 * We use a simple bump allocator as the implementation. This isn't
	 * *really* correct, since we may allow expansion into a subsequent
	 * allocation, but it's not like the SEC is really examining the
	 * pointers it gets back; this is mostly just helpful for debugging.
	 */
	uintptr_t next_ptr;
	size_t expand_count;
	/* Result the expand hook reports (true == failure). */
	bool expand_return_value;
	size_t shrink_count;
	/* Result the shrink hook reports (true == failure). */
	bool shrink_return_value;
};
25 1.1 christos
26 1.1 christos static void
27 1.1 christos test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
28 1.1 christos size_t max_bytes) {
29 1.1 christos sec_opts_t opts;
30 1.1 christos opts.nshards = 1;
31 1.1 christos opts.max_alloc = max_alloc;
32 1.1 christos opts.max_bytes = max_bytes;
33 1.1 christos /*
34 1.1 christos * Just choose reasonable defaults for these; most tests don't care so
35 1.1 christos * long as they're something reasonable.
36 1.1 christos */
37 1.1 christos opts.bytes_after_flush = max_bytes / 2;
38 1.1 christos opts.batch_fill_extra = 4;
39 1.1 christos
40 1.1 christos /*
41 1.1 christos * We end up leaking this base, but that's fine; this test is
42 1.1 christos * short-running, and SECs are arena-scoped in reality.
43 1.1 christos */
44 1.1 christos base_t *base = base_new(TSDN_NULL, /* ind */ 123,
45 1.1 christos &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
46 1.1 christos
47 1.1 christos bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
48 1.1 christos assert_false(err, "Unexpected initialization failure");
49 1.1 christos assert_u_ge(sec->npsizes, 0, "Zero size classes allowed for caching");
50 1.1 christos }
51 1.1 christos
52 1.1 christos static inline edata_t *
53 1.1 christos pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
54 1.1 christos size_t alignment, bool zero, bool guarded, bool frequent_reuse,
55 1.1 christos bool *deferred_work_generated) {
56 1.1 christos assert(!guarded);
57 1.1 christos pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
58 1.1 christos if (ta->alloc_fail) {
59 1.1 christos return NULL;
60 1.1 christos }
61 1.1 christos edata_t *edata = malloc(sizeof(edata_t));
62 1.1 christos assert_ptr_not_null(edata, "");
63 1.1 christos ta->next_ptr += alignment - 1;
64 1.1 christos edata_init(edata, /* arena_ind */ 0,
65 1.1 christos (void *)(ta->next_ptr & ~(alignment - 1)), size,
66 1.1 christos /* slab */ false,
67 1.1 christos /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
68 1.1 christos /* comitted */ true, /* ranged */ false, EXTENT_NOT_HEAD);
69 1.1 christos ta->next_ptr += size;
70 1.1 christos ta->alloc_count++;
71 1.1 christos return edata;
72 1.1 christos }
73 1.1 christos
74 1.1 christos static inline size_t
75 1.1 christos pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
76 1.1 christos size_t nallocs, edata_list_active_t *results,
77 1.1 christos bool *deferred_work_generated) {
78 1.1 christos pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
79 1.1 christos if (ta->alloc_fail) {
80 1.1 christos return 0;
81 1.1 christos }
82 1.1 christos for (size_t i = 0; i < nallocs; i++) {
83 1.1 christos edata_t *edata = malloc(sizeof(edata_t));
84 1.1 christos assert_ptr_not_null(edata, "");
85 1.1 christos edata_init(edata, /* arena_ind */ 0,
86 1.1 christos (void *)ta->next_ptr, size,
87 1.1 christos /* slab */ false, /* szind */ 0, /* sn */ 1,
88 1.1 christos extent_state_active, /* zero */ false, /* comitted */ true,
89 1.1 christos /* ranged */ false, EXTENT_NOT_HEAD);
90 1.1 christos ta->next_ptr += size;
91 1.1 christos ta->alloc_batch_count++;
92 1.1 christos edata_list_active_append(results, edata);
93 1.1 christos }
94 1.1 christos return nallocs;
95 1.1 christos }
96 1.1 christos
97 1.1 christos static bool
98 1.1 christos pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
99 1.1 christos size_t old_size, size_t new_size, bool zero,
100 1.1 christos bool *deferred_work_generated) {
101 1.1 christos pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
102 1.1 christos ta->expand_count++;
103 1.1 christos return ta->expand_return_value;
104 1.1 christos }
105 1.1 christos
106 1.1 christos static bool
107 1.1 christos pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
108 1.1 christos size_t old_size, size_t new_size, bool *deferred_work_generated) {
109 1.1 christos pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
110 1.1 christos ta->shrink_count++;
111 1.1 christos return ta->shrink_return_value;
112 1.1 christos }
113 1.1 christos
114 1.1 christos static void
115 1.1 christos pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
116 1.1 christos bool *deferred_work_generated) {
117 1.1 christos pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
118 1.1 christos ta->dalloc_count++;
119 1.1 christos free(edata);
120 1.1 christos }
121 1.1 christos
122 1.1 christos static void
123 1.1 christos pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
124 1.1 christos edata_list_active_t *list, bool *deferred_work_generated) {
125 1.1 christos pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
126 1.1 christos
127 1.1 christos edata_t *edata;
128 1.1 christos while ((edata = edata_list_active_first(list)) != NULL) {
129 1.1 christos edata_list_active_remove(list, edata);
130 1.1 christos ta->dalloc_batch_count++;
131 1.1 christos free(edata);
132 1.1 christos }
133 1.1 christos }
134 1.1 christos
135 1.1 christos static inline void
136 1.1 christos pai_test_allocator_init(pai_test_allocator_t *ta) {
137 1.1 christos ta->alloc_fail = false;
138 1.1 christos ta->alloc_count = 0;
139 1.1 christos ta->alloc_batch_count = 0;
140 1.1 christos ta->dalloc_count = 0;
141 1.1 christos ta->dalloc_batch_count = 0;
142 1.1 christos /* Just don't start the edata at 0. */
143 1.1 christos ta->next_ptr = 10 * PAGE;
144 1.1 christos ta->expand_count = 0;
145 1.1 christos ta->expand_return_value = false;
146 1.1 christos ta->shrink_count = 0;
147 1.1 christos ta->shrink_return_value = false;
148 1.1 christos ta->pai.alloc = &pai_test_allocator_alloc;
149 1.1 christos ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
150 1.1 christos ta->pai.expand = &pai_test_allocator_expand;
151 1.1 christos ta->pai.shrink = &pai_test_allocator_shrink;
152 1.1 christos ta->pai.dalloc = &pai_test_allocator_dalloc;
153 1.1 christos ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
154 1.1 christos }
155 1.1 christos
156 1.1 christos TEST_BEGIN(test_reuse) {
157 1.1 christos pai_test_allocator_t ta;
158 1.1 christos pai_test_allocator_init(&ta);
159 1.1 christos sec_t sec;
160 1.1 christos /*
161 1.1 christos * We can't use the "real" tsd, since we malloc within the test
162 1.1 christos * allocator hooks; we'd get lock inversion crashes. Eventually, we
163 1.1 christos * should have a way to mock tsds, but for now just don't do any
164 1.1 christos * lock-order checking.
165 1.1 christos */
166 1.1 christos tsdn_t *tsdn = TSDN_NULL;
167 1.1 christos /*
168 1.1 christos * 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
169 1.1 christos * able to get to 33 pages in the cache before triggering a flush. We
170 1.1 christos * set the flush liimt to twice this amount, to avoid accidentally
171 1.1 christos * triggering a flush caused by the batch-allocation down the cache fill
172 1.1 christos * pathway disrupting ordering.
173 1.1 christos */
174 1.1 christos enum { NALLOCS = 11 };
175 1.1 christos edata_t *one_page[NALLOCS];
176 1.1 christos edata_t *two_page[NALLOCS];
177 1.1 christos bool deferred_work_generated = false;
178 1.1 christos test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
179 1.1 christos /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
180 1.1 christos for (int i = 0; i < NALLOCS; i++) {
181 1.1 christos one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
182 1.1 christos /* zero */ false, /* guarded */ false, /* frequent_reuse */
183 1.1 christos false, &deferred_work_generated);
184 1.1 christos expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
185 1.1 christos two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
186 1.1 christos /* zero */ false, /* guarded */ false, /* frequent_reuse */
187 1.1 christos false, &deferred_work_generated);
188 1.1 christos expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
189 1.1 christos }
190 1.1 christos expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
191 1.1 christos size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
192 1.1 christos expect_zu_le(2 * NALLOCS, max_allocs,
193 1.1 christos "Incorrect number of allocations");
194 1.1 christos expect_zu_eq(0, ta.dalloc_count,
195 1.1 christos "Incorrect number of allocations");
196 1.1 christos /*
197 1.1 christos * Free in a different order than we allocated, to make sure free-list
198 1.1 christos * separation works correctly.
199 1.1 christos */
200 1.1 christos for (int i = NALLOCS - 1; i >= 0; i--) {
201 1.1 christos pai_dalloc(tsdn, &sec.pai, one_page[i],
202 1.1 christos &deferred_work_generated);
203 1.1 christos }
204 1.1 christos for (int i = NALLOCS - 1; i >= 0; i--) {
205 1.1 christos pai_dalloc(tsdn, &sec.pai, two_page[i],
206 1.1 christos &deferred_work_generated);
207 1.1 christos }
208 1.1 christos expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
209 1.1 christos "Incorrect number of allocations");
210 1.1 christos expect_zu_eq(0, ta.dalloc_count,
211 1.1 christos "Incorrect number of allocations");
212 1.1 christos /*
213 1.1 christos * Check that the n'th most recent deallocated extent is returned for
214 1.1 christos * the n'th alloc request of a given size.
215 1.1 christos */
216 1.1 christos for (int i = 0; i < NALLOCS; i++) {
217 1.1 christos edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
218 1.1 christos /* zero */ false, /* guarded */ false, /* frequent_reuse */
219 1.1 christos false, &deferred_work_generated);
220 1.1 christos edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
221 1.1 christos /* zero */ false, /* guarded */ false, /* frequent_reuse */
222 1.1 christos false, &deferred_work_generated);
223 1.1 christos expect_ptr_eq(one_page[i], alloc1,
224 1.1 christos "Got unexpected allocation");
225 1.1 christos expect_ptr_eq(two_page[i], alloc2,
226 1.1 christos "Got unexpected allocation");
227 1.1 christos }
228 1.1 christos expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
229 1.1 christos "Incorrect number of allocations");
230 1.1 christos expect_zu_eq(0, ta.dalloc_count,
231 1.1 christos "Incorrect number of allocations");
232 1.1 christos }
233 1.1 christos TEST_END
234 1.1 christos
235 1.1 christos
236 1.1 christos TEST_BEGIN(test_auto_flush) {
237 1.1 christos pai_test_allocator_t ta;
238 1.1 christos pai_test_allocator_init(&ta);
239 1.1 christos sec_t sec;
240 1.1 christos /* See the note above -- we can't use the real tsd. */
241 1.1 christos tsdn_t *tsdn = TSDN_NULL;
242 1.1 christos /*
243 1.1 christos * 10-allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
244 1.1 christos * able to get to 30 pages in the cache before triggering a flush. The
245 1.1 christos * choice of NALLOCS here is chosen to match the batch allocation
246 1.1 christos * default (4 extra + 1 == 5; so 10 allocations leaves the cache exactly
247 1.1 christos * empty, even in the presence of batch allocation on fill).
248 1.1 christos * Eventually, once our allocation batching strategies become smarter,
249 1.1 christos * this should change.
250 1.1 christos */
251 1.1 christos enum { NALLOCS = 10 };
252 1.1 christos edata_t *extra_alloc;
253 1.1 christos edata_t *allocs[NALLOCS];
254 1.1 christos bool deferred_work_generated = false;
255 1.1 christos test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
256 1.1 christos /* max_bytes */ NALLOCS * PAGE);
257 1.1 christos for (int i = 0; i < NALLOCS; i++) {
258 1.1 christos allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
259 1.1 christos /* zero */ false, /* guarded */ false, /* frequent_reuse */
260 1.1 christos false, &deferred_work_generated);
261 1.1 christos expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
262 1.1 christos }
263 1.1 christos extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
264 1.1 christos /* guarded */ false, /* frequent_reuse */ false,
265 1.1 christos &deferred_work_generated);
266 1.1 christos expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
267 1.1 christos size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
268 1.1 christos expect_zu_le(NALLOCS + 1, max_allocs,
269 1.1 christos "Incorrect number of allocations");
270 1.1 christos expect_zu_eq(0, ta.dalloc_count,
271 1.1 christos "Incorrect number of allocations");
272 1.1 christos /* Free until the SEC is full, but should not have flushed yet. */
273 1.1 christos for (int i = 0; i < NALLOCS; i++) {
274 1.1 christos pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
275 1.1 christos }
276 1.1 christos expect_zu_le(NALLOCS + 1, max_allocs,
277 1.1 christos "Incorrect number of allocations");
278 1.1 christos expect_zu_eq(0, ta.dalloc_count,
279 1.1 christos "Incorrect number of allocations");
280 1.1 christos /*
281 1.1 christos * Free the extra allocation; this should trigger a flush. The internal
282 1.1 christos * flushing logic is allowed to get complicated; for now, we rely on our
283 1.1 christos * whitebox knowledge of the fact that the SEC flushes bins in their
284 1.1 christos * entirety when it decides to do so, and it has only one bin active
285 1.1 christos * right now.
286 1.1 christos */
287 1.1 christos pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
288 1.1 christos expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
289 1.1 christos "Incorrect number of allocations");
290 1.1 christos expect_zu_eq(0, ta.dalloc_count,
291 1.1 christos "Incorrect number of (non-batch) deallocations");
292 1.1 christos expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
293 1.1 christos "Incorrect number of batch deallocations");
294 1.1 christos }
295 1.1 christos TEST_END
296 1.1 christos
297 1.1 christos /*
298 1.1 christos * A disable and a flush are *almost* equivalent; the only difference is what
299 1.1 christos * happens afterwards; disabling disallows all future caching as well.
300 1.1 christos */
static void
do_disable_flush_test(bool is_disable) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum { NALLOCS = 11 };
	edata_t *allocs[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	/* Free all but the last alloc, populating the cache. */
	for (int i = 0; i < NALLOCS - 1; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;

	expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of allocations");

	if (is_disable) {
		sec_disable(tsdn, &sec);
	} else {
		sec_flush(tsdn, &sec);
	}

	/*
	 * Either operation should push the cached extents back to the
	 * fallback via the batch dalloc pathway, with no fresh allocations
	 * and no non-batch deallocations.
	 */
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
	size_t old_dalloc_batch_count = ta.dalloc_batch_count;

	/*
	 * If we free into a disabled SEC, it should forward to the fallback.
	 * Otherwise, the SEC should accept the allocation.
	 */
	pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
	    &deferred_work_generated);

	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
}
358 1.1 christos
TEST_BEGIN(test_disable) {
	/* Disabling empties the cache and also forbids all future caching. */
	do_disable_flush_test(/* is_disable */ true);
}
TEST_END
363 1.1 christos
TEST_BEGIN(test_flush) {
	/* A manual flush empties the cache, but caching may resume after. */
	do_disable_flush_test(/* is_disable */ false);
}
TEST_END
368 1.1 christos
TEST_BEGIN(test_max_alloc_respected) {
	/*
	 * Requests larger than max_alloc must bypass the cache entirely: every
	 * alloc and dalloc below is expected to hit the fallback directly
	 * (hence the exact, per-iteration count checks).
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	size_t max_alloc = 2 * PAGE;
	size_t attempted_alloc = 3 * PAGE;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
	    /* max_bytes */ 1000 * PAGE);

	for (size_t i = 0; i < 100; i++) {
		expect_zu_eq(i, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
		    PAGE, /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(edata, "Unexpected alloc failure");
		/* Each oversized alloc goes straight through (non-batch). */
		expect_zu_eq(i + 1, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
	}
}
TEST_END
401 1.1 christos
TEST_BEGIN(test_expand_shrink_delegate) {
	/*
	 * Expand and shrink shouldn't affect sec state; they should just
	 * delegate to the fallback PAI.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
	    /* max_bytes */ 1000 * PAGE);
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");

	/* Success case: the hook is reached and its result is propagated. */
	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_false(err, "Unexpected expand failure");
	expect_zu_eq(1, ta.expand_count, "");
	ta.expand_return_value = true;
	/*
	 * NOTE(review): 4 * PAGE -> 3 * PAGE looks like a shrink passed to
	 * expand; the mock ignores the sizes, and this call is expected to
	 * fail anyway, so only error propagation is being exercised here.
	 */
	err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_true(err, "Unexpected expand success");
	expect_zu_eq(2, ta.expand_count, "");

	err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
	    &deferred_work_generated);
	expect_false(err, "Unexpected shrink failure");
	expect_zu_eq(1, ta.shrink_count, "");
	ta.shrink_return_value = true;
	err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
	    &deferred_work_generated);
	expect_true(err, "Unexpected shrink success");
	expect_zu_eq(2, ta.shrink_count, "");
}
TEST_END
443 1.1 christos
444 1.1 christos TEST_BEGIN(test_nshards_0) {
445 1.1 christos pai_test_allocator_t ta;
446 1.1 christos pai_test_allocator_init(&ta);
447 1.1 christos sec_t sec;
448 1.1 christos /* See the note above -- we can't use the real tsd. */
449 1.1 christos tsdn_t *tsdn = TSDN_NULL;
450 1.1 christos base_t *base = base_new(TSDN_NULL, /* ind */ 123,
451 1.1 christos &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
452 1.1 christos
453 1.1 christos sec_opts_t opts = SEC_OPTS_DEFAULT;
454 1.1 christos opts.nshards = 0;
455 1.1 christos sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);
456 1.1 christos
457 1.1 christos bool deferred_work_generated = false;
458 1.1 christos edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
459 1.1 christos /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
460 1.1 christos &deferred_work_generated);
461 1.1 christos pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
462 1.1 christos
463 1.1 christos /* Both operations should have gone directly to the fallback. */
464 1.1 christos expect_zu_eq(1, ta.alloc_count, "");
465 1.1 christos expect_zu_eq(1, ta.dalloc_count, "");
466 1.1 christos }
467 1.1 christos TEST_END
468 1.1 christos
/*
 * Assert that the SEC reports at least npages worth of cached bytes.
 * This is a lower bound rather than equality -- presumably because batch
 * fill can leave extra extents cached beyond what the caller accounts for
 * (see opts.batch_fill_extra in test_sec_init); confirm against sec.c.
 */
static void
expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
	sec_stats_t stats;
	/*
	 * Check that the stats merging accumulates rather than overwrites by
	 * putting some (made up) data there to begin with.
	 */
	stats.bytes = 123;
	sec_stats_merge(tsdn, sec, &stats);
	/* Merged bytes must include both the seed and the cached pages. */
	assert_zu_le(npages * PAGE + 123, stats.bytes, "");
}
480 1.1 christos
TEST_BEGIN(test_stats_simple) {
	/*
	 * Verify that merged stats track cached bytes as extents move in and
	 * out of the cache, across many alloc/dalloc cycles that stay below
	 * the flush threshold.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		NITERS = 100,
		FLUSH_PAGES = 20,
	};

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		/* Nothing freed yet, so the cached lower bound is zero. */
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Increase and decrease, without flushing. */
	for (size_t i = 0; i < NITERS; i++) {
		/* Each free should add one cached page... */
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			pai_dalloc(tsdn, &sec.pai, allocs[j],
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, j + 1);
		}
		/* ...and each re-allocation should consume one. */
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
			    /* zero */ false, /* guarded */ false,
			    /* frequent_reuse */ false,
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
		}
	}
}
TEST_END
524 1.1 christos
TEST_BEGIN(test_stats_auto_flush) {
	/*
	 * Verify that stats remain consistent across automatic flushes: after
	 * freeing enough to trigger flushing, the merged stats must still
	 * account for exactly the net outstanding (allocated minus freed)
	 * pages.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *extra_alloc0;
	edata_t *extra_alloc1;
	edata_t *allocs[2 * FLUSH_PAGES];

	bool deferred_work_generated = false;

	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);

	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
	}

	/* Fill the cache, then overflow it with extra_alloc0. */
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);

	/* Flush the remaining pages; stats should still work. */
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
		    &deferred_work_generated);
	}

	pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);

	/* Cached bytes must cover the net (allocated - freed) pages. */
	expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
	    - ta.dalloc_count - ta.dalloc_batch_count);
}
TEST_END
576 1.1 christos
TEST_BEGIN(test_stats_manual_flush) {
	/*
	 * Verify that both sec_flush and sec_disable reset the cached-bytes
	 * stats to zero after manually emptying the cache.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	bool deferred_work_generated = false;
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Dalloc the first half of the allocations. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}

	/* A manual flush should drop the cached-page count back to zero. */
	sec_flush(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);

	/* Flush the remaining pages. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}
	/* Disabling also empties the cache, so stats return to zero. */
	sec_disable(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);
}
TEST_END
620 1.1 christos
int
main(void) {
	/* Run every SEC (small extent cache) unit test in sequence. */
	return test(
	    test_reuse,
	    test_auto_flush,
	    test_disable,
	    test_flush,
	    test_max_alloc_respected,
	    test_expand_shrink_delegate,
	    test_nshards_0,
	    test_stats_simple,
	    test_stats_auto_flush,
	    test_stats_manual_flush);
}
635