#include "test/jemalloc_test.h"

#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/nstime.h"

#define SHARD_IND 111

#define ALLOC_MAX (HUGEPAGE)
9
/*
 * Bundle of an hpa_shard_t together with all the state it depends on, so a
 * test can stand up a shard without a full arena.
 */
typedef struct test_data_s test_data_t;
struct test_data_s {
	/*
	 * Must be the first member -- we convert back and forth between the
	 * test_data_t and the hpa_shard_t;
	 */
	hpa_shard_t shard;
	/* Central allocator the shard draws hugepages from. */
	hpa_central_t central;
	/* Metadata base backing the emap, edata cache, and central. */
	base_t *base;
	/* Per-shard cache of edata_t structures. */
	edata_cache_t shard_edata_cache;

	/* Extent map used by the shard for pointer -> edata lookups. */
	emap_t emap;
};
23
24 static hpa_shard_opts_t test_hpa_shard_opts_aggressive = {
25 /* slab_max_alloc */
26 HUGEPAGE,
27 /* hugification_threshold */
28 0.9 * HUGEPAGE,
29 /* dirty_mult */
30 FXP_INIT_PERCENT(11),
31 /* deferral_allowed */
32 true,
33 /* hugify_delay_ms */
34 0,
35 /* hugify_sync */
36 false,
37 /* min_purge_interval_ms */
38 5,
39 /* experimental_max_purge_nhp */
40 -1,
41 /* purge_threshold */
42 HUGEPAGE - 5 * PAGE,
43 /* min_purge_delay_ms */
44 10,
45 /* hugify_style */
46 hpa_hugify_style_eager};
47
48 static hpa_shard_t *
49 create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
50 bool err;
51 base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
52 &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
53 assert_ptr_not_null(base, "");
54
55 test_data_t *test_data = malloc(sizeof(test_data_t));
56 assert_ptr_not_null(test_data, "");
57
58 test_data->base = base;
59
60 err = edata_cache_init(&test_data->shard_edata_cache, base);
61 assert_false(err, "");
62
63 err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
64 assert_false(err, "");
65
66 err = hpa_central_init(&test_data->central, test_data->base, hooks);
67 assert_false(err, "");
68 sec_opts_t sec_opts;
69 sec_opts.nshards = 0;
70 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
71 err = hpa_shard_init(tsdn, &test_data->shard, &test_data->central,
72 &test_data->emap, test_data->base, &test_data->shard_edata_cache,
73 SHARD_IND, opts, &sec_opts);
74 assert_false(err, "");
75
76 return (hpa_shard_t *)test_data;
77 }
78
79 static void
80 destroy_test_data(hpa_shard_t *shard) {
81 test_data_t *test_data = (test_data_t *)shard;
82 base_delete(TSDN_NULL, test_data->base);
83 free(test_data);
84 }
85
86 static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
87 static void *
88 defer_test_map(size_t size) {
89 void *result = (void *)defer_bump_ptr;
90 defer_bump_ptr += size;
91 return result;
92 }
93
/* Fake unmap hook: the fake mappings are never reclaimed. */
static void
defer_test_unmap(void *ptr, size_t size) {
	(void)size;
	(void)ptr;
}
99
/* Instrumentation for the purge hooks: call count and last size seen. */
static size_t ndefer_purge_calls = 0;
static size_t npurge_size = 0;
/* Fake purge hook: records the request and counts the invocation. */
static void
defer_test_purge(void *ptr, size_t size) {
	(void)ptr;
	++ndefer_purge_calls;
	npurge_size = size;
}
108
109 static bool defer_vectorized_purge_called = false;
110 static bool
111 defer_vectorized_purge(void *vec, size_t vlen, size_t nbytes) {
112 (void)vec;
113 (void)nbytes;
114 ++ndefer_purge_calls;
115 defer_vectorized_purge_called = true;
116 return false;
117 }
118
/* Number of times the fake hugify hook has been invoked. */
static size_t ndefer_hugify_calls = 0;
/*
 * Fake hugify hook: counts invocations; returns false (success).  Parameters
 * are explicitly voided, matching the other fake hooks, to silence
 * unused-parameter warnings.
 */
static bool
defer_test_hugify(void *ptr, size_t size, bool sync) {
	(void)ptr;
	(void)size;
	(void)sync;
	++ndefer_hugify_calls;
	return false;
}
125
/* Number of times the fake dehugify hook has been invoked. */
static size_t ndefer_dehugify_calls = 0;
/*
 * Fake dehugify hook: counts invocations only.  Parameters are explicitly
 * voided, matching the other fake hooks, to silence unused-parameter
 * warnings.
 */
static void
defer_test_dehugify(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	++ndefer_dehugify_calls;
}
131
132 static nstime_t defer_curtime;
133 static void
134 defer_test_curtime(nstime_t *r_time, bool first_reading) {
135 *r_time = defer_curtime;
136 }
137
138 static uint64_t
139 defer_test_ms_since(nstime_t *past_time) {
140 return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
141 }
142
/*
 * With the kernel THP mode faked to "always" and hugify_style == none, newly
 * mapped slabs should already count as huge, and deferred work should purge
 * without ever issuing hugify/dehugify calls through the hooks.
 */
TEST_BEGIN(test_hpa_hugify_style_none_huge_no_syscall_thp_always) {
	/* Needs HPA; assumes the non-batched (scalar) purge path. */
	test_skip_if(!hpa_supported() || (opt_process_madvise_max_batch != 0));

	/* Route all OS interaction through the fake hooks defined above. */
	hpa_hooks_t hooks;
	hooks.map = &defer_test_map;
	hooks.unmap = &defer_test_unmap;
	hooks.purge = &defer_test_purge;
	hooks.hugify = &defer_test_hugify;
	hooks.dehugify = &defer_test_dehugify;
	hooks.curtime = &defer_test_curtime;
	hooks.ms_since = &defer_test_ms_since;
	hooks.vectorized_purge = &defer_vectorized_purge;

	/*
	 * Zero delays/intervals so deferred work fires on the first
	 * hpa_shard_do_deferred_work() call; style none disables syscalls.
	 */
	hpa_shard_opts_t opts = test_hpa_shard_opts_aggressive;
	opts.deferral_allowed = true;
	opts.purge_threshold = PAGE;
	opts.min_purge_delay_ms = 0;
	opts.hugification_threshold = HUGEPAGE * 0.25;
	opts.dirty_mult = FXP_INIT_PERCENT(10);
	opts.hugify_style = hpa_hugify_style_none;
	opts.min_purge_interval_ms = 0;
	opts.hugify_delay_ms = 0;

	hpa_shard_t *shard = create_test_data(&hooks, &opts);
	bool deferred_work_generated = false;
	/* Current time = 10ms */
	nstime_init(&defer_curtime, 10 * 1000 * 1000);

	/* Fake that system is in thp_always mode */
	system_thp_mode_t old_mode = init_system_thp_mode;
	init_system_thp_mode = system_thp_mode_always;

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	enum { NALLOCS = HUGEPAGE_PAGES };
	edata_t *edatas[NALLOCS];
	ndefer_purge_calls = 0;
	/* Fill half a hugepage with single-page allocations. */
	for (int i = 0; i < NALLOCS / 2; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    false, false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	hpdata_t *ps = psset_pick_alloc(&shard->psset, PAGE);
	expect_true(hpdata_huge_get(ps),
	    "Page should be huge because thp=always and hugify_style is none");

	/* Deferred work should purge, but must not hugify or dehugify. */
	ndefer_hugify_calls = 0;
	ndefer_purge_calls = 0;
	hpa_shard_do_deferred_work(tsdn, shard);
	expect_zu_eq(ndefer_hugify_calls, 0, "style=none, no syscall");
	expect_zu_eq(ndefer_dehugify_calls, 0, "style=none, no syscall");
	expect_zu_eq(ndefer_purge_calls, 1, "purge should happen");

	destroy_test_data(shard);
	/* Restore the real THP mode for anything that runs after us. */
	init_system_thp_mode = old_mode;
}
TEST_END
199
200 int
201 main(void) {
202 return test_no_reentrancy(
203 test_hpa_hugify_style_none_huge_no_syscall_thp_always);
204 }
205