/*	$NetBSD: subr_kmem.c,v 1.60.4.2 2016/03/19 11:30:31 skrll Exp $	*/

/*-
 * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Allocator of kernel wired memory. This allocator has some debug features
 * enabled with "option DIAGNOSTIC" and "option DEBUG".
 */

/*
 * KMEM_SIZE: detect alloc/free size mismatch bugs.
 *	Prefix each allocation with a fixed-size, aligned header and record
 *	the exact user-requested allocation size in it. When freeing, compare
 *	it with kmem_free's "size" argument.
 *
 * KMEM_REDZONE: detect overrun bugs.
 *	Add a 2-byte pattern (allocating one more memory chunk if needed) at
 *	the end of each allocated buffer. Check this pattern on kmem_free.
 *
 * These options are enabled on DIAGNOSTIC.
 *
 *  |CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|
 *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
 *  |/////|     |     |     |     |     |     |     |     |   |*|**|UU|
 *  |/HSZ/|     |     |     |     |     |     |     |     |   |*|**|UU|
 *  |/////|     |     |     |     |     |     |     |     |   |*|**|UU|
 *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
 *  |Size |    Buffer usable by the caller (requested size)  |RedZ|Unused\
 */

/*
 * KMEM_POISON: detect modify-after-free bugs.
 *	Fill freed (in the sense of kmem_free) memory with a garbage pattern.
 *	Check the pattern on allocation.
 *
 * KMEM_GUARD
 *	A kernel with "option DEBUG" has the "kmem_guard" debugging feature
 *	compiled in. See the comment below for what kind of bugs it tries to
 *	detect. Even if compiled in, it's disabled by default because it's
 *	very expensive. You can enable it on boot by:
 *		boot -d
 *		db> w kmem_guard_depth 0t30000
 *		db> c
 *
 *	The default value of kmem_guard_depth is 0, which means disabled.
 *	It can be changed with the KMEM_GUARD_DEPTH kernel config option.
 */
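
/*
 * Typical use of the API below (an illustrative sketch, not code from
 * this file; "foo" is a hypothetical consumer):
 *
 *	struct foo *f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	...
 *	kmem_free(f, sizeof(*f));
 *
 * The size passed to kmem_free() must equal the size passed to
 * kmem_alloc()/kmem_zalloc(); KMEM_SIZE exists to catch mismatches.
 */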

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.60.4.2 2016/03/19 11:30:31 skrll Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

struct kmem_cache_info {
	size_t		kc_size;
	const char *	kc_name;
};

static const struct kmem_cache_info kmem_cache_sizes[] = {
	{  8, "kmem-8" },
	{ 16, "kmem-16" },
	{ 24, "kmem-24" },
	{ 32, "kmem-32" },
	{ 40, "kmem-40" },
	{ 48, "kmem-48" },
	{ 56, "kmem-56" },
	{ 64, "kmem-64" },
	{ 80, "kmem-80" },
	{ 96, "kmem-96" },
	{ 112, "kmem-112" },
	{ 128, "kmem-128" },
	{ 160, "kmem-160" },
	{ 192, "kmem-192" },
	{ 224, "kmem-224" },
	{ 256, "kmem-256" },
	{ 320, "kmem-320" },
	{ 384, "kmem-384" },
	{ 448, "kmem-448" },
	{ 512, "kmem-512" },
	{ 768, "kmem-768" },
	{ 1024, "kmem-1024" },
	{ 0, NULL }
};

static const struct kmem_cache_info kmem_cache_big_sizes[] = {
	{ 2048, "kmem-2048" },
	{ 4096, "kmem-4096" },
	{ 8192, "kmem-8192" },
	{ 16384, "kmem-16384" },
	{ 0, NULL }
};

/*
 * KMEM_ALIGN is the smallest guaranteed alignment and also the
 * smallest allocatable quantum.
 * Every cache size >= CACHE_LINE_SIZE gets CACHE_LINE_SIZE alignment.
 */
#define	KMEM_ALIGN		8
#define	KMEM_SHIFT		3
#define	KMEM_MAXSIZE		1024
#define	KMEM_CACHE_COUNT	(KMEM_MAXSIZE >> KMEM_SHIFT)

static pool_cache_t kmem_cache[KMEM_CACHE_COUNT] __cacheline_aligned;
static size_t kmem_cache_maxidx __read_mostly;

#define	KMEM_BIG_ALIGN		2048
#define	KMEM_BIG_SHIFT		11
#define	KMEM_BIG_MAXSIZE	16384
#define	KMEM_CACHE_BIG_COUNT	(KMEM_BIG_MAXSIZE >> KMEM_BIG_SHIFT)

static pool_cache_t kmem_cache_big[KMEM_CACHE_BIG_COUNT] __cacheline_aligned;
static size_t kmem_cache_big_maxidx __read_mostly;
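
/*
 * Worked example (illustrative): with KMEM_SIZE enabled, SIZE_SIZE is 8,
 * so a 100-byte kmem_alloc rounds up to size == 104 and allocsz == 112;
 * the 4 bytes of padding already fit the 2-byte red zone. kmem_intr_alloc
 * below then picks kmem_cache[(112 - 1) >> KMEM_SHIFT], i.e. index 13,
 * which is the "kmem-112" pool cache.
 */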
177 1.46 para
178 1.53 maxv #if defined(DIAGNOSTIC) && defined(_HARDKERNEL)
179 1.57 maxv #define KMEM_SIZE
180 1.60 maxv #define KMEM_REDZONE
181 1.53 maxv #endif /* defined(DIAGNOSTIC) */
182 1.53 maxv
183 1.45 martin #if defined(DEBUG) && defined(_HARDKERNEL)
184 1.60.4.1 skrll #define KMEM_SIZE
185 1.19 yamt #define KMEM_POISON
186 1.27 ad #define KMEM_GUARD
187 1.60.4.1 skrll static void *kmem_freecheck;
188 1.19 yamt #endif /* defined(DEBUG) */
189 1.19 yamt
190 1.19 yamt #if defined(KMEM_POISON)
191 1.39 para static int kmem_poison_ctor(void *, void *, int);
192 1.4 yamt static void kmem_poison_fill(void *, size_t);
193 1.4 yamt static void kmem_poison_check(void *, size_t);
194 1.19 yamt #else /* defined(KMEM_POISON) */
195 1.40 rmind #define kmem_poison_fill(p, sz) /* nothing */
196 1.40 rmind #define kmem_poison_check(p, sz) /* nothing */
197 1.19 yamt #endif /* defined(KMEM_POISON) */
198 1.19 yamt
199 1.19 yamt #if defined(KMEM_REDZONE)
200 1.54 maxv #define REDZONE_SIZE 2
201 1.57 maxv static void kmem_redzone_fill(void *, size_t);
202 1.57 maxv static void kmem_redzone_check(void *, size_t);
203 1.19 yamt #else /* defined(KMEM_REDZONE) */
204 1.19 yamt #define REDZONE_SIZE 0
205 1.54 maxv #define kmem_redzone_fill(p, sz) /* nothing */
206 1.54 maxv #define kmem_redzone_check(p, sz) /* nothing */
207 1.19 yamt #endif /* defined(KMEM_REDZONE) */
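
/*
 * Red zone sizing, worked through (illustrative): for a 7-byte request,
 * kmem_roundup_size gives size == 8, leaving only 1 byte of padding;
 * since REDZONE_SIZE == 2, kmem_intr_alloc adds one more 8-byte chunk
 * so the 2-byte pattern always fits after the requested bytes.
 */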

#if defined(KMEM_SIZE)
struct kmem_header {
	size_t		size;
} __aligned(KMEM_ALIGN);
#define	SIZE_SIZE	sizeof(struct kmem_header)
static void kmem_size_set(void *, size_t);
static void kmem_size_check(void *, size_t);
#else
#define	SIZE_SIZE	0
#define	kmem_size_set(p, sz)	/* nothing */
#define	kmem_size_check(p, sz)	/* nothing */
#endif

#if defined(KMEM_GUARD)
#ifndef KMEM_GUARD_DEPTH
#define	KMEM_GUARD_DEPTH	0
#endif
struct kmem_guard {
	u_int		kg_depth;
	intptr_t *	kg_fifo;
	u_int		kg_rotor;
	vmem_t *	kg_vmem;
};

static bool	kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
static void	*kmem_guard_alloc(struct kmem_guard *, size_t, bool);
static void	kmem_guard_free(struct kmem_guard *, size_t, void *);

int kmem_guard_depth = KMEM_GUARD_DEPTH;
static bool kmem_guard_enabled;
static struct kmem_guard kmem_guard;
#endif /* defined(KMEM_GUARD) */

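/*
 * KM_SLEEP/KM_NOSLEEP must mirror the pool(9) flags, so that km_flag_t
 * values can be handed straight to pool_cache_get() without translation.
 */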
CTASSERT(KM_SLEEP == PR_WAITOK);
CTASSERT(KM_NOSLEEP == PR_NOWAIT);

/*
 * kmem_intr_alloc: allocate wired memory.
 */

void *
kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
{
	size_t allocsz, index;
	size_t size;
	pool_cache_t pc;
	uint8_t *p;

	KASSERT(requested_size > 0);

#ifdef KMEM_GUARD
	if (kmem_guard_enabled) {
		return kmem_guard_alloc(&kmem_guard, requested_size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif
	size = kmem_roundup_size(requested_size);
	allocsz = size + SIZE_SIZE;

#ifdef KMEM_REDZONE
	if (size - requested_size < REDZONE_SIZE) {
		/*
		 * If there isn't enough space in the padding, allocate
		 * one more memory chunk for the red zone.
		 */
		allocsz += kmem_roundup_size(REDZONE_SIZE);
	}
#endif

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		int ret = uvm_km_kmem_alloc(kmem_va_arena,
		    (vsize_t)round_page(size),
		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
		    | VM_INSTANTFIT, (vmem_addr_t *)&p);
		if (ret) {
			return NULL;
		}
		FREECHECK_OUT(&kmem_freecheck, p);
		return p;
	}

	p = pool_cache_get(pc, kmflags);

	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, allocsz);
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, requested_size);
		kmem_redzone_fill(p, requested_size + SIZE_SIZE);

		return p + SIZE_SIZE;
	}
	return p;
}

/*
 * kmem_intr_zalloc: allocate zeroed wired memory.
 */

void *
kmem_intr_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_intr_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_intr_free: free wired memory allocated by kmem_alloc.
 */

void
kmem_intr_free(void *p, size_t requested_size)
{
	size_t allocsz, index;
	size_t size;
	pool_cache_t pc;

	KASSERT(p != NULL);
	KASSERT(requested_size > 0);

#ifdef KMEM_GUARD
	if (kmem_guard_enabled) {
		kmem_guard_free(&kmem_guard, requested_size, p);
		return;
	}
#endif

	size = kmem_roundup_size(requested_size);
	allocsz = size + SIZE_SIZE;

#ifdef KMEM_REDZONE
	if (size - requested_size < REDZONE_SIZE) {
		allocsz += kmem_roundup_size(REDZONE_SIZE);
	}
#endif

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		FREECHECK_IN(&kmem_freecheck, p);
		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
		    round_page(size));
		return;
	}

	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, requested_size);
	kmem_redzone_check(p, requested_size + SIZE_SIZE);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_fill(p, allocsz);

	pool_cache_put(pc, p);
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *v;

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) should not be used from the interrupt context");
	v = kmem_intr_alloc(size, kmflags);
	KASSERT(v || (kmflags & KM_NOSLEEP) != 0);
	return v;
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *v;

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) should not be used from the interrupt context");
	v = kmem_intr_zalloc(size, kmflags);
	KASSERT(v || (kmflags & KM_NOSLEEP) != 0);
	return v;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	kmem_intr_free(p, size);
}

static size_t
kmem_create_caches(const struct kmem_cache_info *array,
    pool_cache_t alloc_table[], size_t maxsize, int shift, int ipl)
{
	size_t maxidx = 0;
	size_t table_unit = (1 << shift);
	size_t size = table_unit;
	int i;

	for (i = 0; array[i].kc_size != 0 ; i++) {
		const char *name = array[i].kc_name;
		size_t cache_size = array[i].kc_size;
		struct pool_allocator *pa;
		int flags = PR_NOALIGN;
		pool_cache_t pc;
		size_t align;

		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
			align = CACHE_LINE_SIZE;
		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
			align = PAGE_SIZE;
		else
			align = KMEM_ALIGN;

		if (cache_size < CACHE_LINE_SIZE)
			flags |= PR_NOTOUCH;

		/* check if we reached the requested size */
		if (cache_size > maxsize || cache_size > PAGE_SIZE) {
			break;
		}
		if ((cache_size >> shift) > maxidx) {
			maxidx = cache_size >> shift;
		}

		pa = &pool_allocator_kmem;
#if defined(KMEM_POISON)
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, kmem_poison_ctor,
		    NULL, (void *)cache_size);
#else /* defined(KMEM_POISON) */
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, NULL, NULL, NULL);
#endif /* defined(KMEM_POISON) */

		while (size <= cache_size) {
			alloc_table[(size - 1) >> shift] = pc;
			size += table_unit;
		}
	}
	return maxidx;
}

void
kmem_init(void)
{
#ifdef KMEM_GUARD
	kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
	    kmem_va_arena);
#endif
	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
	    kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
	kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
	    kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM);
}

size_t
kmem_roundup_size(size_t size)
{
	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
}
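
/*
 * E.g. (illustrative) with KMEM_ALIGN == 8: kmem_roundup_size(13) == 16
 * and kmem_roundup_size(16) == 16.
 */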

/*
 * kmem_asprintf: dynamically allocate, with kmem, a string built
 * according to the given format.
 */
char *
kmem_asprintf(const char *fmt, ...)
{
	int size __diagused, len;
	va_list va;
	char *str;

	va_start(va, fmt);
	len = vsnprintf(NULL, 0, fmt, va);
	va_end(va);

	str = kmem_alloc(len + 1, KM_SLEEP);

	va_start(va, fmt);
	size = vsnprintf(str, len + 1, fmt, va);
	va_end(va);

	KASSERT(size == len);

	return str;
}
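
/*
 * Usage sketch (illustrative; "unit" is a stand-in): the caller frees
 * the string with its exact allocated size, i.e. strlen(str) + 1:
 *
 *	char *name = kmem_asprintf("foo%d", unit);
 *	...
 *	kmem_free(name, strlen(name) + 1);
 */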

/* ------------------ DEBUG / DIAGNOSTIC ------------------ */

#if defined(KMEM_POISON) || defined(KMEM_REDZONE)
#if defined(_LP64)
#define PRIME 0x9e37fffffffc0000UL
#else /* defined(_LP64) */
#define PRIME 0x9e3779b1
#endif /* defined(_LP64) */

static inline uint8_t
kmem_pattern_generate(const void *p)
{
	return (uint8_t)(((uintptr_t)p) * PRIME
	   >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
}
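
/*
 * Note: the pattern byte is a pure function of the byte's own address,
 * so the fill and check sides can recompute it independently; no copy
 * of the pattern needs to be stored alongside the buffer.
 */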
#endif /* defined(KMEM_POISON) || defined(KMEM_REDZONE) */

#if defined(KMEM_POISON)
static int
kmem_poison_ctor(void *arg, void *obj, int flag)
{
	size_t sz = (size_t)arg;

	kmem_poison_fill(obj, sz);

	return 0;
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_pattern_generate(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_pattern_generate(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
static void
kmem_size_set(void *p, size_t sz)
{
	struct kmem_header *hd;
	hd = (struct kmem_header *)p;
	hd->size = sz;
}

static void
kmem_size_check(void *p, size_t sz)
{
	struct kmem_header *hd;
	size_t hsz;

	hd = (struct kmem_header *)p;
	hsz = hd->size;

	if (hsz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz, hsz);
	}
}
#endif /* defined(KMEM_SIZE) */

#if defined(KMEM_REDZONE)
#define STATIC_BYTE	0xFE
CTASSERT(REDZONE_SIZE > 1);
static void
kmem_redzone_fill(void *p, size_t sz)
{
	uint8_t *cp, pat;
	const uint8_t *ep;

	cp = (uint8_t *)p + sz;
	ep = cp + REDZONE_SIZE;

	/*
	 * We really don't want the first byte of the red zone to be '\0';
	 * an off-by-one in a string may not be properly detected.
	 */
	pat = kmem_pattern_generate(cp);
	*cp = (pat == '\0') ? STATIC_BYTE : pat;
	cp++;

	while (cp < ep) {
		*cp = kmem_pattern_generate(cp);
		cp++;
	}
}

static void
kmem_redzone_check(void *p, size_t sz)
{
	uint8_t *cp, pat, expected;
	const uint8_t *ep;

	cp = (uint8_t *)p + sz;
	ep = cp + REDZONE_SIZE;

	pat = kmem_pattern_generate(cp);
	expected = (pat == '\0') ? STATIC_BYTE : pat;
	if (expected != *cp) {
		panic("%s: %p: 0x%02x != 0x%02x\n",
		    __func__, cp, *cp, expected);
	}
	cp++;

	while (cp < ep) {
		expected = kmem_pattern_generate(cp);
		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_GUARD)
/*
 * The ultimate memory allocator for debugging, baby. It tries to catch:
 *
 * 1. Overflow, in realtime. A guard page sits immediately after the
 *    requested area; a read/write overflow therefore triggers a page
 *    fault.
 * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
 *    just before the requested area, and holds the allocated size. Any
 *    difference with what is given at free triggers a panic.
 * 3. Underflow, at free. If an underflow occurs, the kmem header will be
 *    modified, and 2. will trigger a panic.
 * 4. Use-after-free. When freeing, the memory is unmapped, and depending
 *    on the value of kmem_guard_depth, the kernel will more or less delay
 *    the recycling of that memory. Which means that any subsequent
 *    read/write access to the memory will trigger a page fault, provided
 *    it hasn't been recycled yet.
 */
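
/*
 * Rough layout of a guarded allocation (a sketch of what
 * kmem_guard_alloc() below constructs):
 *
 *	| mapped page(s) ... header | object (requested_size) | guard page |
 *	                                                       ^ unmapped
 *
 * The object is pushed up against the guard page (modulo at most
 * ALIGNBYTES of alignment slack), so even a small overflow faults
 * immediately.
 */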

#include <sys/atomic.h>
#include <uvm/uvm.h>

static bool
kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
{
	vaddr_t va;

	/* If not enabled, we have nothing to do. */
	if (depth == 0) {
		return false;
	}
	depth = roundup(depth, PAGE_SIZE / sizeof(void *));
	KASSERT(depth != 0);

	/*
	 * Allocate fifo.
	 */
	va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (va == 0) {
		return false;
	}

	/*
	 * Init object.
	 */
	kg->kg_vmem = vm;
	kg->kg_fifo = (void *)va;
	kg->kg_depth = depth;
	kg->kg_rotor = 0;

	printf("kmem_guard(%p): depth %d\n", kg, depth);
	return true;
}

static void *
kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
{
	struct vm_page *pg;
	vm_flag_t flags;
	vmem_addr_t va;
	vaddr_t loopva;
	vsize_t loopsize;
	size_t size;
	void **p;

	/*
	 * Compute the size: take the kmem header into account, and add a guard
	 * page at the end.
	 */
	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;

	/* Allocate pages of kernel VA, but do not map anything in yet. */
	flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
	if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
		return NULL;
	}

	loopva = va;
	loopsize = size - PAGE_SIZE;

	while (loopsize) {
		pg = uvm_pagealloc(NULL, loopva, NULL, 0);
		if (__predict_false(pg == NULL)) {
			if (waitok) {
				uvm_wait("kmem_guard");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(kg->kg_vmem, va, size);
				return NULL;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	/*
	 * Offset the returned pointer so that the unmapped guard page sits
	 * immediately after the returned object.
	 */
	p = (void **)((va + (size - PAGE_SIZE) - requested_size) &
	    ~(uintptr_t)ALIGNBYTES);
	kmem_size_set((uint8_t *)p - SIZE_SIZE, requested_size);
	return (void *)p;
}

static void
kmem_guard_free(struct kmem_guard *kg, size_t requested_size, void *p)
{
	vaddr_t va;
	u_int rotor;
	size_t size;
	uint8_t *ptr;

	ptr = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(ptr, requested_size);
	va = trunc_page((vaddr_t)ptr);
	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;

	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
	KASSERT(!pmap_extract(pmap_kernel(), va + (size - PAGE_SIZE), NULL));

	/*
	 * Unmap and free the pages. The last one is never allocated.
	 */
	uvm_km_pgremove_intrsafe(kernel_map, va, va + size);
	pmap_update(pmap_kernel());

#if 0
	/*
	 * XXX: Here, we need to atomically register the va and its size in the
	 * fifo.
	 */

	/*
	 * Put the VA allocation into the list and swap an old one out to free.
	 * This behaves mostly like a fifo.
	 */
	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
	if (va != 0) {
		vmem_free(kg->kg_vmem, va, size);
	}
#else
	(void)rotor;
	vmem_free(kg->kg_vmem, va, size);
#endif
}

#endif /* defined(KMEM_GUARD) */