/*	$NetBSD: subr_kmem.c,v 1.61 2015/07/27 09:24:28 maxv Exp $	*/

/*-
 * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory. This allocator has some debug features
 * enabled with "option DIAGNOSTIC" and "option DEBUG".
 */

/*
 * KMEM_SIZE: detect alloc/free size mismatch bugs.
 *	Prefix each allocation with a fixed-size, aligned header and record
 *	the exact user-requested allocation size in it. When freeing, compare
 *	it with kmem_free's "size" argument.
 *
 * KMEM_REDZONE: detect overrun bugs.
 *	Add a 2-byte pattern (allocate one more memory chunk if needed) at the
 *	end of each allocated buffer. Check this pattern on kmem_free.
 *
 * These options are enabled under DIAGNOSTIC.
 *
 *	|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|
 *	+-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
 *	|/////|     |     |     |     |     |     |     |     |   |*|**|UU|
 *	|/HSZ/|     |     |     |     |     |     |     |     |   |*|**|UU|
 *	|/////|     |     |     |     |     |     |     |     |   |*|**|UU|
 *	+-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
 *	|Size |   Buffer usable by the caller (requested size)  |RedZ|Unused\
 */

/*
 * KMEM_POISON: detect modify-after-free bugs.
 *	Fill freed (in the sense of kmem_free) memory with a garbage pattern.
 *	Check the pattern on allocation.
 *
 * KMEM_GUARD
 *	A kernel with "option DEBUG" has the "kmem_guard" debugging feature
 *	compiled in. See the comment below for what kind of bugs it tries to
 *	detect. Even if compiled in, it's disabled by default because it's
 *	very expensive. You can enable it at boot time with:
 *		boot -d
 *		db> w kmem_guard_depth 0t30000
 *		db> c
 *
 *	The default value of kmem_guard_depth is 0, which means disabled.
 *	It can be changed with the KMEM_GUARD_DEPTH kernel config option.
 */
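
/*
 * Example (hypothetical caller): with KMEM_SIZE, the size given to
 * kmem_free() must match the size given to kmem_alloc(), so the usual
 * pattern is:
 *
 *	struct foo *f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	...
 *	kmem_free(f, sizeof(*f));
 *
 * Freeing with a different size triggers a panic when KMEM_SIZE is on.
 */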

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.61 2015/07/27 09:24:28 maxv Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

struct kmem_cache_info {
	size_t		kc_size;
	const char *	kc_name;
};

static const struct kmem_cache_info kmem_cache_sizes[] = {
	{  8, "kmem-8" },
	{ 16, "kmem-16" },
	{ 24, "kmem-24" },
	{ 32, "kmem-32" },
	{ 40, "kmem-40" },
	{ 48, "kmem-48" },
	{ 56, "kmem-56" },
	{ 64, "kmem-64" },
	{ 80, "kmem-80" },
	{ 96, "kmem-96" },
	{ 112, "kmem-112" },
	{ 128, "kmem-128" },
	{ 160, "kmem-160" },
	{ 192, "kmem-192" },
	{ 224, "kmem-224" },
	{ 256, "kmem-256" },
	{ 320, "kmem-320" },
	{ 384, "kmem-384" },
	{ 448, "kmem-448" },
	{ 512, "kmem-512" },
	{ 768, "kmem-768" },
	{ 1024, "kmem-1024" },
	{ 0, NULL }
};

static const struct kmem_cache_info kmem_cache_big_sizes[] = {
	{ 2048, "kmem-2048" },
	{ 4096, "kmem-4096" },
	{ 8192, "kmem-8192" },
	{ 16384, "kmem-16384" },
	{ 0, NULL }
};
/*
 * KMEM_ALIGN is the smallest guaranteed alignment and also the
 * smallest allocatable quantum.
 * Every cache size >= CACHE_LINE_SIZE gets CACHE_LINE_SIZE alignment.
 */
#define	KMEM_ALIGN		8
#define	KMEM_SHIFT		3
#define	KMEM_MAXSIZE		1024
#define	KMEM_CACHE_COUNT	(KMEM_MAXSIZE >> KMEM_SHIFT)
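
/*
 * Example: an allocation whose total size (including the KMEM_SIZE
 * header) is 40 bytes gives index = (40 - 1) >> KMEM_SHIFT = 4, and
 * kmem_cache[4] is the "kmem-40" pool cache.
 */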

static pool_cache_t kmem_cache[KMEM_CACHE_COUNT] __cacheline_aligned;
static size_t kmem_cache_maxidx __read_mostly;

#define	KMEM_BIG_ALIGN		2048
#define	KMEM_BIG_SHIFT		11
#define	KMEM_BIG_MAXSIZE	16384
#define	KMEM_CACHE_BIG_COUNT	(KMEM_BIG_MAXSIZE >> KMEM_BIG_SHIFT)

static pool_cache_t kmem_cache_big[KMEM_CACHE_BIG_COUNT] __cacheline_aligned;
static size_t kmem_cache_big_maxidx __read_mostly;

#if defined(DIAGNOSTIC) && defined(_HARDKERNEL)
#define	KMEM_SIZE
#define	KMEM_REDZONE
#endif /* defined(DIAGNOSTIC) */

#if defined(DEBUG) && defined(_HARDKERNEL)
#define	KMEM_SIZE
#define	KMEM_POISON
#define	KMEM_GUARD
static void *kmem_freecheck;
#endif /* defined(DEBUG) */

#if defined(KMEM_POISON)
static int kmem_poison_ctor(void *, void *, int);
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define	REDZONE_SIZE	2
static void kmem_redzone_fill(void *, size_t);
static void kmem_redzone_check(void *, size_t);
#else /* defined(KMEM_REDZONE) */
#define	REDZONE_SIZE	0
#define	kmem_redzone_fill(p, sz)	/* nothing */
#define	kmem_redzone_check(p, sz)	/* nothing */
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_SIZE)
struct kmem_header {
	size_t		size;
} __aligned(KMEM_ALIGN);
#define	SIZE_SIZE	sizeof(struct kmem_header)
static void kmem_size_set(void *, size_t);
static void kmem_size_check(void *, size_t);
#else
#define	SIZE_SIZE	0
#define	kmem_size_set(p, sz)	/* nothing */
#define	kmem_size_check(p, sz)	/* nothing */
#endif

#if defined(KMEM_GUARD)
#ifndef KMEM_GUARD_DEPTH
#define KMEM_GUARD_DEPTH 0
#endif
struct kmem_guard {
	u_int		kg_depth;
	intptr_t *	kg_fifo;
	u_int		kg_rotor;
	vmem_t *	kg_vmem;
};

static bool	kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
static void	*kmem_guard_alloc(struct kmem_guard *, size_t, bool);
static void	kmem_guard_free(struct kmem_guard *, size_t, void *);

int kmem_guard_depth = KMEM_GUARD_DEPTH;
static bool kmem_guard_enabled;
static struct kmem_guard kmem_guard;
#endif /* defined(KMEM_GUARD) */

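/*
 * km_flag_t values are handed straight to the pool layer by
 * kmem_intr_alloc()/kmem_intr_free(), so they must be numerically
 * identical to the corresponding PR_* flags:
 */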
CTASSERT(KM_SLEEP == PR_WAITOK);
CTASSERT(KM_NOSLEEP == PR_NOWAIT);

/*
 * kmem_intr_alloc: allocate wired memory.
 */

void *
kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
{
	size_t allocsz, index;
	size_t size;
	pool_cache_t pc;
	uint8_t *p;

	KASSERT(requested_size > 0);

#ifdef KMEM_GUARD
	if (kmem_guard_enabled) {
		return kmem_guard_alloc(&kmem_guard, requested_size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif
	size = kmem_roundup_size(requested_size);
	allocsz = size + SIZE_SIZE;

#ifdef KMEM_REDZONE
	if (size - requested_size < REDZONE_SIZE) {
		/*
		 * If there isn't enough space in the padding, allocate
		 * one more memory chunk for the red zone.
		 */
		allocsz += kmem_roundup_size(REDZONE_SIZE);
	}
#endif

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
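		/*
		 * Too big for any pool cache: fall back to a page-granular
		 * allocation directly from kmem_va_arena.
		 */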
		int ret = uvm_km_kmem_alloc(kmem_va_arena,
		    (vsize_t)round_page(size),
		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
		    | VM_INSTANTFIT, (vmem_addr_t *)&p);
		if (ret) {
			return NULL;
		}
		FREECHECK_OUT(&kmem_freecheck, p);
		return p;
	}

	p = pool_cache_get(pc, kmflags);

	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, allocsz);
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, requested_size);
		kmem_redzone_fill(p, requested_size + SIZE_SIZE);

		return p + SIZE_SIZE;
	}
	return p;
}

/*
 * kmem_intr_zalloc: allocate zeroed wired memory.
 */

void *
kmem_intr_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_intr_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_intr_free: free wired memory allocated by kmem_alloc.
 */

void
kmem_intr_free(void *p, size_t requested_size)
{
	size_t allocsz, index;
	size_t size;
	pool_cache_t pc;

	KASSERT(p != NULL);
	KASSERT(requested_size > 0);

#ifdef KMEM_GUARD
	if (kmem_guard_enabled) {
		kmem_guard_free(&kmem_guard, requested_size, p);
		return;
	}
#endif

	size = kmem_roundup_size(requested_size);
	allocsz = size + SIZE_SIZE;

#ifdef KMEM_REDZONE
	if (size - requested_size < REDZONE_SIZE) {
		allocsz += kmem_roundup_size(REDZONE_SIZE);
	}
#endif

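	/*
	 * Note: allocsz is recomputed here exactly as in kmem_intr_alloc(),
	 * so that the lookup below selects the same pool cache the buffer
	 * was allocated from.
	 */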
	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		FREECHECK_IN(&kmem_freecheck, p);
		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
		    round_page(size));
		return;
	}

	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, requested_size);
	kmem_redzone_check(p, requested_size + SIZE_SIZE);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_fill(p, allocsz);

	pool_cache_put(pc, p);
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) should not be used from the interrupt context");
	return kmem_intr_alloc(size, kmflags);
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) should not be used from the interrupt context");
	return kmem_intr_zalloc(size, kmflags);
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	kmem_intr_free(p, size);
}
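
/*
 * Example (hypothetical caller): code that may run in soft interrupt
 * context has to use the _intr_ variants instead:
 *
 *	buf = kmem_intr_alloc(len, KM_NOSLEEP);
 *	...
 *	kmem_intr_free(buf, len);
 */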
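/*
 * kmem_create_caches: create pool caches from the given size table and
 * fill "alloc_table" so that ((allocsz - 1) >> shift) indexes the
 * smallest cache that fits. Returns one past the largest valid index.
 */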
static size_t
kmem_create_caches(const struct kmem_cache_info *array,
    pool_cache_t alloc_table[], size_t maxsize, int shift, int ipl)
{
	size_t maxidx = 0;
	size_t table_unit = (1 << shift);
	size_t size = table_unit;
	int i;

	for (i = 0; array[i].kc_size != 0; i++) {
		const char *name = array[i].kc_name;
		size_t cache_size = array[i].kc_size;
		struct pool_allocator *pa;
		int flags = PR_NOALIGN;
		pool_cache_t pc;
		size_t align;

		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
			align = CACHE_LINE_SIZE;
		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
			align = PAGE_SIZE;
		else
			align = KMEM_ALIGN;

		if (cache_size < CACHE_LINE_SIZE)
			flags |= PR_NOTOUCH;

		/* check if we reached the requested size */
		if (cache_size > maxsize || cache_size > PAGE_SIZE) {
			break;
		}
		if ((cache_size >> shift) > maxidx) {
			maxidx = cache_size >> shift;
		}

		pa = &pool_allocator_kmem;
#if defined(KMEM_POISON)
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, kmem_poison_ctor,
		    NULL, (void *)cache_size);
#else /* defined(KMEM_POISON) */
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, NULL, NULL, NULL);
#endif /* defined(KMEM_POISON) */

		while (size <= cache_size) {
			alloc_table[(size - 1) >> shift] = pc;
			size += table_unit;
		}
	}
	return maxidx;
}

void
kmem_init(void)
{
#ifdef KMEM_GUARD
	kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
	    kmem_va_arena);
#endif
	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
	    kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
	kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
	    kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM);
}

size_t
kmem_roundup_size(size_t size)
{
	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
}

/*
 * kmem_asprintf: dynamically allocate a string with kmem(9) according
 * to the given format.
 */
char *
kmem_asprintf(const char *fmt, ...)
{
	int size __diagused, len;
	va_list va;
	char *str;

	va_start(va, fmt);
	len = vsnprintf(NULL, 0, fmt, va);
	va_end(va);

	str = kmem_alloc(len + 1, KM_SLEEP);

	va_start(va, fmt);
	size = vsnprintf(str, len + 1, fmt, va);
	va_end(va);

	KASSERT(size == len);

	return str;
}
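
/*
 * Example (hypothetical caller): strings returned by kmem_asprintf()
 * are freed with the string length:
 *
 *	char *str = kmem_asprintf("%s%d", prefix, unit);
 *	...
 *	kmem_free(str, strlen(str) + 1);
 */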

/* ------------------ DEBUG / DIAGNOSTIC ------------------ */

#if defined(KMEM_POISON) || defined(KMEM_REDZONE)
#if defined(_LP64)
#define PRIME 0x9e37fffffffc0000UL
#else /* defined(_LP64) */
#define PRIME 0x9e3779b1
#endif /* defined(_LP64) */

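/*
 * kmem_pattern_generate: derive a per-address check byte by multiplying
 * the address by a large prime and keeping the most significant byte,
 * so each byte of a filled region gets a value tied to its location.
 */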
static inline uint8_t
kmem_pattern_generate(const void *p)
{
	return (uint8_t)(((uintptr_t)p) * PRIME
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}
#endif /* defined(KMEM_POISON) || defined(KMEM_REDZONE) */

#if defined(KMEM_POISON)
static int
kmem_poison_ctor(void *arg, void *obj, int flag)
{
	size_t sz = (size_t)arg;

	kmem_poison_fill(obj, sz);

	return 0;
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_pattern_generate(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_pattern_generate(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
static void
kmem_size_set(void *p, size_t sz)
{
	struct kmem_header *hd;
	hd = (struct kmem_header *)p;
	hd->size = sz;
}

static void
kmem_size_check(void *p, size_t sz)
{
	struct kmem_header *hd;
	size_t hsz;

	hd = (struct kmem_header *)p;
	hsz = hd->size;

	if (hsz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz, hsz);
	}
}
#endif /* defined(KMEM_SIZE) */
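/*
 * The red zone is REDZONE_SIZE bytes placed immediately after the
 * caller-visible area, i.e. starting at offset SIZE_SIZE +
 * requested_size inside the chunk.
 */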
#if defined(KMEM_REDZONE)
#define STATIC_BYTE	0xFE
CTASSERT(REDZONE_SIZE > 1);
static void
kmem_redzone_fill(void *p, size_t sz)
{
	uint8_t *cp, pat;
	const uint8_t *ep;

	cp = (uint8_t *)p + sz;
	ep = cp + REDZONE_SIZE;

	/*
	 * We really don't want the first byte of the red zone to be '\0';
	 * an off-by-one in a string may not be properly detected.
	 */
	pat = kmem_pattern_generate(cp);
	*cp = (pat == '\0') ? STATIC_BYTE : pat;
	cp++;

	while (cp < ep) {
		*cp = kmem_pattern_generate(cp);
		cp++;
	}
}

static void
kmem_redzone_check(void *p, size_t sz)
{
	uint8_t *cp, pat, expected;
	const uint8_t *ep;

	cp = (uint8_t *)p + sz;
	ep = cp + REDZONE_SIZE;

	pat = kmem_pattern_generate(cp);
	expected = (pat == '\0') ? STATIC_BYTE : pat;
	if (expected != *cp) {
		panic("%s: %p: 0x%02x != 0x%02x\n",
		    __func__, cp, *cp, expected);
	}
	cp++;

	while (cp < ep) {
		expected = kmem_pattern_generate(cp);
		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_GUARD)
/*
 * The ultimate memory allocator for debugging, baby. It tries to catch:
 *
 * 1. Overflow, in realtime. A guard page sits immediately after the
 *    requested area; a read/write overflow therefore triggers a page
 *    fault.
 * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
 *    just before the requested area, and holds the allocated size. Any
 *    difference with what is given at free triggers a panic.
 * 3. Underflow, at free. If an underflow occurs, the kmem header will be
 *    modified, and 2. will trigger a panic.
 * 4. Use-after-free. When freeing, the memory is unmapped, and depending
 *    on the value of kmem_guard_depth, the kernel will more or less delay
 *    the recycling of that memory. Which means that any subsequent
 *    read/write access to the memory will trigger a page fault, given it
 *    hasn't been recycled yet.
 */

#include <sys/atomic.h>
#include <uvm/uvm.h>

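/*
 * Layout of a guarded allocation (the object is pushed against the
 * unmapped guard page, modulo pointer alignment):
 *
 *	va                                                    va + size
 *	|--- slack ---|header|object (requested_size)|guard page (unmapped)|
 */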
static bool
kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
{
	vaddr_t va;

	/* If not enabled, we have nothing to do. */
	if (depth == 0) {
		return false;
	}
	depth = roundup(depth, PAGE_SIZE / sizeof(void *));
	KASSERT(depth != 0);

	/*
	 * Allocate fifo.
	 */
	va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (va == 0) {
		return false;
	}

	/*
	 * Init object.
	 */
	kg->kg_vmem = vm;
	kg->kg_fifo = (void *)va;
	kg->kg_depth = depth;
	kg->kg_rotor = 0;

	printf("kmem_guard(%p): depth %u\n", kg, depth);
	return true;
}

static void *
kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
{
	struct vm_page *pg;
	vm_flag_t flags;
	vmem_addr_t va;
	vaddr_t loopva;
	vsize_t loopsize;
	size_t size;
	void **p;

	/*
	 * Compute the size: take the kmem header into account, and add a
	 * guard page at the end.
	 */
	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;

	/* Allocate pages of kernel VA, but do not map anything in yet. */
	flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
	if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
		return NULL;
	}

	loopva = va;
	loopsize = size - PAGE_SIZE;

	while (loopsize) {
		pg = uvm_pagealloc(NULL, loopva, NULL, 0);
		if (__predict_false(pg == NULL)) {
			if (waitok) {
				uvm_wait("kmem_guard");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(kg->kg_vmem, va, size);
				return NULL;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	/*
	 * Offset the returned pointer so that the unmapped guard page sits
	 * immediately after the returned object.
	 */
	p = (void **)((va + (size - PAGE_SIZE) - requested_size) &
	    ~(uintptr_t)ALIGNBYTES);
	kmem_size_set((uint8_t *)p - SIZE_SIZE, requested_size);
	return (void *)p;
}

static void
kmem_guard_free(struct kmem_guard *kg, size_t requested_size, void *p)
{
	vaddr_t va;
	u_int rotor;
	size_t size;
	uint8_t *ptr;

	ptr = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(ptr, requested_size);
	va = trunc_page((vaddr_t)ptr);
	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;

	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
	KASSERT(!pmap_extract(pmap_kernel(), va + (size - PAGE_SIZE), NULL));

	/*
	 * Unmap and free the pages. The last one is never allocated.
	 */
	uvm_km_pgremove_intrsafe(kernel_map, va, va + size);
	pmap_update(pmap_kernel());

#if 0
	/*
	 * XXX: Here, we need to atomically register the va and its size in
	 * the fifo.
	 */

	/*
	 * Put the VA allocation into the list and swap an old one out to
	 * free. This behaves mostly like a fifo.
	 */
	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
	if (va != 0) {
		vmem_free(kg->kg_vmem, va, size);
	}
#else
	(void)rotor;
	vmem_free(kg->kg_vmem, va, size);
#endif
}

#endif /* defined(KMEM_GUARD) */