/*	$NetBSD: subr_kmem.c,v 1.49 2013/04/22 13:13:20 yamt Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * allocator of kernel wired memory.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.49 2013/04/22 13:13:20 yamt Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_kmguard.h>

#include <lib/libkern/libkern.h>

struct kmem_cache_info {
	size_t		kc_size;
	const char *	kc_name;
};

static const struct kmem_cache_info kmem_cache_sizes[] = {
	{  8, "kmem-8" },
	{ 16, "kmem-16" },
	{ 24, "kmem-24" },
	{ 32, "kmem-32" },
	{ 40, "kmem-40" },
	{ 48, "kmem-48" },
	{ 56, "kmem-56" },
	{ 64, "kmem-64" },
	{ 80, "kmem-80" },
	{ 96, "kmem-96" },
	{ 112, "kmem-112" },
	{ 128, "kmem-128" },
	{ 160, "kmem-160" },
	{ 192, "kmem-192" },
	{ 224, "kmem-224" },
	{ 256, "kmem-256" },
	{ 320, "kmem-320" },
	{ 384, "kmem-384" },
	{ 448, "kmem-448" },
	{ 512, "kmem-512" },
	{ 768, "kmem-768" },
	{ 1024, "kmem-1024" },
	{ 0, NULL }
};

static const struct kmem_cache_info kmem_cache_big_sizes[] = {
	{ 2048, "kmem-2048" },
	{ 4096, "kmem-4096" },
	{ 8192, "kmem-8192" },
	{ 16384, "kmem-16384" },
	{ 0, NULL }
};

/*
 * KMEM_ALIGN is the smallest guaranteed alignment and also the
 * smallest allocatable quantum.
 * Every cache size >= CACHE_LINE_SIZE gets CACHE_LINE_SIZE alignment.
 */
#define KMEM_ALIGN	8
#define KMEM_SHIFT	3
#define KMEM_MAXSIZE	1024
#define KMEM_CACHE_COUNT	(KMEM_MAXSIZE >> KMEM_SHIFT)
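
/*
 * Worked example of the cache lookup (assuming the KMEM_SIZE and
 * KMEM_REDZONE debug options are off, so allocsz == size): a 20 byte
 * request rounds up to 24 bytes, and (24 - 1) >> KMEM_SHIFT == 2, so
 * it is served from kmem_cache[2], the "kmem-24" pool cache.
 */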

static pool_cache_t kmem_cache[KMEM_CACHE_COUNT] __cacheline_aligned;
static size_t kmem_cache_maxidx __read_mostly;

#define KMEM_BIG_ALIGN	2048
#define KMEM_BIG_SHIFT	11
#define KMEM_BIG_MAXSIZE	16384
#define KMEM_CACHE_BIG_COUNT	(KMEM_BIG_MAXSIZE >> KMEM_BIG_SHIFT)

static pool_cache_t kmem_cache_big[KMEM_CACHE_BIG_COUNT] __cacheline_aligned;
static size_t kmem_cache_big_maxidx __read_mostly;

#if defined(DEBUG) && defined(_HARDKERNEL)
#ifndef KMEM_GUARD_DEPTH
#define KMEM_GUARD_DEPTH 0
#endif
int kmem_guard_depth = KMEM_GUARD_DEPTH;
size_t kmem_guard_size;
static struct uvm_kmguard kmem_guard;
static void *kmem_freecheck;
#define KMEM_POISON
#define KMEM_REDZONE
#define KMEM_SIZE
#define KMEM_GUARD
#endif /* defined(DEBUG) && defined(_HARDKERNEL) */
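
/*
 * Summary of the debug facilities enabled above (DEBUG kernels only):
 *
 * KMEM_POISON: fill free items with an address-derived pattern and
 *	verify it on allocation, to catch use-after-free.
 * KMEM_REDZONE: add a guard byte past each requested size and check
 *	it on free, to catch small buffer overruns.
 * KMEM_SIZE: record the requested size in a header before each item
 *	and verify it in kmem_free(), to catch mismatched sizes.
 * KMEM_GUARD: serve allocations of up to kmem_guard_size bytes from
 *	the uvm_kmguard allocator, which catches overruns with
 *	unmapped guard pages.
 */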

#if defined(KMEM_POISON)
static int kmem_poison_ctor(void *, void *, int);
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define kmem_poison_fill(p, sz)		/* nothing */
#define kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define REDZONE_SIZE	1
#else /* defined(KMEM_REDZONE) */
#define REDZONE_SIZE	0
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_SIZE)
#define SIZE_SIZE	(MAX(KMEM_ALIGN, sizeof(size_t)))
static void kmem_size_set(void *, size_t);
static void kmem_size_check(void *, size_t);
#else
#define SIZE_SIZE	0
#define kmem_size_set(p, sz)	/* nothing */
#define kmem_size_check(p, sz)	/* nothing */
#endif

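/*
 * The KM_* flag values must coincide with the corresponding PR_*
 * values so kmflags can be passed to pool_cache_get() unchanged; the
 * assertions below check this at compile time.
 */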
CTASSERT(KM_SLEEP == PR_WAITOK);
CTASSERT(KM_NOSLEEP == PR_NOWAIT);

/*
 * kmem_intr_alloc: allocate wired memory.
 */

void *
kmem_intr_alloc(size_t size, km_flag_t kmflags)
{
	size_t allocsz, index;
	pool_cache_t pc;
	uint8_t *p;

	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		return uvm_kmguard_alloc(&kmem_guard, size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif
	size = kmem_roundup_size(size);
	allocsz = size + REDZONE_SIZE + SIZE_SIZE;
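
	/*
	 * Item layout with the debug options enabled: a SIZE_SIZE
	 * header recording the requested size, the caller-visible
	 * area, then REDZONE_SIZE guard byte(s).  The pointer handed
	 * to the caller points just past the header.
	 */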

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		int ret = uvm_km_kmem_alloc(kmem_va_arena,
		    (vsize_t)round_page(size),
		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
		    | VM_INSTANTFIT, (vmem_addr_t *)&p);
		if (ret) {
			return NULL;
		}
		FREECHECK_OUT(&kmem_freecheck, p);
		return p;
	}

	p = pool_cache_get(pc, kmflags);

	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, size);
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, size);

		return p + SIZE_SIZE;
	}
	return p;
}

/*
 * kmem_intr_zalloc: allocate zeroed wired memory.
 */

void *
kmem_intr_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_intr_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_intr_free: free wired memory allocated by kmem_intr_alloc.
 */

void
kmem_intr_free(void *p, size_t size)
{
	size_t allocsz, index;
	pool_cache_t pc;

	KASSERT(p != NULL);
	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		uvm_kmguard_free(&kmem_guard, size, p);
		return;
	}
#endif
	size = kmem_roundup_size(size);
	allocsz = size + REDZONE_SIZE + SIZE_SIZE;

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		FREECHECK_IN(&kmem_freecheck, p);
		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
		    round_page(size));
		return;
	}

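	/*
	 * Step back over the size header, verify the recorded size and
	 * the redzone bytes past the caller-visible area, then poison
	 * the whole item before returning it to the pool.
	 */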
	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, size);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_check((uint8_t *)p + SIZE_SIZE + size,
	    allocsz - (SIZE_SIZE + size));
	kmem_poison_fill(p, allocsz);

	pool_cache_put(pc, p);
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) must not be used from interrupt context");
	return kmem_intr_alloc(size, kmflags);
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) must not be used from interrupt context");
	return kmem_intr_zalloc(size, kmflags);
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	kmem_intr_free(p, size);
}
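
/*
 * Typical use (illustrative sketch; "struct foo" is a hypothetical
 * caller-side type).  Note that the size given to kmem_free() must
 * equal the size given to kmem_alloc():
 *
 *	struct foo *f = kmem_alloc(sizeof(*f), KM_SLEEP);
 *	...
 *	kmem_free(f, sizeof(*f));
 */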

static size_t
kmem_create_caches(const struct kmem_cache_info *array,
    pool_cache_t alloc_table[], size_t maxsize, int shift, int ipl)
{
	size_t maxidx = 0;
	size_t table_unit = (1 << shift);
	size_t size = table_unit;
	int i;

	for (i = 0; array[i].kc_size != 0 ; i++) {
		const char *name = array[i].kc_name;
		size_t cache_size = array[i].kc_size;
		struct pool_allocator *pa;
		int flags = PR_NOALIGN;
		pool_cache_t pc;
		size_t align;

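		/*
		 * Sizes that are a multiple of the cache line (or of
		 * the page) get matching alignment; everything else
		 * gets the KMEM_ALIGN minimum.
		 */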
		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
			align = CACHE_LINE_SIZE;
		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
			align = PAGE_SIZE;
		else
			align = KMEM_ALIGN;

		if (cache_size < CACHE_LINE_SIZE)
			flags |= PR_NOTOUCH;

		/* check if we reached the requested size */
		if (cache_size > maxsize || cache_size > PAGE_SIZE) {
			break;
		}
		if ((cache_size >> shift) > maxidx) {
			maxidx = cache_size >> shift;
		}

		pa = &pool_allocator_kmem;
#if defined(KMEM_POISON)
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, kmem_poison_ctor,
		    NULL, (void *)cache_size);
#else /* defined(KMEM_POISON) */
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, NULL, NULL, NULL);
#endif /* defined(KMEM_POISON) */

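		/*
		 * Point every alloc_table slot from the previous cache
		 * size up to this one at the new cache, so the lookup
		 * in kmem_intr_alloc() is a single array index.
		 */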
		while (size <= cache_size) {
			alloc_table[(size - 1) >> shift] = pc;
			size += table_unit;
		}
	}
	return maxidx;
}

void
kmem_init(void)
{

#ifdef KMEM_GUARD
	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
	    kmem_va_arena);
#endif
	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
	    kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
	kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
	    kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM);
}

size_t
kmem_roundup_size(size_t size)
{

	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
}

/* ---- debug */

#if defined(KMEM_POISON)

#if defined(_LP64)
#define PRIME	0x9e37fffffffc0000UL
#else /* defined(_LP64) */
#define PRIME	0x9e3779b1
#endif /* defined(_LP64) */

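/*
 * Each poisoned byte gets a pattern derived from its own address:
 * multiply by a large odd constant and keep the most significant
 * byte.  Because the pattern is position-dependent, corrupted or
 * misplaced data is unlikely to match it by accident.
 */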
static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)(((uintptr_t)p) * PRIME
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}

static int
kmem_poison_ctor(void *arg, void *obj, int flag)
{
	size_t sz = (size_t)arg;

	kmem_poison_fill(obj, sz);

	return 0;
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
static void
kmem_size_set(void *p, size_t sz)
{

	memcpy(p, &sz, sizeof(sz));
}

static void
kmem_size_check(void *p, size_t sz)
{
	size_t psz;

	memcpy(&psz, p, sizeof(psz));
	if (psz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz, psz);
	}
}
#endif /* defined(KMEM_SIZE) */

/*
 * kmem_asprintf: allocate, with kmem(9), a string built from the given
 * format.  The caller frees it with kmem_free(str, strlen(str) + 1).
 */
char *
kmem_asprintf(const char *fmt, ...)
{
	int size, len;
	va_list va;
	char *str;

	va_start(va, fmt);
	len = vsnprintf(NULL, 0, fmt, va);
	va_end(va);

	str = kmem_alloc(len + 1, KM_SLEEP);

	va_start(va, fmt);
	size = vsnprintf(str, len + 1, fmt, va);
	va_end(va);

	KASSERT(size == len);

	return str;
}
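
/*
 * Example use (illustrative sketch; "unit" is a hypothetical caller
 * variable).  The result is strlen(str) + 1 bytes, which is the size
 * to pass back to kmem_free():
 *
 *	char *str = kmem_asprintf("sd%d", unit);
 *	...
 *	kmem_free(str, strlen(str) + 1);
 */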