/*	$NetBSD: subr_kmem.c,v 1.39 2012/01/27 19:48:40 para Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.39 2012/01/27 19:48:40 para Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_kmguard.h>

#include <lib/libkern/libkern.h>

struct kmem_cache_info {
	int kc_size;
	const char *kc_name;
};

static const struct kmem_cache_info kmem_cache_sizes[] = {
	{  8, "kmem-8" },
	{ 16, "kmem-16" },
	{ 24, "kmem-24" },
	{ 32, "kmem-32" },
	{ 40, "kmem-40" },
	{ 48, "kmem-48" },
	{ 56, "kmem-56" },
	{ 64, "kmem-64" },
	{ 80, "kmem-80" },
	{ 96, "kmem-96" },
	{ 112, "kmem-112" },
	{ 128, "kmem-128" },
	{ 160, "kmem-160" },
	{ 192, "kmem-192" },
	{ 224, "kmem-224" },
	{ 256, "kmem-256" },
	{ 320, "kmem-320" },
	{ 384, "kmem-384" },
	{ 448, "kmem-448" },
	{ 512, "kmem-512" },
	{ 768, "kmem-768" },
	{ 1024, "kmem-1024" },
	{ 2048, "kmem-2048" },
	{ 4096, "kmem-4096" },
	{ 0, NULL }
};

/*
 * KMEM_ALIGN is the smallest guaranteed alignment and also the
 * smallest allocatable quantum.
 * Every cache size that is a multiple of CACHE_LINE_SIZE gets
 * CACHE_LINE_SIZE alignment.
 */
#define KMEM_ALIGN	8
#define KMEM_SHIFT	3
#define KMEM_MAXSIZE	4096
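
/*
 * Worked example (illustrative only, assuming a kernel without the
 * DEBUG overhead declared below, i.e. REDZONE_SIZE == 0 and
 * SIZE_SIZE == 0): a kmem_alloc(20, KM_SLEEP) request is rounded up
 * to 24 bytes by kmem_roundup_size(), giving index
 * (24 - 1) >> KMEM_SHIFT == 2, so it is served from kmem_cache[2],
 * the "kmem-24" pool cache.  With DEBUG, the red zone and size tag
 * enlarge the internal allocation size first, so the same request may
 * land in the next larger cache.
 */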

static pool_cache_t kmem_cache[KMEM_MAXSIZE >> KMEM_SHIFT];
static size_t kmem_cache_max;

#if defined(DEBUG)
int kmem_guard_depth = 0;
size_t kmem_guard_size;
static struct uvm_kmguard kmem_guard;
static void *kmem_freecheck;
#define	KMEM_POISON
#define	KMEM_REDZONE
#define	KMEM_SIZE
#define	KMEM_GUARD
#endif /* defined(DEBUG) */

#if defined(KMEM_POISON)
static int kmem_poison_ctor(void *, void *, int);
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define kmem_poison_fill(p, sz)		/* nothing */
#define kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define	REDZONE_SIZE	1
#else /* defined(KMEM_REDZONE) */
#define	REDZONE_SIZE	0
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_SIZE)
#define	SIZE_SIZE	(max(KMEM_ALIGN, sizeof(size_t)))
static void kmem_size_set(void *, size_t);
static void kmem_size_check(void *, size_t);
#else
#define	SIZE_SIZE	0
#define	kmem_size_set(p, sz)	/* nothing */
#define	kmem_size_check(p, sz)	/* nothing */
#endif

CTASSERT(KM_SLEEP == PR_WAITOK);
CTASSERT(KM_NOSLEEP == PR_NOWAIT);

void * kmem_intr_alloc(size_t size, km_flag_t kmflags);
void * kmem_intr_zalloc(size_t size, km_flag_t kmflags);
void kmem_intr_free(void *, size_t size);

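/*
 * kmem_intr_alloc/kmem_intr_zalloc/kmem_intr_free:
 * internal allocation and free paths, usable from interrupt context.
 * Small requests (after any debug overhead is added) are served from
 * the per-size pool caches in kmem_cache[]; anything larger than
 * kmem_cache_max falls back to page-granular allocation from
 * kmem_va_arena.  When KMEM_GUARD is enabled, requests not larger than
 * kmem_guard_size are redirected to the uvm_kmguard allocator instead.
 */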
void *
kmem_intr_alloc(size_t size, km_flag_t kmflags)
{
	size_t index;
	size_t allocsz;
	pool_cache_t pc;
	uint8_t *p;

	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		return uvm_kmguard_alloc(&kmem_guard, size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif

	allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE;
	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_max >> KMEM_SHIFT) {
		pc = kmem_cache[index];
	} else {
		int rc;
		rc = uvm_km_kmem_alloc(kmem_va_arena,
		    (vsize_t)round_page(allocsz),
		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
		     | VM_INSTANTFIT, (vmem_addr_t *)&p);
		return (rc != 0) ? NULL : p;
	}

	p = pool_cache_get(pc, kmflags);

	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, kmem_roundup_size(size));
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, allocsz);
	}
	return p;
}

void *
kmem_intr_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_intr_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

void
kmem_intr_free(void *p, size_t size)
{
	size_t index;
	size_t allocsz;
	pool_cache_t pc;

	KASSERT(p != NULL);
	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		uvm_kmguard_free(&kmem_guard, size, p);
		return;
	}
#endif

	allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE;
	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_max >> KMEM_SHIFT) {
		pc = kmem_cache[index];
	} else {
		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
		    round_page(allocsz));
		return;
	}

	kmem_size_check(p, allocsz);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, allocsz - (REDZONE_SIZE + SIZE_SIZE));
	kmem_poison_check((uint8_t *)p + size, allocsz - size - SIZE_SIZE);
	kmem_poison_fill(p, allocsz);

	pool_cache_put(pc, p);
}


/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	return kmem_intr_alloc(size, kmflags);
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	return kmem_intr_zalloc(size, kmflags);
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	kmem_intr_free(p, size);
}
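
/*
 * Example usage of the API above (illustrative only; "struct frobnitz"
 * is a made-up type, not something defined in this file).  The size
 * passed to kmem_free() must match the size passed to
 * kmem_alloc()/kmem_zalloc():
 *
 *	struct frobnitz *f;
 *
 *	f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	...
 *	kmem_free(f, sizeof(*f));
 */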

static void
kmem_create_caches(const struct kmem_cache_info *array,
    pool_cache_t alloc_table[], size_t maxsize)
{
	size_t table_unit = (1 << KMEM_SHIFT);
	size_t size = table_unit;
	int i;

	for (i = 0; array[i].kc_size != 0 ; i++) {
		size_t cache_size = array[i].kc_size;
		size_t align;

		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
			align = CACHE_LINE_SIZE;
		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
			align = PAGE_SIZE;
		else
			align = KMEM_ALIGN;

		const char *name = array[i].kc_name;
		pool_cache_t pc;
		int flags = PR_NOALIGN;
		if (cache_size < CACHE_LINE_SIZE)
			flags |= PR_NOTOUCH;

		/* check if we reached the requested size */
		if (cache_size > maxsize)
			break;

		kmem_cache_max = cache_size;

#if defined(KMEM_POISON)
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, &pool_allocator_kmem, IPL_VM, kmem_poison_ctor,
		    NULL, (void *)cache_size);
#else /* defined(KMEM_POISON) */
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, &pool_allocator_kmem, IPL_VM, NULL, NULL, NULL);
#endif /* defined(KMEM_POISON) */

		while (size <= cache_size) {
			alloc_table[(size - 1) >> KMEM_SHIFT] = pc;
			size += table_unit;
		}
	}
}

void
kmem_init(void)
{

#ifdef KMEM_GUARD
	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
		kernel_map);
#endif

	kmem_create_caches(kmem_cache_sizes, kmem_cache, KMEM_MAXSIZE);
}

size_t
kmem_roundup_size(size_t size)
{

	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
}
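
/*
 * For example, kmem_roundup_size(1) == 8, kmem_roundup_size(8) == 8 and
 * kmem_roundup_size(9) == 16: requests are rounded up to the next
 * multiple of KMEM_ALIGN.
 */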

/* ---- debug */

#if defined(KMEM_POISON)

#if defined(_LP64)
#define PRIME 0x9e37fffffffc0000UL
#else /* defined(_LP64) */
#define PRIME 0x9e3779b1
#endif /* defined(_LP64) */

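/*
 * The poison scheme fills free objects with an address-dependent byte
 * pattern: each byte's expected value is derived from its own address
 * by multiplying with PRIME and keeping the most significant byte of
 * the product.  kmem_poison_check() recomputes the pattern and panics
 * on a mismatch, which catches modification of free objects and writes
 * past the caller's requested size.
 */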
static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)(((uintptr_t)p) * PRIME
	   >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
}

static int
kmem_poison_ctor(void *arg, void *obj, int flag)
{
	size_t sz = (size_t)arg;

	kmem_poison_fill(obj, sz);

	return 0;
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			   __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
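/*
 * The size tag occupies the last SIZE_SIZE bytes of the allocation:
 * kmem_size_set() records the internal allocation size there, and
 * kmem_size_check() verifies at free time that the size passed to
 * kmem_free() produces the same value, panicking on a mismatch.
 */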
static void
kmem_size_set(void *p, size_t sz)
{
	void *szp;

	szp = (uint8_t *)p + sz - SIZE_SIZE;
	memcpy(szp, &sz, sizeof(sz));
}

static void
kmem_size_check(void *p, size_t sz)
{
	uint8_t *szp;
	size_t psz;

	szp = (uint8_t *)p + sz - SIZE_SIZE;
	memcpy(&psz, szp, sizeof(psz));
	if (psz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz - SIZE_SIZE, psz);
	}
}
#endif	/* defined(KMEM_SIZE) */

/*
 * kmem_asprintf: dynamically allocate a string with kmem according to
 * the given format.
 */
char *
kmem_asprintf(const char *fmt, ...)
{
	int size, len;
	va_list va;
	char *str;

	va_start(va, fmt);
	len = vsnprintf(NULL, 0, fmt, va);
	va_end(va);

	str = kmem_alloc(len + 1, KM_SLEEP);

	va_start(va, fmt);
	size = vsnprintf(str, len + 1, fmt, va);
	va_end(va);

	KASSERT(size == len);

	return str;
}
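
/*
 * Example usage (illustrative only; the format and arguments are
 * made up).  The result must be released with kmem_free() using the
 * string length plus the terminating NUL:
 *
 *	char *name;
 *
 *	name = kmem_asprintf("%s%d", "example", 0);
 *	...
 *	kmem_free(name, strlen(name) + 1);
 */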