/*	$NetBSD: subr_kmem.c,v 1.42.2.2.4.1 2013/04/20 10:16:31 bouyer Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory.
 */
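
/*
 * A minimal usage sketch (illustrative only; "struct foo" is a
 * hypothetical type).  The caller must pass kmem_free() the same size
 * it passed to kmem_alloc() or kmem_zalloc():
 *
 *	struct foo *f;
 *
 *	f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	...use f, which is zero-filled...
 *	kmem_free(f, sizeof(*f));
 */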

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.42.2.2.4.1 2013/04/20 10:16:31 bouyer Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_kmguard.h>

#include <lib/libkern/libkern.h>

struct kmem_cache_info {
	size_t		kc_size;
	const char *	kc_name;
};

static const struct kmem_cache_info kmem_cache_sizes[] = {
	{  8, "kmem-8" },
	{ 16, "kmem-16" },
	{ 24, "kmem-24" },
	{ 32, "kmem-32" },
	{ 40, "kmem-40" },
	{ 48, "kmem-48" },
	{ 56, "kmem-56" },
	{ 64, "kmem-64" },
	{ 80, "kmem-80" },
	{ 96, "kmem-96" },
	{ 112, "kmem-112" },
	{ 128, "kmem-128" },
	{ 160, "kmem-160" },
	{ 192, "kmem-192" },
	{ 224, "kmem-224" },
	{ 256, "kmem-256" },
	{ 320, "kmem-320" },
	{ 384, "kmem-384" },
	{ 448, "kmem-448" },
	{ 512, "kmem-512" },
	{ 768, "kmem-768" },
	{ 1024, "kmem-1024" },
	{ 0, NULL }
};

static const struct kmem_cache_info kmem_cache_big_sizes[] = {
	{ 2048, "kmem-2048" },
	{ 4096, "kmem-4096" },
	{ 8192, "kmem-8192" },
	{ 16384, "kmem-16384" },
	{ 0, NULL }
};

/*
 * KMEM_ALIGN is the smallest guaranteed alignment and also the
 * smallest allocatable quantum.
 * Every cache size >= CACHE_LINE_SIZE gets CACHE_LINE_SIZE alignment.
 */
#define	KMEM_ALIGN		8
#define	KMEM_SHIFT		3
#define	KMEM_MAXSIZE		1024
#define	KMEM_CACHE_COUNT	(KMEM_MAXSIZE >> KMEM_SHIFT)
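
/*
 * Worked example (a sketch, with the KMEM_SIZE/KMEM_REDZONE debug
 * headers disabled): a 20-byte request is rounded up to 24 bytes and
 * served from the "kmem-24" pool cache, found at
 * kmem_cache[(24 - 1) >> KMEM_SHIFT], i.e. kmem_cache[2].
 */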

static pool_cache_t kmem_cache[KMEM_CACHE_COUNT] __cacheline_aligned;
static size_t kmem_cache_maxidx __read_mostly;

#define	KMEM_BIG_ALIGN		2048
#define	KMEM_BIG_SHIFT		11
#define	KMEM_BIG_MAXSIZE	16384
#define	KMEM_CACHE_BIG_COUNT	(KMEM_BIG_MAXSIZE >> KMEM_BIG_SHIFT)

static pool_cache_t kmem_cache_big[KMEM_CACHE_BIG_COUNT] __cacheline_aligned;
static size_t kmem_cache_big_maxidx __read_mostly;


#if defined(DEBUG)
int kmem_guard_depth = 0;
size_t kmem_guard_size;
static struct uvm_kmguard kmem_guard;
static void *kmem_freecheck;
#define	KMEM_POISON
#define	KMEM_REDZONE
#define	KMEM_SIZE
#define	KMEM_GUARD
#endif /* defined(DEBUG) */

#if defined(KMEM_POISON)
static int kmem_poison_ctor(void *, void *, int);
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define	REDZONE_SIZE	1
#else /* defined(KMEM_REDZONE) */
#define	REDZONE_SIZE	0
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_SIZE)
#define	SIZE_SIZE	(MAX(KMEM_ALIGN, sizeof(size_t)))
static void kmem_size_set(void *, size_t);
static void kmem_size_check(void *, size_t);
#else
#define	SIZE_SIZE	0
#define	kmem_size_set(p, sz)	/* nothing */
#define	kmem_size_check(p, sz)	/* nothing */
#endif

CTASSERT(KM_SLEEP == PR_WAITOK);
CTASSERT(KM_NOSLEEP == PR_NOWAIT);

/*
 * kmem_intr_alloc: allocate wired memory.
 */

void *
kmem_intr_alloc(size_t size, km_flag_t kmflags)
{
	size_t allocsz, index;
	pool_cache_t pc;
	uint8_t *p;

	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		return uvm_kmguard_alloc(&kmem_guard, size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif
	size = kmem_roundup_size(size);
	allocsz = size + REDZONE_SIZE + SIZE_SIZE;

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		int ret = uvm_km_kmem_alloc(kmem_va_arena,
		    (vsize_t)round_page(size),
		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
		     | VM_INSTANTFIT, (vmem_addr_t *)&p);
		if (ret) {
			return NULL;
		}
		FREECHECK_OUT(&kmem_freecheck, p);
		return p;
	}

	p = pool_cache_get(pc, kmflags);

	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, size);
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, size);

		return p + SIZE_SIZE;
	}
	return p;
}
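
/*
 * Dispatch sketch (illustrative, assuming 4 KiB pages and DEBUG off):
 * a 100-byte request rounds to 104 and is served by kmem_cache[12]
 * ("kmem-112"); a 3000-byte request is served by kmem_cache_big[1]
 * ("kmem-4096"); a 20000-byte request exceeds the largest cache and
 * falls through to uvm_km_kmem_alloc().
 */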

/*
 * kmem_intr_zalloc: allocate zeroed wired memory.
 */

void *
kmem_intr_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_intr_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_intr_free: free wired memory allocated by kmem_alloc.
 */

void
kmem_intr_free(void *p, size_t size)
{
	size_t allocsz, index;
	pool_cache_t pc;

	KASSERT(p != NULL);
	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		uvm_kmguard_free(&kmem_guard, size, p);
		return;
	}
#endif
	size = kmem_roundup_size(size);
	allocsz = size + REDZONE_SIZE + SIZE_SIZE;

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		FREECHECK_IN(&kmem_freecheck, p);
		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
		    round_page(size));
		return;
	}

	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, size);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_check((uint8_t *)p + SIZE_SIZE + size,
	    allocsz - (SIZE_SIZE + size));
	kmem_poison_fill(p, allocsz);

	pool_cache_put(pc, p);
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) must not be used from interrupt context");
	return kmem_intr_alloc(size, kmflags);
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) must not be used from interrupt context");
	return kmem_intr_zalloc(size, kmflags);
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	kmem_intr_free(p, size);
}

static size_t
kmem_create_caches(const struct kmem_cache_info *array,
    pool_cache_t alloc_table[], size_t maxsize, int shift, int ipl)
{
	size_t maxidx = 0;
	size_t table_unit = (1 << shift);
	size_t size = table_unit;
	int i;

	for (i = 0; array[i].kc_size != 0 ; i++) {
		const char *name = array[i].kc_name;
		size_t cache_size = array[i].kc_size;
		struct pool_allocator *pa;
		int flags = PR_NOALIGN;
		pool_cache_t pc;
		size_t align;

		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
			align = CACHE_LINE_SIZE;
		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
			align = PAGE_SIZE;
		else
			align = KMEM_ALIGN;

		if (cache_size < CACHE_LINE_SIZE)
			flags |= PR_NOTOUCH;

		/* check if we reached the requested size */
		if (cache_size > maxsize || cache_size > PAGE_SIZE) {
			break;
		}
		if ((cache_size >> shift) > maxidx) {
			maxidx = cache_size >> shift;
		}

		pa = &pool_allocator_kmem;
#if defined(KMEM_POISON)
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, kmem_poison_ctor,
		    NULL, (void *)cache_size);
#else /* defined(KMEM_POISON) */
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, NULL, NULL, NULL);
#endif /* defined(KMEM_POISON) */

		while (size <= cache_size) {
			alloc_table[(size - 1) >> shift] = pc;
			size += table_unit;
		}
	}
	return maxidx;
}
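
/*
 * For example (a sketch): with the small-size table, the loop above
 * leaves alloc_table[0] pointing at "kmem-8" for 1-8 byte requests,
 * while indices 8 and 9 (rounded sizes 72 and 80) both point at
 * "kmem-80".
 */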

void
kmem_init(void)
{

#ifdef KMEM_GUARD
	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
	    kmem_va_arena);
#endif
	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
	    kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
	kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
	    kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM);
}

size_t
kmem_roundup_size(size_t size)
{

	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
}
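
/*
 * E.g. kmem_roundup_size(20) == (20 + 7) & ~7 == 24, while multiples
 * of KMEM_ALIGN are returned unchanged.
 */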

/* ---- debug */

#if defined(KMEM_POISON)

#if defined(_LP64)
#define PRIME 0x9e37fffffffc0000UL
#else /* defined(_LP64) */
#define PRIME 0x9e3779b1
#endif /* defined(_LP64) */

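/*
 * The poison pattern is a multiplicative hash of each byte's own
 * address: the address is multiplied by PRIME and the top byte of the
 * product is kept, so neighbouring bytes carry different values that
 * can be cheaply recomputed for verification.
 */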
static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)(((uintptr_t)p) * PRIME
	   >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
}

static int
kmem_poison_ctor(void *arg, void *obj, int flag)
{
	size_t sz = (size_t)arg;

	kmem_poison_fill(obj, sz);

	return 0;
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			   __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
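/*
 * With KMEM_SIZE, each allocation carries a SIZE_SIZE header recording
 * the rounded-up request size: kmem_intr_alloc() stores it and returns
 * the address just past the header, and kmem_intr_free() checks it
 * against the size supplied by the caller.
 */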
static void
kmem_size_set(void *p, size_t sz)
{

	memcpy(p, &sz, sizeof(sz));
}

static void
kmem_size_check(void *p, size_t sz)
{
	size_t psz;

	memcpy(&psz, p, sizeof(psz));
	if (psz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz, psz);
	}
}
#endif	/* defined(KMEM_SIZE) */

/*
 * kmem_asprintf: allocate a string with kmem(9), formatted according
 * to fmt.  Free the result with kmem_free(str, strlen(str) + 1).
 */
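/*
 * Example (a sketch; "unit" is a hypothetical int):
 *
 *	char *name = kmem_asprintf("mydev%d", unit);
 *	...
 *	kmem_free(name, strlen(name) + 1);
 */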
char *
kmem_asprintf(const char *fmt, ...)
{
	int size, len;
	va_list va;
	char *str;

	va_start(va, fmt);
	len = vsnprintf(NULL, 0, fmt, va);
	va_end(va);

	str = kmem_alloc(len + 1, KM_SLEEP);

	va_start(va, fmt);
	size = vsnprintf(str, len + 1, fmt, va);
	va_end(va);

	KASSERT(size == len);

	return str;
}