/*	$NetBSD: subr_kmem.c,v 1.9.2.3 2007/02/04 14:15:50 ad Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory.
 *
 * TODO:
 * -	is an "intrsafe" (interrupt-safe) version worth having?  maybe..
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.9.2.3 2007/02/04 14:15:50 ad Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/debug.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

#if defined(DEBUG)
static void *kmem_freecheck;
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(DEBUG) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(DEBUG) */

static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

/*
 * kmf_to_vmf: convert kmem's sleep flags to the equivalent vmem flags.
 */

static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	/* exactly one of KM_SLEEP and KM_NOSLEEP must be specified */
	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = (void *)vmem_alloc(kmem_arena, size,
	    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	/*
	 * vmem_alloc can fail (e.g. for KM_NOSLEEP); poison-check and
	 * freecheck only successful allocations.
	 */
	if (p != NULL) {
		kmem_poison_check(p, size);
		FREECHECK_OUT(&kmem_freecheck, p);
	}
	return p;
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 *
 * => must not be called from interrupt context.
 * => size must match the size passed to kmem_alloc.
 */

void
kmem_free(void *p, size_t size)
{

	FREECHECK_IN(&kmem_freecheck, p);
	kmem_poison_fill(p, size);
	vmem_free(kmem_arena, (vmem_addr_t)p, size);
}

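/*
 * Illustrative sketch (not part of this file) of typical kmem usage:
 * the caller tracks the allocation size itself and hands the same size
 * back to kmem_free.  "example_softc", "example_attach" and
 * "example_detach" are hypothetical names.
 */
#if 0
struct example_softc {
	int sc_state;
};

static struct example_softc *
example_attach(void)
{
	struct example_softc *sc;

	/* KM_SLEEP sleeps until memory is available; KM_NOSLEEP can return NULL. */
	sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
	return sc;
}

static void
example_detach(struct example_softc *sc)
{

	/* must pass the same size that was allocated */
	kmem_free(sc, sizeof(*sc));
}
#endif
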
void
kmem_init(void)
{

	/*
	 * Create the kmem arena, which imports page-sized chunks from
	 * uvm via the backend functions, and register a callback so
	 * that kernel_map KVA reclamation also reaps the arena.
	 */
	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL,
	    KMEM_QUANTUM_SIZE * 32, VM_SLEEP);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
}

size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}

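/*
 * Illustrative sketch (not part of this file): since the arena rounds
 * requests up to KMEM_QUANTUM_SIZE anyway, a caller can round first
 * and make use of the slack.  "example_makebuf" is a hypothetical name.
 */
#if 0
static char *
example_makebuf(size_t want, size_t *allocatedp)
{
	size_t bufsize;

	bufsize = kmem_roundup_size(want);	/* multiple of the quantum */
	*allocatedp = bufsize;			/* caller frees this size */
	return kmem_alloc(bufsize, KM_SLEEP);	/* all bufsize bytes usable */
}
#endif
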
/* ---- uvm glue */

static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	/* uvm_km_alloc returns 0 on failure (UVM_KMF_CANFAIL) */
	if (va != 0) {
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(addr != 0);
	KASSERT(size != 0);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}

/* ---- debug */

#if defined(DEBUG)

/*
 * Odd multipliers for a multiplicative hash of the address; the top
 * byte of the product serves as the per-address poison pattern.
 */
#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

static inline uint8_t
kmem_poison_pattern(const void *p)
{

	/* take the top byte of the multiplicative hash of the address */
	return (uint8_t)((((uintptr_t)p) * PRIME)
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

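/*
 * Illustrative sketch (not part of this file): the poison byte is
 * derived from the address it occupies, so any write to freed memory
 * shows up as a mismatch on the next check.  "example_poison_demo"
 * is a hypothetical name.
 */
#if 0
static void
example_poison_demo(void)
{
	uint8_t buf[16];

	kmem_poison_fill(buf, sizeof(buf));
	kmem_poison_check(buf, sizeof(buf));	/* passes */
	buf[5] ^= 0xff;				/* simulate a stray write */
	kmem_poison_check(buf, sizeof(buf));	/* panics with the mismatch */
}
#endif
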
#endif /* defined(DEBUG) */