uvm_km.c revision 1.107
/*	$NetBSD: uvm_km.c,v 1.107 2011/01/04 08:26:33 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is the size of the entire kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
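
/*
 * Illustrative sketch (hypothetical, not part of this file): the offset
 * arithmetic described above, spelled out as code.  The helper name
 * "example_offset_demo" is invented for illustration, so the fragment
 * is kept under #if 0.
 */
#if 0	/* example only */
static void
example_offset_demo(void)
{
	vaddr_t va;
	voff_t off;

	/* allocate one wired page of kernel VA, as in the example above */
	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
	if (va == 0)
		return;		/* out of KVA or physical memory */

	/*
	 * kernel-object offsets are always relative to the start of
	 * the kernel map, e.g. 0xf8235000 - 0xf8000000 == 0x235000.
	 */
	off = (voff_t)(va - vm_map_min(kernel_map));
	KASSERT(off < VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);

	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
}
#endif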

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.107 2011/01/04 08:26:33 matt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel	kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	    offsetof(struct vm_map_kernel, vmk_vacache)))
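
/*
 * Editorial note (not in the original file): KM_VACACHE_POOL_TO_MAP is
 * the usual "container of" trick -- vmk_vacache is embedded in struct
 * vm_map_kernel, so subtracting its offset from the pool pointer
 * recovers the enclosing structure, and hence the map the cache
 * allocates from.  The cast to struct vm_map * apparently relies on
 * the vm_map being the first member of struct vm_map_kernel.
 */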

static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;
	size = pp->pr_alloc->pa_pagesz;

	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;
	int ipl;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pa->pa_backingmap = map;
	pa->pa_backingmapptr = NULL;

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		ipl = IPL_VM;
	else
		ipl = IPL_NONE;

	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
	    ipl);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{
	struct vm_map_kernel *vmk = vm_map_to_kernel(map);

	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}
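
/*
 * Illustrative sketch (hypothetical, not part of this file): uvm_km_init()
 * runs exactly once during bootstrap, before any other kernel-map
 * operation.  A machine-dependent caller would look roughly like this;
 * the function and variable names are invented.
 */
#if 0	/* example only */
static void
example_bootstrap(vaddr_t kvm_start, vaddr_t kvm_end)
{

	/*
	 * reserve [VM_MIN_KERNEL_ADDRESS, kvm_start) for the already
	 * loaded kernel image and install kernel_map.
	 */
	uvm_km_init(kvm_start, kvm_end);
}
#endif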

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *      by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */
	size += uvm_mapent_overhead(size, flags);

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
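
/*
 * Illustrative sketch (hypothetical, not part of this file): carving a
 * boot-time submap in the style of the kmem_map mentioned in the
 * overview above.  The size, flags, and static storage here are
 * assumptions; real callers live in MI/MD startup code.
 */
#if 0	/* example only */
static struct vm_map_kernel example_submap_store;
static struct vm_map *example_submap;

static void
example_make_submap(void)
{
	vaddr_t vmin, vmax;

	example_submap = uvm_km_suballoc(kernel_map, &vmin, &vmax,
	    16 * 1024 * 1024, VM_MAP_INTRSAFE, false /* !fixed */,
	    &example_submap_store);
	/*
	 * allocations within [vmin, vmax) now take example_submap's
	 * lock instead of kernel_map's.
	 */
}
#endif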

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "km_pgrm", 0);
			mutex_enter(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for
 *    non-object-backed regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(vm_map_min(map) <= start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
			mutex_enter(&uvm_kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			mutex_exit(&uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we are allowed to wait, if there is
 *	not enough free VM space in the map... caller should be prepared
 *	to handle this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
		    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
	    | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		   UVM_PGA_STRAT_NORMAL, 0
#endif
		   );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
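
/*
 * Illustrative sketch (hypothetical, not part of this file): typical
 * wired-memory use of uvm_km_alloc()/uvm_km_free().  UVM_KMF_CANFAIL
 * lets the allocator return 0 rather than sleep when memory cannot be
 * reclaimed; the type flag passed to uvm_km_free() must match the one
 * used at allocation time.  The example_* names are invented.
 */
#if 0	/* example only */
static void *
example_wired_alloc(vsize_t len)
{

	return (void *)uvm_km_alloc(kernel_map, round_page(len), 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
}

static void
example_wired_free(void *p, vsize_t len)
{

	uvm_km_free(kernel_map, (vaddr_t)p, round_page(len), UVM_KMF_WIRED);
}
#endif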

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
		pmap_kremove(addr, size);
	}

	/*
	 * uvm_unmap_remove calls pmap_update for us.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage(map, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage(map, waitok);

	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			pool_put(pp, (void *)va);
			return 0;
		}
	}
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
#ifdef PMAP_ALLOC_POOLPAGE
	pg = PMAP_ALLOC_POOLPAGE(waitok ? 0 : UVM_PGA_USERESERVE);
#else
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
#endif
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;

	va = uvm_km_alloc(map, PAGE_SIZE, 0,
	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
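
/*
 * Illustrative sketch (hypothetical, not part of this file): wiring
 * these routines up as a pool's page-level backend, in the same shape
 * as the vacache allocator near the top of this file.  It assumes the
 * kmem_map submap named in the overview; the example_* names are
 * invented.
 */
#if 0	/* example only */
static void *
example_pool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
	    (flags & PR_WAITOK) != 0);
}

static void
example_pool_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
}

static struct pool_allocator example_pool_allocator = {
	.pa_alloc = example_pool_page_alloc,
	.pa_free = example_pool_page_free,
	.pa_pagesz = 0,		/* 0 = use the default page size */
};
#endif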

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage(map, addr);
#else
	struct pool *pp;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(map, addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	pool_put(pp, (void *)addr);
#endif
}

/* ARGSUSED */
void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
#endif /* PMAP_UNMAP_POOLPAGE */
}