/*	$NetBSD: uvm_km.c,v 1.101.4.2.4.11 2012/04/12 19:38:27 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel malloc
 *   mb_map => memory for large mbufs
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is the size of the kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
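
/*
 * illustrative sketch (not compiled) of the VA <-> kernel object offset
 * rule above; "kva" stands for any address returned by uvm_km_alloc():
 *
 *	voff_t off = kva - vm_map_min(kernel_map);	VA -> object offset
 *	vaddr_t va = vm_map_min(kernel_map) + off;	object offset -> VA
 */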

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.101.4.2.4.11 2012/04/12 19:38:27 matt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel	kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	    offsetof(struct vm_map_kernel, vmk_vacache)))

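/*
 * KM_VACACHE_POOL_TO_MAP is the usual "container_of" idiom: given a
 * pointer to the vmk_vacache pool embedded in a struct vm_map_kernel,
 * subtracting the member's offset recovers the enclosing map.
 */
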
static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;

	size = pp->pr_alloc->pa_pagesz;
	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;
	int ipl;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pa->pa_backingmap = map;
	pa->pa_backingmapptr = NULL;

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		ipl = IPL_VM;
	else
		ipl = IPL_NONE;

	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
	    ipl);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */

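/*
 * uvm_km_va_drain: try to release cached kernel va in the given map by
 * running the map's registered reclaim callbacks in round-robin order.
 */
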
void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{
	struct vm_map_kernel *vmk = vm_map_to_kernel(map);

	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [VM_MIN_KERNEL_ADDRESS -> start] has already been
 *    allocated and that "end" is the end of kernel virtual address space.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already have been
	 * allocated as kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *      by the submap must start
 * => if submap is non-NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */
	size += uvm_mapent_overhead(size, flags);

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
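
/*
 * example (an illustrative, uncompiled sketch): a typical boot-time
 * submap creation in the style described above.  the names "foo_map"
 * and "foo_map_store" and the size are hypothetical.
 */
#if 0
static struct vm_map_kernel foo_map_store;
struct vm_map *foo_map;

void
foo_init(void)
{
	vaddr_t vmin, vmax;

	/* carve 16 pages out of kernel_map; let uvm choose the address */
	foo_map = uvm_km_suballoc(kernel_map, &vmin, &vmax,
	    16 * PAGE_SIZE, VM_MAP_PAGEABLE, false, &foo_map_store);
}
#endif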

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "km_pgrm", 0);
			mutex_enter(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagedequeue(pg);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
 *    regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(vm_map_min(map) <= start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_km_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
			mutex_enter(&uvm_kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			mutex_exit(&uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even when we are allowed to wait, if there is
 *	not enough free VM space in the map... the caller should be prepared
 *	to handle this case.
 * => we return the KVA of the memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	/* UVM_KMF_COLORMATCH implies UVM_KMF_VAONLY */
	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 ||
	    (flags & UVM_KMF_VAONLY) != 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
		    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA |
	     UVM_KMF_COLORMATCH)) | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		   UVM_PGA_STRAT_NORMAL, 0
#endif
		   );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT)
			    || ((flags & UVM_KMF_CANFAIL)
			        && !uvm_reclaimable(
				    atop(offset) & uvmexp.colormask, true))) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		uvm_km_pageclaim(pg);

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), prot|PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
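
/*
 * example (an illustrative, uncompiled caller fragment): allocating a
 * wired, zeroed buffer and handling the documented failure case.  "len"
 * is hypothetical and assumed to be a multiple of PAGE_SIZE.
 */
#if 0
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, len, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (va == 0)
		return ENOMEM;	/* UVM_KMF_CANFAIL: caller must cope */
	/* ... use [va, va + len) ... */
	uvm_km_free(kernel_map, va, len, UVM_KMF_WIRED);
#endif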

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
		pmap_kremove(addr, size);
	}

	/*
	 * uvm_unmap_remove calls pmap_update for us.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage(map, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage(map, waitok);

	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			pool_put(pp, (void *)va);
			return 0;
		}
	}
	uvm_km_pageclaim(pg);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
#ifdef PMAP_ALLOC_POOLPAGE
	pg = PMAP_ALLOC_POOLPAGE(waitok ? 0 : UVM_PGA_USERESERVE);
#else
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
#endif
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0)) {
		uvm_pagefree(pg);
	} else {
		uvm_km_pageclaim(pg);
	}
	return (va);
#else
	vaddr_t va;

	va = uvm_km_alloc(map, PAGE_SIZE, 0,
	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

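/*
 * example (an illustrative, uncompiled sketch): a pool_allocator whose
 * backing pages come from the routines above, mirroring the shape of the
 * km_vacache allocator earlier in this file.  "foo_page_alloc" and
 * "foo_page_free" are hypothetical names, and kmem_map is assumed to be
 * visible here.
 */
#if 0
static void *
foo_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
	    (flags & PR_WAITOK) != 0);
}

static void
foo_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
}
#endif
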
/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage(map, addr);
#else
	struct pool *pp;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(map, addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	pool_put(pp, (void *)addr);
#endif
}

/* ARGSUSED */
void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa = PMAP_UNMAP_POOLPAGE(addr);
	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);

	uvm_km_pagefree(pg);
#else
	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
#endif /* PMAP_UNMAP_POOLPAGE */
}

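/*
 * uvm_km_pageclaim: account a page as wired kernel memory.
 *
 * => bumps the owning page group's kernel-memory page counter and puts
 *    the page on the kernel memory page queue.
 */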
void
uvm_km_pageclaim(struct vm_page *pg)
{
	KASSERT(!(pg->pqflags & (PQ_PRIVATE1|PQ_PRIVATE2)));
	atomic_inc_uint(&uvm_page_to_pggroup(pg)->pgrp_kmempages);
	TAILQ_INSERT_TAIL(&uvm.kmem_pageq, pg, pageq.queue);
}

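/*
 * uvm_km_pagefree: undo uvm_km_pageclaim's accounting and free the page.
 */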
void
uvm_km_pagefree(struct vm_page *pg)
{
	KASSERT(!(pg->pqflags & (PQ_PRIVATE1|PQ_PRIVATE2)));
	atomic_dec_uint(&uvm_page_to_pggroup(pg)->pgrp_kmempages);
	TAILQ_REMOVE(&uvm.kmem_pageq, pg, pageq.queue);
	uvm_pagefree(pg);
}