      1  1.138.14.1     skrll /*	$NetBSD: uvm_km.c,v 1.138.14.1 2015/04/06 15:18:33 skrll Exp $	*/
      2         1.1       mrg 
      3        1.47       chs /*
      4         1.1       mrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5        1.47       chs  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6         1.1       mrg  *
      7         1.1       mrg  * All rights reserved.
      8         1.1       mrg  *
      9         1.1       mrg  * This code is derived from software contributed to Berkeley by
     10         1.1       mrg  * The Mach Operating System project at Carnegie-Mellon University.
     11         1.1       mrg  *
     12         1.1       mrg  * Redistribution and use in source and binary forms, with or without
     13         1.1       mrg  * modification, are permitted provided that the following conditions
     14         1.1       mrg  * are met:
     15         1.1       mrg  * 1. Redistributions of source code must retain the above copyright
     16         1.1       mrg  *    notice, this list of conditions and the following disclaimer.
     17         1.1       mrg  * 2. Redistributions in binary form must reproduce the above copyright
     18         1.1       mrg  *    notice, this list of conditions and the following disclaimer in the
     19         1.1       mrg  *    documentation and/or other materials provided with the distribution.
     20       1.108     chuck  * 3. Neither the name of the University nor the names of its contributors
     21         1.1       mrg  *    may be used to endorse or promote products derived from this software
     22         1.1       mrg  *    without specific prior written permission.
     23         1.1       mrg  *
     24         1.1       mrg  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25         1.1       mrg  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26         1.1       mrg  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27         1.1       mrg  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28         1.1       mrg  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29         1.1       mrg  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30         1.1       mrg  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31         1.1       mrg  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32         1.1       mrg  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33         1.1       mrg  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34         1.1       mrg  * SUCH DAMAGE.
     35         1.1       mrg  *
     36         1.1       mrg  *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
     37         1.4       mrg  * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
     38         1.1       mrg  *
     39         1.1       mrg  *
     40         1.1       mrg  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     41         1.1       mrg  * All rights reserved.
     42        1.47       chs  *
     43         1.1       mrg  * Permission to use, copy, modify and distribute this software and
     44         1.1       mrg  * its documentation is hereby granted, provided that both the copyright
     45         1.1       mrg  * notice and this permission notice appear in all copies of the
     46         1.1       mrg  * software, derivative works or modified versions, and any portions
     47         1.1       mrg  * thereof, and that both notices appear in supporting documentation.
     48        1.47       chs  *
     49        1.47       chs  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     50        1.47       chs  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     51         1.1       mrg  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     52        1.47       chs  *
     53         1.1       mrg  * Carnegie Mellon requests users of this software to return to
     54         1.1       mrg  *
      55         1.1       mrg  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     56         1.1       mrg  *  School of Computer Science
     57         1.1       mrg  *  Carnegie Mellon University
     58         1.1       mrg  *  Pittsburgh PA 15213-3890
     59         1.1       mrg  *
     60         1.1       mrg  * any improvements or extensions that they make and grant Carnegie the
     61         1.1       mrg  * rights to redistribute these changes.
     62         1.1       mrg  */
     63         1.6       mrg 
     64         1.1       mrg /*
     65         1.1       mrg  * uvm_km.c: handle kernel memory allocation and management
     66         1.1       mrg  */
     67         1.1       mrg 
     68         1.7     chuck /*
     69         1.7     chuck  * overview of kernel memory management:
     70         1.7     chuck  *
     71         1.7     chuck  * the kernel virtual address space is mapped by "kernel_map."   kernel_map
     72        1.62   thorpej  * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
     73        1.62   thorpej  * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
     74         1.7     chuck  *
     75        1.47       chs  * the kernel_map has several "submaps."   submaps can only appear in
     76         1.7     chuck  * the kernel_map (user processes can't use them).   submaps "take over"
     77         1.7     chuck  * the management of a sub-range of the kernel's address space.  submaps
     78         1.7     chuck  * are typically allocated at boot time and are never released.   kernel
     79        1.47       chs  * virtual address space that is mapped by a submap is locked by the
     80         1.7     chuck  * submap's lock -- not the kernel_map's lock.
     81         1.7     chuck  *
     82         1.7     chuck  * thus, the useful feature of submaps is that they allow us to break
     83         1.7     chuck  * up the locking and protection of the kernel address space into smaller
     84         1.7     chuck  * chunks.
     85         1.7     chuck  *
     86       1.126      para  * the vm system has several standard kernel submaps/arenas, including:
     87       1.126      para  *   kmem_arena => used for kmem/pool (memoryallocators(9))
     88         1.7     chuck  *   pager_map => used to map "buf" structures into kernel space
     89         1.7     chuck  *   exec_map => used during exec to handle exec args
     90         1.7     chuck  *   etc...
     91         1.7     chuck  *
     92       1.127     rmind  * The kmem_arena is a "special submap", as it lives in a fixed map entry
     93       1.127     rmind  * within the kernel_map and is controlled by vmem(9).
     94       1.126      para  *
     95         1.7     chuck  * the kernel allocates its private memory out of special uvm_objects whose
     96         1.7     chuck  * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
     97         1.7     chuck  * are "special" and never die).   all kernel objects should be thought of
     98        1.47       chs  * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
     99        1.62   thorpej  * object is equal to the size of kernel virtual address space (i.e. the
    100        1.62   thorpej  * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
    101         1.7     chuck  *
    102       1.101     pooka  * note that just because a kernel object spans the entire kernel virtual
    103         1.7     chuck  * address space doesn't mean that it has to be mapped into the entire space.
    104        1.47       chs  * large chunks of a kernel object's space go unused either because
    105        1.47       chs  * that area of kernel VM is unmapped, or there is some other type of
     106         1.7     chuck  * object mapped into that range (e.g. a vnode).    for a submap's kernel
     107         1.7     chuck  * object, the only offsets that can ever be populated are those
     108         1.7     chuck  * managed by the submap.
    109         1.7     chuck  *
    110         1.7     chuck  * note that the "offset" in a kernel object is always the kernel virtual
    111        1.62   thorpej  * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
    112         1.7     chuck  * example:
    113        1.62   thorpej  *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
    114         1.7     chuck  *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
    115         1.7     chuck  *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
    116         1.7     chuck  *   then that means that the page at offset 0x235000 in kernel_object is
    117        1.47       chs  *   mapped at 0xf8235000.
    118         1.7     chuck  *
     119         1.7     chuck  * kernel objects have one other special property: when the kernel virtual
    120         1.7     chuck  * memory mapping them is unmapped, the backing memory in the object is
    121         1.7     chuck  * freed right away.   this is done with the uvm_km_pgremove() function.
    122         1.7     chuck  * this has to be done because there is no backing store for kernel pages
    123         1.7     chuck  * and no need to save them after they are no longer referenced.
    124       1.126      para  *
    125       1.127     rmind  * Generic arenas:
    126       1.126      para  *
    127       1.127     rmind  * kmem_arena:
    128       1.127     rmind  *	Main arena controlling the kernel KVA used by other arenas.
    129       1.127     rmind  *
    130       1.127     rmind  * kmem_va_arena:
     131       1.127     rmind  *	Implements quantum caching in order to speed up allocations and
     132       1.127     rmind  *	reduce fragmentation.  The kmem(9) subsystem uses this arena, as
     133       1.127     rmind  *	does pool(9) unless a pool is created with a custom meta-data allocator.
    134       1.127     rmind  *
    135       1.127     rmind  * Arenas for meta-data allocations are used by vmem(9) and pool(9).
     136       1.127     rmind  * These arenas cannot use the quantum cache.  However, kmem_va_meta_arena
     137       1.127     rmind  * compensates for this by importing larger chunks from kmem_arena.
    138       1.127     rmind  *
    139       1.127     rmind  * kmem_va_meta_arena:
    140       1.127     rmind  *	Space for meta-data.
    141       1.127     rmind  *
    142       1.127     rmind  * kmem_meta_arena:
    143       1.127     rmind  *	Imports from kmem_va_meta_arena.  Allocations from this arena are
     144       1.127     rmind  *	backed with pages.
    145       1.127     rmind  *
    146       1.127     rmind  * Arena stacking:
    147       1.127     rmind  *
    148       1.127     rmind  *	kmem_arena
    149       1.127     rmind  *		kmem_va_arena
    150       1.127     rmind  *		kmem_va_meta_arena
    151       1.127     rmind  *			kmem_meta_arena
    152         1.7     chuck  */
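/*
 * Illustrative sketch (hypothetical helper, not part of the uvm API):
 * the kernel-object offset of a kernel virtual address is simply its
 * distance from vm_map_min(kernel_map), exactly as in the 0xf8235000
 * example above and in uvm_km_alloc() below.
 */
#if 0
static inline voff_t
example_kernel_object_offset(vaddr_t kva)
{

	return (voff_t)(kva - vm_map_min(kernel_map));
}
#endif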
    153        1.55     lukem 
    154        1.55     lukem #include <sys/cdefs.h>
    155  1.138.14.1     skrll __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.138.14.1 2015/04/06 15:18:33 skrll Exp $");
    156        1.55     lukem 
    157        1.55     lukem #include "opt_uvmhist.h"
    158         1.7     chuck 
    159       1.117      para #include "opt_kmempages.h"
    160       1.117      para 
    161       1.117      para #ifndef NKMEMPAGES
    162       1.117      para #define NKMEMPAGES 0
    163       1.117      para #endif
    164       1.117      para 
    165       1.117      para /*
     166       1.117      para  * Defaults for lower and upper bounds for the kmem_arena page count.
    167       1.117      para  * Can be overridden by kernel config options.
    168       1.117      para  */
    169       1.117      para #ifndef NKMEMPAGES_MIN
    170       1.117      para #define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
    171       1.117      para #endif
    172       1.117      para 
    173       1.117      para #ifndef NKMEMPAGES_MAX
    174       1.117      para #define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
    175       1.117      para #endif
    176       1.117      para 
    177       1.117      para 
    178         1.1       mrg #include <sys/param.h>
    179         1.1       mrg #include <sys/systm.h>
    180         1.1       mrg #include <sys/proc.h>
    181        1.72      yamt #include <sys/pool.h>
    182       1.112      para #include <sys/vmem.h>
    183       1.138      para #include <sys/vmem_impl.h>
    184       1.112      para #include <sys/kmem.h>
    185         1.1       mrg 
    186         1.1       mrg #include <uvm/uvm.h>
    187         1.1       mrg 
    188         1.1       mrg /*
    189         1.1       mrg  * global data structures
    190         1.1       mrg  */
    191         1.1       mrg 
    192        1.49       chs struct vm_map *kernel_map = NULL;
    193         1.1       mrg 
    194         1.1       mrg /*
     195         1.1       mrg  * local data structures
    196         1.1       mrg  */
    197         1.1       mrg 
    198       1.112      para static struct vm_map		kernel_map_store;
    199       1.112      para static struct vm_map_entry	kernel_image_mapent_store;
    200       1.112      para static struct vm_map_entry	kernel_kmem_mapent_store;
    201         1.1       mrg 
    202       1.117      para int nkmempages = 0;
    203       1.112      para vaddr_t kmembase;
    204       1.112      para vsize_t kmemsize;
    205        1.72      yamt 
    206       1.138      para static struct vmem kmem_arena_store;
    207       1.135      para vmem_t *kmem_arena = NULL;
    208       1.138      para static struct vmem kmem_va_arena_store;
    209       1.112      para vmem_t *kmem_va_arena;
    210        1.72      yamt 
    211        1.72      yamt /*
    212       1.117      para  * kmeminit_nkmempages: calculate the size of kmem_arena.
    213       1.117      para  */
    214       1.117      para void
    215       1.117      para kmeminit_nkmempages(void)
    216       1.117      para {
    217       1.117      para 	int npages;
    218       1.117      para 
    219       1.117      para 	if (nkmempages != 0) {
    220       1.117      para 		/*
    221       1.117      para 		 * It's already been set (by us being here before)
    222       1.117      para 		 * bail out now;
    223       1.117      para 		 */
    224       1.117      para 		return;
    225       1.117      para 	}
    226       1.117      para 
    227       1.119      para #if defined(PMAP_MAP_POOLPAGE)
    228       1.119      para 	npages = (physmem / 4);
    229       1.119      para #else
    230       1.119      para 	npages = (physmem / 3) * 2;
    231       1.119      para #endif /* defined(PMAP_MAP_POOLPAGE) */
    232       1.117      para 
    233       1.119      para #ifndef NKMEMPAGES_MAX_UNLIMITED
    234       1.117      para 	if (npages > NKMEMPAGES_MAX)
    235       1.117      para 		npages = NKMEMPAGES_MAX;
    236       1.119      para #endif
    237       1.117      para 
    238       1.117      para 	if (npages < NKMEMPAGES_MIN)
    239       1.117      para 		npages = NKMEMPAGES_MIN;
    240       1.117      para 
    241       1.117      para 	nkmempages = npages;
    242       1.117      para }
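/*
 * Worked example (assuming 4 kB pages): with physmem = 65536 pages
 * (256 MB) and PMAP_MAP_POOLPAGE defined, npages = 65536 / 4 = 16384,
 * i.e. a 64 MB kmem_arena; without PMAP_MAP_POOLPAGE it would be
 * (65536 / 3) * 2 = 43690 pages.  Either result is then clamped to
 * NKMEMPAGES_MIN and, unless NKMEMPAGES_MAX_UNLIMITED is defined,
 * to NKMEMPAGES_MAX.
 */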
    243       1.117      para 
    244       1.117      para /*
    245       1.112      para  * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
    246         1.1       mrg  * KVM already allocated for text, data, bss, and static data structures).
    247         1.1       mrg  *
    248        1.62   thorpej  * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
     249        1.82  christos  *    we assume that [VM_MIN_KERNEL_ADDRESS -> start] has already been
     250        1.62   thorpej  *    allocated and that "end" is the end.
    251         1.1       mrg  */
    252         1.1       mrg 
    253         1.8       mrg void
    254       1.112      para uvm_km_bootstrap(vaddr_t start, vaddr_t end)
    255         1.1       mrg {
    256       1.119      para 	bool kmem_arena_small;
    257        1.62   thorpej 	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
    258       1.118      matt 	struct uvm_map_args args;
    259       1.118      matt 	int error;
    260       1.118      matt 
    261       1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
     262       1.118      matt 	UVMHIST_LOG(maphist, "start=%#"PRIxVADDR" end=%#"PRIxVADDR,
    263       1.118      matt 	    start, end, 0,0);
    264        1.27   thorpej 
    265       1.117      para 	kmeminit_nkmempages();
    266       1.119      para 	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
    267       1.119      para 	kmem_arena_small = kmemsize < 64 * 1024 * 1024;
    268       1.112      para 
    269       1.118      matt 	UVMHIST_LOG(maphist, "kmemsize=%#"PRIxVSIZE, kmemsize, 0,0,0);
    270       1.118      matt 
    271        1.27   thorpej 	/*
    272        1.27   thorpej 	 * next, init kernel memory objects.
    273         1.8       mrg 	 */
    274         1.1       mrg 
    275         1.8       mrg 	/* kernel_object: for pageable anonymous kernel memory */
    276        1.95        ad 	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
    277       1.112      para 				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
    278         1.1       mrg 
    279        1.24   thorpej 	/*
     280        1.56   thorpej 	 * init the map and reserve any space that might already
     281        1.56   thorpej 	 * have been allocated as kernel space before installing.
    282         1.8       mrg 	 */
    283         1.1       mrg 
    284       1.112      para 	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
    285       1.112      para 	kernel_map_store.pmap = pmap_kernel();
    286        1.70      yamt 	if (start != base) {
    287       1.112      para 		error = uvm_map_prepare(&kernel_map_store,
    288        1.71      yamt 		    base, start - base,
    289        1.70      yamt 		    NULL, UVM_UNKNOWN_OFFSET, 0,
    290        1.62   thorpej 		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    291        1.70      yamt 		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    292        1.70      yamt 		if (!error) {
    293       1.112      para 			kernel_image_mapent_store.flags =
    294       1.112      para 			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    295       1.112      para 			error = uvm_map_enter(&kernel_map_store, &args,
    296       1.112      para 			    &kernel_image_mapent_store);
    297        1.70      yamt 		}
    298        1.70      yamt 
    299        1.70      yamt 		if (error)
    300        1.70      yamt 			panic(
    301       1.112      para 			    "uvm_km_bootstrap: could not reserve space for kernel");
    302       1.112      para 
    303       1.112      para 		kmembase = args.uma_start + args.uma_size;
    304       1.114      matt 	} else {
    305       1.114      matt 		kmembase = base;
    306        1.70      yamt 	}
    307        1.47       chs 
    308       1.118      matt 	error = uvm_map_prepare(&kernel_map_store,
    309       1.118      matt 	    kmembase, kmemsize,
    310       1.118      matt 	    NULL, UVM_UNKNOWN_OFFSET, 0,
    311       1.118      matt 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    312       1.118      matt 	    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    313       1.118      matt 	if (!error) {
    314       1.118      matt 		kernel_kmem_mapent_store.flags =
    315       1.118      matt 		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    316       1.118      matt 		error = uvm_map_enter(&kernel_map_store, &args,
    317       1.118      matt 		    &kernel_kmem_mapent_store);
    318       1.118      matt 	}
    319       1.118      matt 
    320       1.118      matt 	if (error)
    321       1.118      matt 		panic("uvm_km_bootstrap: could not reserve kernel kmem");
    322       1.118      matt 
    323         1.8       mrg 	/*
    324         1.8       mrg 	 * install!
    325         1.8       mrg 	 */
    326         1.8       mrg 
    327       1.112      para 	kernel_map = &kernel_map_store;
    328       1.112      para 
    329       1.112      para 	pool_subsystem_init();
    330       1.112      para 
    331       1.138      para 	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
    332       1.138      para 	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
    333       1.112      para 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    334       1.135      para #ifdef PMAP_GROWKERNEL
    335       1.135      para 	/*
    336       1.135      para 	 * kmem_arena VA allocations happen independently of uvm_map.
    337       1.135      para 	 * grow kernel to accommodate the kmem_arena.
    338       1.135      para 	 */
    339       1.135      para 	if (uvm_maxkaddr < kmembase + kmemsize) {
    340       1.135      para 		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
    341       1.135      para 		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
    342       1.135      para 		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
    343       1.135      para 		    uvm_maxkaddr, kmembase, kmemsize);
    344       1.135      para 	}
    345       1.135      para #endif
    346       1.112      para 
    347       1.138      para 	vmem_subsystem_init(kmem_arena);
    348       1.112      para 
    349       1.118      matt 	UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
     350       1.118      matt 	    ", size=%#"PRIxVSIZE")", kmembase, kmemsize, 0,0);
    351       1.118      matt 
    352       1.138      para 	kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
    353       1.138      para 	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
    354       1.138      para 	    (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
    355       1.138      para 	    VM_NOSLEEP, IPL_VM);
    356       1.118      matt 
    357       1.118      matt 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
    358       1.112      para }
    359       1.112      para 
    360       1.112      para /*
     361       1.112      para  * uvm_km_init: init the kernel map's virtual memory caches
    362       1.112      para  * and start the pool/kmem allocator.
    363       1.112      para  */
    364       1.112      para void
    365       1.112      para uvm_km_init(void)
    366       1.112      para {
    367       1.112      para 	kmem_init();
    368         1.1       mrg }
    369         1.1       mrg 
    370         1.1       mrg /*
    371         1.1       mrg  * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
    372         1.1       mrg  * is allocated all references to that area of VM must go through it.  this
    373         1.1       mrg  * allows the locking of VAs in kernel_map to be broken up into regions.
    374         1.1       mrg  *
     375        1.82  christos  * => if `fixed' is true, *vmin specifies where the region described
     376         1.5   thorpej  *      by the submap must start
    378         1.1       mrg  * => if submap is non NULL we use that as the submap, otherwise we
    379         1.1       mrg  *	alloc a new map
    380         1.1       mrg  */
    381        1.78      yamt 
    382         1.8       mrg struct vm_map *
    383        1.83   thorpej uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    384        1.93   thorpej     vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    385       1.112      para     struct vm_map *submap)
    386         1.8       mrg {
    387         1.8       mrg 	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
    388       1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    389         1.1       mrg 
    390        1.71      yamt 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    391        1.71      yamt 
    392         1.8       mrg 	size = round_page(size);	/* round up to pagesize */
    393         1.1       mrg 
    394         1.8       mrg 	/*
    395         1.8       mrg 	 * first allocate a blank spot in the parent map
    396         1.8       mrg 	 */
    397         1.8       mrg 
    398        1.82  christos 	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
    399         1.8       mrg 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    400        1.43       chs 	    UVM_ADV_RANDOM, mapflags)) != 0) {
    401       1.118      matt 		panic("%s: unable to allocate space in parent map", __func__);
    402         1.8       mrg 	}
    403         1.8       mrg 
    404         1.8       mrg 	/*
    405        1.82  christos 	 * set VM bounds (vmin is filled in by uvm_map)
    406         1.8       mrg 	 */
    407         1.1       mrg 
    408        1.82  christos 	*vmax = *vmin + size;
    409         1.5   thorpej 
    410         1.8       mrg 	/*
    411         1.8       mrg 	 * add references to pmap and create or init the submap
    412         1.8       mrg 	 */
    413         1.1       mrg 
    414         1.8       mrg 	pmap_reference(vm_map_pmap(map));
    415         1.8       mrg 	if (submap == NULL) {
    416       1.112      para 		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
    417         1.8       mrg 		if (submap == NULL)
    418         1.8       mrg 			panic("uvm_km_suballoc: unable to create submap");
    419         1.8       mrg 	}
    420       1.112      para 	uvm_map_setup(submap, *vmin, *vmax, flags);
    421       1.112      para 	submap->pmap = vm_map_pmap(map);
    422         1.1       mrg 
    423         1.8       mrg 	/*
     424         1.8       mrg 	 * now let uvm_map_submap plug it in...
    425         1.8       mrg 	 */
    426         1.1       mrg 
    427       1.112      para 	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
    428         1.8       mrg 		panic("uvm_km_suballoc: submap allocation failed");
    429         1.1       mrg 
    430       1.112      para 	return(submap);
    431         1.1       mrg }
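/*
 * Hedged usage sketch (the map pointer and size here are hypothetical):
 * a typical cpu_startup()-style caller carves a pageable submap out of
 * kernel_map like this, letting uvm_map() choose the start address.
 */
#if 0
	vaddr_t minaddr = 0, maxaddr;
	struct vm_map *example_map;

	example_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE, VM_MAP_PAGEABLE, false, NULL);
#endif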
    432         1.1       mrg 
    433         1.1       mrg /*
    434       1.110      yamt  * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
    435         1.1       mrg  */
    436         1.1       mrg 
    437         1.8       mrg void
    438        1.83   thorpej uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
    439         1.1       mrg {
    440        1.95        ad 	struct uvm_object * const uobj = uvm_kernel_object;
    441        1.78      yamt 	const voff_t start = startva - vm_map_min(kernel_map);
    442        1.78      yamt 	const voff_t end = endva - vm_map_min(kernel_map);
    443        1.53       chs 	struct vm_page *pg;
    444        1.52       chs 	voff_t curoff, nextoff;
    445        1.53       chs 	int swpgonlydelta = 0;
    446       1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    447         1.1       mrg 
    448        1.78      yamt 	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
    449        1.78      yamt 	KASSERT(startva < endva);
    450        1.86      yamt 	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
    451        1.78      yamt 
    452       1.109     rmind 	mutex_enter(uobj->vmobjlock);
    453       1.110      yamt 	pmap_remove(pmap_kernel(), startva, endva);
    454        1.52       chs 	for (curoff = start; curoff < end; curoff = nextoff) {
    455        1.52       chs 		nextoff = curoff + PAGE_SIZE;
    456        1.52       chs 		pg = uvm_pagelookup(uobj, curoff);
    457        1.53       chs 		if (pg != NULL && pg->flags & PG_BUSY) {
    458        1.52       chs 			pg->flags |= PG_WANTED;
    459       1.109     rmind 			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
    460        1.52       chs 				    "km_pgrm", 0);
    461       1.109     rmind 			mutex_enter(uobj->vmobjlock);
    462        1.52       chs 			nextoff = curoff;
    463         1.8       mrg 			continue;
    464        1.52       chs 		}
    465         1.8       mrg 
    466        1.52       chs 		/*
    467        1.52       chs 		 * free the swap slot, then the page.
    468        1.52       chs 		 */
    469         1.8       mrg 
    470        1.53       chs 		if (pg == NULL &&
    471        1.64        pk 		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
    472        1.53       chs 			swpgonlydelta++;
    473        1.53       chs 		}
    474        1.52       chs 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
    475        1.53       chs 		if (pg != NULL) {
    476        1.97        ad 			mutex_enter(&uvm_pageqlock);
    477        1.53       chs 			uvm_pagefree(pg);
    478        1.97        ad 			mutex_exit(&uvm_pageqlock);
    479        1.53       chs 		}
    480         1.8       mrg 	}
    481       1.109     rmind 	mutex_exit(uobj->vmobjlock);
    482         1.8       mrg 
    483        1.54       chs 	if (swpgonlydelta > 0) {
    484        1.95        ad 		mutex_enter(&uvm_swap_data_lock);
    485        1.54       chs 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    486        1.54       chs 		uvmexp.swpgonly -= swpgonlydelta;
    487        1.95        ad 		mutex_exit(&uvm_swap_data_lock);
    488        1.54       chs 	}
    489        1.24   thorpej }
    490        1.24   thorpej 
    491        1.24   thorpej 
    492        1.24   thorpej /*
    493        1.78      yamt  * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed
    494        1.78      yamt  *    regions.
    495        1.24   thorpej  *
    496        1.24   thorpej  * => when you unmap a part of anonymous kernel memory you want to toss
    497        1.52       chs  *    the pages right away.    (this is called from uvm_unmap_...).
    498        1.24   thorpej  * => none of the pages will ever be busy, and none of them will ever
    499        1.52       chs  *    be on the active or inactive queues (because they have no object).
    500        1.24   thorpej  */
    501        1.24   thorpej 
    502        1.24   thorpej void
    503       1.102        ad uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
    504        1.24   thorpej {
    505       1.122    bouyer #define __PGRM_BATCH 16
    506        1.52       chs 	struct vm_page *pg;
    507       1.122    bouyer 	paddr_t pa[__PGRM_BATCH];
    508       1.122    bouyer 	int npgrm, i;
    509       1.122    bouyer 	vaddr_t va, batch_vastart;
    510       1.122    bouyer 
    511       1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    512        1.24   thorpej 
    513       1.102        ad 	KASSERT(VM_MAP_IS_KERNEL(map));
    514       1.128      matt 	KASSERTMSG(vm_map_min(map) <= start,
    515       1.128      matt 	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
    516       1.128      matt 	    " (size=%#"PRIxVSIZE")",
    517       1.128      matt 	    vm_map_min(map), start, end - start);
    518        1.78      yamt 	KASSERT(start < end);
    519       1.102        ad 	KASSERT(end <= vm_map_max(map));
    520        1.78      yamt 
    521       1.122    bouyer 	for (va = start; va < end;) {
    522       1.122    bouyer 		batch_vastart = va;
    523       1.122    bouyer 		/* create a batch of at most __PGRM_BATCH pages to free */
    524       1.122    bouyer 		for (i = 0;
    525       1.122    bouyer 		     i < __PGRM_BATCH && va < end;
    526       1.122    bouyer 		     va += PAGE_SIZE) {
    527       1.122    bouyer 			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
    528       1.122    bouyer 				continue;
    529       1.122    bouyer 			}
    530       1.122    bouyer 			i++;
    531       1.122    bouyer 		}
    532       1.122    bouyer 		npgrm = i;
    533       1.122    bouyer 		/* now remove the mappings */
    534       1.124    bouyer 		pmap_kremove(batch_vastart, va - batch_vastart);
    535       1.122    bouyer 		/* and free the pages */
    536       1.122    bouyer 		for (i = 0; i < npgrm; i++) {
    537       1.122    bouyer 			pg = PHYS_TO_VM_PAGE(pa[i]);
    538       1.122    bouyer 			KASSERT(pg);
    539       1.122    bouyer 			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
    540       1.122    bouyer 			KASSERT((pg->flags & PG_BUSY) == 0);
    541       1.122    bouyer 			uvm_pagefree(pg);
    542        1.40       chs 		}
    543        1.24   thorpej 	}
    544       1.122    bouyer #undef __PGRM_BATCH
    545         1.1       mrg }
    546         1.1       mrg 
    547        1.78      yamt #if defined(DEBUG)
    548        1.78      yamt void
    549       1.102        ad uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
    550        1.78      yamt {
    551       1.102        ad 	struct vm_page *pg;
    552        1.78      yamt 	vaddr_t va;
    553        1.78      yamt 	paddr_t pa;
    554       1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    555        1.78      yamt 
    556       1.102        ad 	KDASSERT(VM_MAP_IS_KERNEL(map));
    557       1.102        ad 	KDASSERT(vm_map_min(map) <= start);
    558        1.78      yamt 	KDASSERT(start < end);
    559       1.102        ad 	KDASSERT(end <= vm_map_max(map));
    560        1.78      yamt 
    561        1.78      yamt 	for (va = start; va < end; va += PAGE_SIZE) {
    562        1.78      yamt 		if (pmap_extract(pmap_kernel(), va, &pa)) {
    563        1.81    simonb 			panic("uvm_km_check_empty: va %p has pa 0x%llx",
    564        1.81    simonb 			    (void *)va, (long long)pa);
    565        1.78      yamt 		}
    566       1.121     rmind 		mutex_enter(uvm_kernel_object->vmobjlock);
    567       1.121     rmind 		pg = uvm_pagelookup(uvm_kernel_object,
    568       1.121     rmind 		    va - vm_map_min(kernel_map));
    569       1.121     rmind 		mutex_exit(uvm_kernel_object->vmobjlock);
    570       1.121     rmind 		if (pg) {
    571       1.121     rmind 			panic("uvm_km_check_empty: "
    572       1.121     rmind 			    "has page hashed at %p", (const void *)va);
    573        1.78      yamt 		}
    574        1.78      yamt 	}
    575        1.78      yamt }
    576        1.78      yamt #endif /* defined(DEBUG) */
    577         1.1       mrg 
    578         1.1       mrg /*
    579        1.78      yamt  * uvm_km_alloc: allocate an area of kernel memory.
    580         1.1       mrg  *
     581        1.78      yamt  * => NOTE: we can return 0 even when we are allowed to wait, if there
     582         1.1       mrg  *	is not enough free VM space in the map... caller should be
     583         1.1       mrg  *	prepared to handle this case.
    584         1.1       mrg  * => we return KVA of memory allocated
    585         1.1       mrg  */
    586         1.1       mrg 
    587        1.14       eeh vaddr_t
    588        1.83   thorpej uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    589         1.1       mrg {
    590        1.14       eeh 	vaddr_t kva, loopva;
    591        1.14       eeh 	vaddr_t offset;
    592        1.44   thorpej 	vsize_t loopsize;
    593         1.8       mrg 	struct vm_page *pg;
    594        1.78      yamt 	struct uvm_object *obj;
    595        1.78      yamt 	int pgaflags;
    596        1.89  drochner 	vm_prot_t prot;
    597        1.78      yamt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    598         1.1       mrg 
    599        1.40       chs 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    600        1.78      yamt 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    601        1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    602        1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    603       1.111      matt 	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
    604       1.111      matt 	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);
    605         1.1       mrg 
    606         1.8       mrg 	/*
    607         1.8       mrg 	 * setup for call
    608         1.8       mrg 	 */
    609         1.8       mrg 
    610        1.78      yamt 	kva = vm_map_min(map);	/* hint */
    611         1.8       mrg 	size = round_page(size);
    612        1.95        ad 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
    613        1.78      yamt 	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
    614        1.78      yamt 		    map, obj, size, flags);
    615         1.1       mrg 
    616         1.8       mrg 	/*
    617         1.8       mrg 	 * allocate some virtual space
    618         1.8       mrg 	 */
    619         1.8       mrg 
    620        1.78      yamt 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
    621        1.78      yamt 	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    622        1.78      yamt 	    UVM_ADV_RANDOM,
    623       1.111      matt 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
    624       1.112      para 	     | UVM_KMF_COLORMATCH)))) != 0)) {
    625         1.8       mrg 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    626         1.8       mrg 		return(0);
    627         1.8       mrg 	}
    628         1.8       mrg 
    629         1.8       mrg 	/*
    630         1.8       mrg 	 * if all we wanted was VA, return now
    631         1.8       mrg 	 */
    632         1.8       mrg 
    633        1.78      yamt 	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
    634         1.8       mrg 		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
    635         1.8       mrg 		return(kva);
    636         1.8       mrg 	}
    637        1.40       chs 
    638         1.8       mrg 	/*
    639         1.8       mrg 	 * recover object offset from virtual address
    640         1.8       mrg 	 */
    641         1.8       mrg 
    642         1.8       mrg 	offset = kva - vm_map_min(kernel_map);
    643         1.8       mrg 	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);
    644         1.8       mrg 
    645         1.8       mrg 	/*
    646         1.8       mrg 	 * now allocate and map in the memory... note that we are the only ones
     647         1.8       mrg 	 * who should ever get a handle on this area of VM.
    648         1.8       mrg 	 */
    649         1.8       mrg 
    650         1.8       mrg 	loopva = kva;
    651        1.44   thorpej 	loopsize = size;
    652        1.78      yamt 
    653       1.107      matt 	pgaflags = UVM_FLAG_COLORMATCH;
    654       1.103        ad 	if (flags & UVM_KMF_NOWAIT)
    655       1.103        ad 		pgaflags |= UVM_PGA_USERESERVE;
    656        1.78      yamt 	if (flags & UVM_KMF_ZERO)
    657        1.78      yamt 		pgaflags |= UVM_PGA_ZERO;
    658        1.89  drochner 	prot = VM_PROT_READ | VM_PROT_WRITE;
    659        1.89  drochner 	if (flags & UVM_KMF_EXEC)
    660        1.89  drochner 		prot |= VM_PROT_EXECUTE;
    661        1.44   thorpej 	while (loopsize) {
    662       1.114      matt 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
    663       1.114      matt 		    "loopva=%#"PRIxVADDR, loopva);
    664        1.78      yamt 
    665       1.107      matt 		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
    666       1.107      matt #ifdef UVM_KM_VMFREELIST
    667       1.107      matt 		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
    668       1.107      matt #else
    669       1.107      matt 		   UVM_PGA_STRAT_NORMAL, 0
    670       1.107      matt #endif
    671       1.107      matt 		   );
    672        1.47       chs 
    673         1.8       mrg 		/*
    674         1.8       mrg 		 * out of memory?
    675         1.8       mrg 		 */
    676         1.8       mrg 
    677        1.35   thorpej 		if (__predict_false(pg == NULL)) {
    678        1.58       chs 			if ((flags & UVM_KMF_NOWAIT) ||
    679        1.80      yamt 			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
    680         1.8       mrg 				/* free everything! */
    681        1.78      yamt 				uvm_km_free(map, kva, size,
    682        1.78      yamt 				    flags & UVM_KMF_TYPEMASK);
    683        1.58       chs 				return (0);
    684         1.8       mrg 			} else {
    685         1.8       mrg 				uvm_wait("km_getwait2");	/* sleep here */
    686         1.8       mrg 				continue;
    687         1.8       mrg 			}
    688         1.8       mrg 		}
    689        1.47       chs 
    690        1.78      yamt 		pg->flags &= ~PG_BUSY;	/* new page */
    691        1.78      yamt 		UVM_PAGE_OWN(pg, NULL);
    692        1.78      yamt 
    693         1.8       mrg 		/*
    694        1.52       chs 		 * map it in
    695         1.8       mrg 		 */
    696        1.40       chs 
    697       1.104    cegger 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    698       1.106    cegger 		    prot, PMAP_KMPAGE);
    699         1.8       mrg 		loopva += PAGE_SIZE;
    700         1.8       mrg 		offset += PAGE_SIZE;
    701        1.44   thorpej 		loopsize -= PAGE_SIZE;
    702         1.8       mrg 	}
    703        1.69  junyoung 
    704       1.112      para 	pmap_update(pmap_kernel());
    705        1.69  junyoung 
    706         1.8       mrg 	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
    707         1.8       mrg 	return(kva);
    708         1.1       mrg }
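/*
 * Hedged usage sketch (hypothetical caller, not part of this file):
 * allocate one page of wired, zeroed kernel memory, failing softly via
 * UVM_KMF_CANFAIL instead of sleeping forever.  Note that the matching
 * uvm_km_free() must pass the same UVM_KMF_ type flag (UVM_KMF_WIRED).
 */
#if 0
static int
example_grab_page(vaddr_t *vap)
{
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (va == 0)
		return ENOMEM;
	*vap = va;
	return 0;
}

static void
example_drop_page(vaddr_t va)
{

	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
}
#endif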
    709         1.1       mrg 
    710         1.1       mrg /*
    711         1.1       mrg  * uvm_km_free: free an area of kernel memory
    712         1.1       mrg  */
    713         1.1       mrg 
    714         1.8       mrg void
    715        1.83   thorpej uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
    716         1.8       mrg {
    717       1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    718         1.1       mrg 
    719        1.78      yamt 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    720        1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    721        1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    722        1.78      yamt 	KASSERT((addr & PAGE_MASK) == 0);
    723        1.40       chs 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    724         1.1       mrg 
    725         1.8       mrg 	size = round_page(size);
    726         1.1       mrg 
    727        1.78      yamt 	if (flags & UVM_KMF_PAGEABLE) {
    728        1.78      yamt 		uvm_km_pgremove(addr, addr + size);
    729        1.78      yamt 	} else if (flags & UVM_KMF_WIRED) {
    730       1.109     rmind 		/*
     731       1.109     rmind 		 * Note: uvm_km_pgremove_intrsafe() extracts the mappings,
     732       1.109     rmind 		 * so the unmap must come after it.  See the comment below
     733       1.109     rmind 		 * about KVA visibility.
    733       1.109     rmind 		 */
    734       1.102        ad 		uvm_km_pgremove_intrsafe(map, addr, addr + size);
    735         1.8       mrg 	}
    736        1.99      yamt 
    737        1.99      yamt 	/*
    738       1.109     rmind 	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
    739       1.109     rmind 	 * KVA becomes globally available.
    740        1.99      yamt 	 */
    741         1.8       mrg 
    742       1.112      para 	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
    743        1.66        pk }
    744        1.66        pk 
    745        1.10   thorpej /* Sanity; must specify both or none. */
    746        1.10   thorpej #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    747        1.10   thorpej     (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
    748        1.10   thorpej #error Must specify MAP and UNMAP together.
    749        1.10   thorpej #endif
    750        1.10   thorpej 
    751       1.112      para int
    752       1.112      para uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    753       1.112      para     vmem_addr_t *addr)
    754        1.72      yamt {
    755        1.72      yamt 	struct vm_page *pg;
    756       1.112      para 	vmem_addr_t va;
    757       1.112      para 	int rc;
    758       1.112      para 	vaddr_t loopva;
    759       1.112      para 	vsize_t loopsize;
    760        1.72      yamt 
    761       1.112      para 	size = round_page(size);
    762        1.72      yamt 
    763       1.112      para #if defined(PMAP_MAP_POOLPAGE)
    764       1.112      para 	if (size == PAGE_SIZE) {
    765        1.72      yamt again:
    766       1.112      para #ifdef PMAP_ALLOC_POOLPAGE
    767       1.112      para 		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
    768       1.112      para 		   0 : UVM_PGA_USERESERVE);
    769       1.112      para #else
    770       1.112      para 		pg = uvm_pagealloc(NULL, 0, NULL,
    771       1.112      para 		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
    772       1.112      para #endif /* PMAP_ALLOC_POOLPAGE */
    773       1.112      para 		if (__predict_false(pg == NULL)) {
    774       1.112      para 			if (flags & VM_SLEEP) {
    775       1.112      para 				uvm_wait("plpg");
    776       1.112      para 				goto again;
    777       1.112      para 			}
    778       1.123     rmind 			return ENOMEM;
    779       1.112      para 		}
    780       1.112      para 		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
    781       1.112      para 		if (__predict_false(va == 0)) {
    782       1.112      para 			uvm_pagefree(pg);
    783       1.112      para 			return ENOMEM;
    784        1.72      yamt 		}
    785       1.112      para 		*addr = va;
    786       1.112      para 		return 0;
    787        1.72      yamt 	}
    788       1.112      para #endif /* PMAP_MAP_POOLPAGE */
    789       1.112      para 
    790       1.112      para 	rc = vmem_alloc(vm, size, flags, &va);
    791       1.112      para 	if (rc != 0)
    792       1.112      para 		return rc;
    793        1.72      yamt 
    794       1.130      matt #ifdef PMAP_GROWKERNEL
    795       1.130      matt 	/*
    796       1.135      para 	 * These VA allocations happen independently of uvm_map
    797       1.135      para 	 * so this allocation must not extend beyond the current limit.
    798       1.135      para 	 */
    799       1.135      para 	KASSERTMSG(uvm_maxkaddr >= va + size,
    800       1.135      para 	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
    801       1.135      para 	    uvm_maxkaddr, va, size);
    802       1.130      matt #endif
    803       1.130      matt 
    804       1.112      para 	loopva = va;
    805       1.112      para 	loopsize = size;
    806        1.72      yamt 
    807       1.112      para 	while (loopsize) {
    808       1.128      matt #ifdef DIAGNOSTIC
    809       1.128      matt 		paddr_t pa;
    810       1.128      matt #endif
    811       1.128      matt 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
    812       1.128      matt 		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
    813       1.128      matt 		    " pa=%#"PRIxPADDR" vmem=%p",
    814       1.128      matt 		    loopva, loopsize, pa, vm);
    815       1.114      matt 
    816       1.114      matt 		pg = uvm_pagealloc(NULL, loopva, NULL,
    817       1.115      matt 		    UVM_FLAG_COLORMATCH
    818       1.114      matt 		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
    819       1.112      para 		if (__predict_false(pg == NULL)) {
    820       1.112      para 			if (flags & VM_SLEEP) {
    821       1.112      para 				uvm_wait("plpg");
    822       1.112      para 				continue;
    823       1.112      para 			} else {
    824       1.112      para 				uvm_km_pgremove_intrsafe(kernel_map, va,
    825       1.112      para 				    va + size);
    826       1.125      yamt 				vmem_free(vm, va, size);
    827       1.112      para 				return ENOMEM;
    828       1.112      para 			}
    829       1.112      para 		}
    830       1.123     rmind 
    831       1.112      para 		pg->flags &= ~PG_BUSY;	/* new page */
    832       1.112      para 		UVM_PAGE_OWN(pg, NULL);
    833       1.112      para 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    834       1.112      para 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
    835       1.107      matt 
    836       1.112      para 		loopva += PAGE_SIZE;
    837       1.112      para 		loopsize -= PAGE_SIZE;
    838        1.15   thorpej 	}
    839       1.112      para 	pmap_update(pmap_kernel());
    840       1.112      para 
    841       1.112      para 	*addr = va;
    842        1.16   thorpej 
    843       1.112      para 	return 0;
    844        1.10   thorpej }
    845        1.10   thorpej 
    846        1.10   thorpej void
    847       1.112      para uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
    848        1.72      yamt {
    849       1.112      para 
    850       1.112      para 	size = round_page(size);
    851        1.72      yamt #if defined(PMAP_UNMAP_POOLPAGE)
    852       1.112      para 	if (size == PAGE_SIZE) {
    853       1.112      para 		paddr_t pa;
    854        1.72      yamt 
    855       1.112      para 		pa = PMAP_UNMAP_POOLPAGE(addr);
    856       1.112      para 		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
    857        1.72      yamt 		return;
    858        1.72      yamt 	}
    859       1.112      para #endif /* PMAP_UNMAP_POOLPAGE */
    860       1.112      para 	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
    861       1.112      para 	pmap_update(pmap_kernel());
    862        1.72      yamt 
    863       1.112      para 	vmem_free(vm, addr, size);
    864        1.72      yamt }
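/*
 * Hedged example (modelled on how kmem(9) stacks its arenas; the arena
 * name and variable are hypothetical): uvm_km_kmem_alloc/uvm_km_kmem_free
 * match the vmem(9) import/release signatures, so a wired-memory arena
 * can import page-backed KVA from kmem_va_arena like this.
 */
#if 0
	vmem_t *example_arena;

	example_arena = vmem_create("examplekmem", 0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_arena,
	    0, VM_SLEEP, IPL_NONE);
#endif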
    865        1.72      yamt 
    866       1.112      para bool
    867       1.112      para uvm_km_va_starved_p(void)
    868        1.10   thorpej {
    869       1.112      para 	vmem_size_t total;
    870       1.112      para 	vmem_size_t free;
    871       1.112      para 
    872       1.135      para 	if (kmem_arena == NULL)
    873       1.135      para 		return false;
    874       1.135      para 
    875       1.112      para 	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
    876       1.112      para 	free = vmem_size(kmem_arena, VMEM_FREE);
    877        1.10   thorpej 
    878       1.112      para 	return (free < (total / 10));
    879         1.1       mrg }
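/*
 * Hedged usage sketch (the drain hook is hypothetical): subsystems that
 * cache kernel VA can poll uvm_km_va_starved_p() and release cached
 * space once less than 10% of the kmem_arena remains free.
 */
#if 0
	if (uvm_km_va_starved_p())
		example_drain_kva_caches();	/* hypothetical callback */
#endif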
    880