uvm_km.c revision 1.155
      1  1.155        ad /*	$NetBSD: uvm_km.c,v 1.155 2020/02/23 15:46:43 ad Exp $	*/
      2    1.1       mrg 
      3   1.47       chs /*
      4    1.1       mrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5   1.47       chs  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6    1.1       mrg  *
      7    1.1       mrg  * All rights reserved.
      8    1.1       mrg  *
      9    1.1       mrg  * This code is derived from software contributed to Berkeley by
     10    1.1       mrg  * The Mach Operating System project at Carnegie-Mellon University.
     11    1.1       mrg  *
     12    1.1       mrg  * Redistribution and use in source and binary forms, with or without
     13    1.1       mrg  * modification, are permitted provided that the following conditions
     14    1.1       mrg  * are met:
     15    1.1       mrg  * 1. Redistributions of source code must retain the above copyright
     16    1.1       mrg  *    notice, this list of conditions and the following disclaimer.
     17    1.1       mrg  * 2. Redistributions in binary form must reproduce the above copyright
     18    1.1       mrg  *    notice, this list of conditions and the following disclaimer in the
     19    1.1       mrg  *    documentation and/or other materials provided with the distribution.
     20  1.108     chuck  * 3. Neither the name of the University nor the names of its contributors
     21    1.1       mrg  *    may be used to endorse or promote products derived from this software
     22    1.1       mrg  *    without specific prior written permission.
     23    1.1       mrg  *
     24    1.1       mrg  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25    1.1       mrg  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26    1.1       mrg  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27    1.1       mrg  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28    1.1       mrg  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29    1.1       mrg  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30    1.1       mrg  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31    1.1       mrg  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32    1.1       mrg  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33    1.1       mrg  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34    1.1       mrg  * SUCH DAMAGE.
     35    1.1       mrg  *
     36    1.1       mrg  *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
     37    1.4       mrg  * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
     38    1.1       mrg  *
     39    1.1       mrg  *
     40    1.1       mrg  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     41    1.1       mrg  * All rights reserved.
     42   1.47       chs  *
     43    1.1       mrg  * Permission to use, copy, modify and distribute this software and
     44    1.1       mrg  * its documentation is hereby granted, provided that both the copyright
     45    1.1       mrg  * notice and this permission notice appear in all copies of the
     46    1.1       mrg  * software, derivative works or modified versions, and any portions
     47    1.1       mrg  * thereof, and that both notices appear in supporting documentation.
     48   1.47       chs  *
     49   1.47       chs  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     50   1.47       chs  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     51    1.1       mrg  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     52   1.47       chs  *
     53    1.1       mrg  * Carnegie Mellon requests users of this software to return to
     54    1.1       mrg  *
      55    1.1       mrg  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     56    1.1       mrg  *  School of Computer Science
     57    1.1       mrg  *  Carnegie Mellon University
     58    1.1       mrg  *  Pittsburgh PA 15213-3890
     59    1.1       mrg  *
     60    1.1       mrg  * any improvements or extensions that they make and grant Carnegie the
     61    1.1       mrg  * rights to redistribute these changes.
     62    1.1       mrg  */
     63    1.6       mrg 
     64    1.1       mrg /*
     65    1.1       mrg  * uvm_km.c: handle kernel memory allocation and management
     66    1.1       mrg  */
     67    1.1       mrg 
     68    1.7     chuck /*
     69    1.7     chuck  * overview of kernel memory management:
     70    1.7     chuck  *
     71    1.7     chuck  * the kernel virtual address space is mapped by "kernel_map."   kernel_map
     72   1.62   thorpej  * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
     73   1.62   thorpej  * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
     74    1.7     chuck  *
     75   1.47       chs  * the kernel_map has several "submaps."   submaps can only appear in
     76    1.7     chuck  * the kernel_map (user processes can't use them).   submaps "take over"
     77    1.7     chuck  * the management of a sub-range of the kernel's address space.  submaps
     78    1.7     chuck  * are typically allocated at boot time and are never released.   kernel
     79   1.47       chs  * virtual address space that is mapped by a submap is locked by the
     80    1.7     chuck  * submap's lock -- not the kernel_map's lock.
     81    1.7     chuck  *
     82    1.7     chuck  * thus, the useful feature of submaps is that they allow us to break
     83    1.7     chuck  * up the locking and protection of the kernel address space into smaller
     84    1.7     chuck  * chunks.
     85    1.7     chuck  *
     86  1.126      para  * the vm system has several standard kernel submaps/arenas, including:
     87  1.126      para  *   kmem_arena => used for kmem/pool (memoryallocators(9))
     88    1.7     chuck  *   pager_map => used to map "buf" structures into kernel space
     89    1.7     chuck  *   exec_map => used during exec to handle exec args
     90    1.7     chuck  *   etc...
     91    1.7     chuck  *
     92  1.127     rmind  * The kmem_arena is a "special submap", as it lives in a fixed map entry
     93  1.127     rmind  * within the kernel_map and is controlled by vmem(9).
     94  1.126      para  *
     95    1.7     chuck  * the kernel allocates its private memory out of special uvm_objects whose
     96    1.7     chuck  * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
     97    1.7     chuck  * are "special" and never die).   all kernel objects should be thought of
      98   1.47       chs  * as large, fixed-size, sparsely populated uvm_objects.   the size of each
      99   1.62   thorpej  * kernel object is equal to the size of the kernel virtual address space
     100   1.62   thorpej  * (i.e. the value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
    101    1.7     chuck  *
    102  1.101     pooka  * note that just because a kernel object spans the entire kernel virtual
    103    1.7     chuck  * address space doesn't mean that it has to be mapped into the entire space.
    104   1.47       chs  * large chunks of a kernel object's space go unused either because
    105   1.47       chs  * that area of kernel VM is unmapped, or there is some other type of
    106    1.7     chuck  * object mapped into that range (e.g. a vnode).    for submap's kernel
    107    1.7     chuck  * objects, the only part of the object that can ever be populated is the
    108    1.7     chuck  * offsets that are managed by the submap.
    109    1.7     chuck  *
    110    1.7     chuck  * note that the "offset" in a kernel object is always the kernel virtual
    111   1.62   thorpej  * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
    112    1.7     chuck  * example:
    113   1.62   thorpej  *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
    114    1.7     chuck  *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
    115    1.7     chuck  *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
    116    1.7     chuck  *   then that means that the page at offset 0x235000 in kernel_object is
    117   1.47       chs  *   mapped at 0xf8235000.
    118    1.7     chuck  *
     119    1.7     chuck  * kernel objects have one other special property: when the kernel virtual
    120    1.7     chuck  * memory mapping them is unmapped, the backing memory in the object is
    121    1.7     chuck  * freed right away.   this is done with the uvm_km_pgremove() function.
    122    1.7     chuck  * this has to be done because there is no backing store for kernel pages
    123    1.7     chuck  * and no need to save them after they are no longer referenced.
    124  1.126      para  *
    125  1.127     rmind  * Generic arenas:
    126  1.126      para  *
    127  1.127     rmind  * kmem_arena:
    128  1.127     rmind  *	Main arena controlling the kernel KVA used by other arenas.
    129  1.127     rmind  *
    130  1.127     rmind  * kmem_va_arena:
     131  1.127     rmind  *	Implements quantum caching in order to speed up allocations and
     132  1.127     rmind  *	reduce fragmentation.  The pool(9) (unless created with a custom
     133  1.127     rmind  *	meta-data allocator) and kmem(9) subsystems use this arena.
    134  1.127     rmind  *
    135  1.127     rmind  * Arenas for meta-data allocations are used by vmem(9) and pool(9).
     136  1.127     rmind  * These arenas cannot use the quantum cache.  However, kmem_va_meta_arena
     137  1.127     rmind  * compensates for this by importing larger chunks from kmem_arena.
    138  1.127     rmind  *
    139  1.127     rmind  * kmem_va_meta_arena:
    140  1.127     rmind  *	Space for meta-data.
    141  1.127     rmind  *
    142  1.127     rmind  * kmem_meta_arena:
    143  1.127     rmind  *	Imports from kmem_va_meta_arena.  Allocations from this arena are
     144  1.127     rmind  *	backed by physical pages.
    145  1.127     rmind  *
    146  1.127     rmind  * Arena stacking:
    147  1.127     rmind  *
    148  1.127     rmind  *	kmem_arena
    149  1.127     rmind  *		kmem_va_arena
    150  1.127     rmind  *		kmem_va_meta_arena
    151  1.127     rmind  *			kmem_meta_arena
    152    1.7     chuck  */
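
/*
 * editorial example (a hedged sketch, not from the original sources):
 * stacking a client vmem arena on top of kmem_va_arena, as described
 * above.  "my_arena" and "my_va" are hypothetical names; the vmem(9)
 * calls match the ones used later in this file.
 *
 *	vmem_t *my_arena;
 *	vmem_addr_t my_va;
 *
 *	my_arena = vmem_create("example", 0, 0, PAGE_SIZE,
 *	    vmem_alloc, vmem_free, kmem_va_arena,
 *	    8 * PAGE_SIZE, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(my_arena, PAGE_SIZE, VM_SLEEP, &my_va) == 0) {
 *		...				(use the KVA at my_va)
 *		vmem_free(my_arena, my_va, PAGE_SIZE);
 *	}
 */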
    153   1.55     lukem 
    154   1.55     lukem #include <sys/cdefs.h>
    155  1.155        ad __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.155 2020/02/23 15:46:43 ad Exp $");
    156   1.55     lukem 
    157   1.55     lukem #include "opt_uvmhist.h"
    158    1.7     chuck 
    159  1.117      para #include "opt_kmempages.h"
    160  1.117      para 
    161  1.117      para #ifndef NKMEMPAGES
    162  1.117      para #define NKMEMPAGES 0
    163  1.117      para #endif
    164  1.117      para 
    165  1.117      para /*
     166  1.117      para  * Default lower and upper bounds for the kmem_arena page count.
    167  1.117      para  * Can be overridden by kernel config options.
    168  1.117      para  */
    169  1.117      para #ifndef NKMEMPAGES_MIN
    170  1.117      para #define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
    171  1.117      para #endif
    172  1.117      para 
    173  1.117      para #ifndef NKMEMPAGES_MAX
    174  1.117      para #define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
    175  1.117      para #endif
    176  1.117      para 
    177  1.117      para 
    178    1.1       mrg #include <sys/param.h>
    179    1.1       mrg #include <sys/systm.h>
    180  1.150       uwe #include <sys/atomic.h>
    181    1.1       mrg #include <sys/proc.h>
    182   1.72      yamt #include <sys/pool.h>
    183  1.112      para #include <sys/vmem.h>
    184  1.138      para #include <sys/vmem_impl.h>
    185  1.112      para #include <sys/kmem.h>
    186  1.147      maxv #include <sys/msan.h>
    187    1.1       mrg 
    188    1.1       mrg #include <uvm/uvm.h>
    189    1.1       mrg 
    190    1.1       mrg /*
    191    1.1       mrg  * global data structures
    192    1.1       mrg  */
    193    1.1       mrg 
    194   1.49       chs struct vm_map *kernel_map = NULL;
    195    1.1       mrg 
    196    1.1       mrg /*
     197    1.1       mrg  * local data structures
    198    1.1       mrg  */
    199    1.1       mrg 
    200  1.112      para static struct vm_map		kernel_map_store;
    201  1.112      para static struct vm_map_entry	kernel_image_mapent_store;
    202  1.112      para static struct vm_map_entry	kernel_kmem_mapent_store;
    203    1.1       mrg 
    204  1.117      para int nkmempages = 0;
    205  1.112      para vaddr_t kmembase;
    206  1.112      para vsize_t kmemsize;
    207   1.72      yamt 
    208  1.138      para static struct vmem kmem_arena_store;
    209  1.135      para vmem_t *kmem_arena = NULL;
    210  1.138      para static struct vmem kmem_va_arena_store;
    211  1.112      para vmem_t *kmem_va_arena;
    212   1.72      yamt 
    213   1.72      yamt /*
    214  1.117      para  * kmeminit_nkmempages: calculate the size of kmem_arena.
    215  1.117      para  */
    216  1.117      para void
    217  1.117      para kmeminit_nkmempages(void)
    218  1.117      para {
    219  1.117      para 	int npages;
    220  1.117      para 
    221  1.117      para 	if (nkmempages != 0) {
    222  1.117      para 		/*
     223  1.117      para 		 * It's already been set (by us being here before);
     224  1.117      para 		 * bail out now.
    225  1.117      para 		 */
    226  1.117      para 		return;
    227  1.117      para 	}
    228  1.117      para 
    229  1.147      maxv #if defined(KMSAN)
    230  1.147      maxv 	npages = (physmem / 8);
    231  1.147      maxv #elif defined(PMAP_MAP_POOLPAGE)
    232  1.119      para 	npages = (physmem / 4);
    233  1.119      para #else
    234  1.119      para 	npages = (physmem / 3) * 2;
     235  1.119      para #endif /* defined(KMSAN) */
    236  1.117      para 
    237  1.119      para #ifndef NKMEMPAGES_MAX_UNLIMITED
    238  1.117      para 	if (npages > NKMEMPAGES_MAX)
    239  1.117      para 		npages = NKMEMPAGES_MAX;
    240  1.119      para #endif
    241  1.117      para 
    242  1.117      para 	if (npages < NKMEMPAGES_MIN)
    243  1.117      para 		npages = NKMEMPAGES_MIN;
    244  1.117      para 
    245  1.117      para 	nkmempages = npages;
    246  1.117      para }
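
/*
 * editorial worked example (illustrative numbers only): with 4 KiB
 * pages and physmem = 262144 (1 GiB of RAM), the PMAP_MAP_POOLPAGE
 * branch above gives npages = 262144 / 4 = 65536, i.e. a 256 MiB
 * kmem_arena, before the NKMEMPAGES_MIN/NKMEMPAGES_MAX clamps apply.
 */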
    247  1.117      para 
    248  1.117      para /*
    249  1.112      para  * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
    250    1.1       mrg  * KVM already allocated for text, data, bss, and static data structures).
    251    1.1       mrg  *
    252   1.62   thorpej  * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
    253   1.82  christos  *    we assume that [vmin -> start] has already been allocated and that
    254   1.62   thorpej  *    "end" is the end.
    255    1.1       mrg  */
    256    1.1       mrg 
    257    1.8       mrg void
    258  1.112      para uvm_km_bootstrap(vaddr_t start, vaddr_t end)
    259    1.1       mrg {
    260  1.119      para 	bool kmem_arena_small;
    261   1.62   thorpej 	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
    262  1.118      matt 	struct uvm_map_args args;
    263  1.118      matt 	int error;
    264  1.118      matt 
    265  1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    266  1.144  pgoyette 	UVMHIST_LOG(maphist, "start=%#jx end=%#jx", start, end, 0,0);
    267   1.27   thorpej 
    268  1.117      para 	kmeminit_nkmempages();
    269  1.119      para 	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
    270  1.119      para 	kmem_arena_small = kmemsize < 64 * 1024 * 1024;
    271  1.112      para 
    272  1.144  pgoyette 	UVMHIST_LOG(maphist, "kmemsize=%#jx", kmemsize, 0,0,0);
    273  1.118      matt 
    274   1.27   thorpej 	/*
    275   1.27   thorpej 	 * next, init kernel memory objects.
    276    1.8       mrg 	 */
    277    1.1       mrg 
    278    1.8       mrg 	/* kernel_object: for pageable anonymous kernel memory */
    279   1.95        ad 	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
    280  1.112      para 				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
    281    1.1       mrg 
    282   1.24   thorpej 	/*
     283   1.56   thorpej 	 * init the map and reserve any kernel space that might
     284   1.56   thorpej 	 * already have been allocated before installing.
    285    1.8       mrg 	 */
    286    1.1       mrg 
    287  1.112      para 	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
    288  1.112      para 	kernel_map_store.pmap = pmap_kernel();
    289   1.70      yamt 	if (start != base) {
    290  1.112      para 		error = uvm_map_prepare(&kernel_map_store,
    291   1.71      yamt 		    base, start - base,
    292   1.70      yamt 		    NULL, UVM_UNKNOWN_OFFSET, 0,
    293   1.62   thorpej 		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    294   1.70      yamt 		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    295   1.70      yamt 		if (!error) {
    296  1.112      para 			kernel_image_mapent_store.flags =
    297  1.112      para 			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    298  1.112      para 			error = uvm_map_enter(&kernel_map_store, &args,
    299  1.112      para 			    &kernel_image_mapent_store);
    300   1.70      yamt 		}
    301   1.70      yamt 
    302   1.70      yamt 		if (error)
    303   1.70      yamt 			panic(
    304  1.112      para 			    "uvm_km_bootstrap: could not reserve space for kernel");
    305  1.112      para 
    306  1.112      para 		kmembase = args.uma_start + args.uma_size;
    307  1.114      matt 	} else {
    308  1.114      matt 		kmembase = base;
    309   1.70      yamt 	}
    310   1.47       chs 
    311  1.118      matt 	error = uvm_map_prepare(&kernel_map_store,
    312  1.118      matt 	    kmembase, kmemsize,
    313  1.118      matt 	    NULL, UVM_UNKNOWN_OFFSET, 0,
    314  1.118      matt 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    315  1.118      matt 	    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    316  1.118      matt 	if (!error) {
    317  1.118      matt 		kernel_kmem_mapent_store.flags =
    318  1.118      matt 		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    319  1.118      matt 		error = uvm_map_enter(&kernel_map_store, &args,
    320  1.118      matt 		    &kernel_kmem_mapent_store);
    321  1.118      matt 	}
    322  1.118      matt 
    323  1.118      matt 	if (error)
    324  1.118      matt 		panic("uvm_km_bootstrap: could not reserve kernel kmem");
    325  1.118      matt 
    326    1.8       mrg 	/*
    327    1.8       mrg 	 * install!
    328    1.8       mrg 	 */
    329    1.8       mrg 
    330  1.112      para 	kernel_map = &kernel_map_store;
    331  1.112      para 
    332  1.112      para 	pool_subsystem_init();
    333  1.112      para 
    334  1.138      para 	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
    335  1.138      para 	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
    336  1.112      para 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    337  1.135      para #ifdef PMAP_GROWKERNEL
    338  1.135      para 	/*
    339  1.135      para 	 * kmem_arena VA allocations happen independently of uvm_map.
    340  1.135      para 	 * grow kernel to accommodate the kmem_arena.
    341  1.135      para 	 */
    342  1.135      para 	if (uvm_maxkaddr < kmembase + kmemsize) {
    343  1.135      para 		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
    344  1.135      para 		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
    345  1.135      para 		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
    346  1.135      para 		    uvm_maxkaddr, kmembase, kmemsize);
    347  1.135      para 	}
    348  1.135      para #endif
    349  1.112      para 
    350  1.138      para 	vmem_subsystem_init(kmem_arena);
    351  1.112      para 
     352  1.144  pgoyette 	UVMHIST_LOG(maphist, "kmem vmem created (base=%#jx, size=%#jx)",
    353  1.144  pgoyette 	    kmembase, kmemsize, 0,0);
    354  1.118      matt 
    355  1.138      para 	kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
    356  1.138      para 	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
    357  1.138      para 	    (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
    358  1.138      para 	    VM_NOSLEEP, IPL_VM);
    359  1.118      matt 
    360  1.118      matt 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
    361  1.112      para }
    362  1.112      para 
    363  1.112      para /*
     364  1.112      para  * uvm_km_init: init the kernel map's virtual memory caches
    365  1.112      para  * and start the pool/kmem allocator.
    366  1.112      para  */
    367  1.112      para void
    368  1.112      para uvm_km_init(void)
    369  1.112      para {
    370  1.112      para 	kmem_init();
    371    1.1       mrg }
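
/*
 * editorial sketch (hedged; the exact MD hooks vary per port): the
 * boot ordering the two functions above assume, roughly as driven
 * from uvm_init():
 *
 *	pmap_bootstrap(...);			(MD: kernel pmap usable)
 *	uvm_page_init(&kvm_start, &kvm_end);	(page subsystem up)
 *	uvm_km_bootstrap(kvm_start, kvm_end);	(kernel_map + kmem_arena)
 *	...
 *	uvm_km_init();				(kmem(9) ready)
 */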
    372    1.1       mrg 
    373    1.1       mrg /*
    374    1.1       mrg  * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
    375    1.1       mrg  * is allocated all references to that area of VM must go through it.  this
    376    1.1       mrg  * allows the locking of VAs in kernel_map to be broken up into regions.
    377    1.1       mrg  *
     378   1.82  christos  * => if `fixed' is true, *vmin specifies where the region described
     379    1.5   thorpej  *      by the submap must start
    381    1.1       mrg  * => if submap is non NULL we use that as the submap, otherwise we
    382    1.1       mrg  *	alloc a new map
    383    1.1       mrg  */
    384   1.78      yamt 
    385    1.8       mrg struct vm_map *
    386   1.83   thorpej uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    387   1.93   thorpej     vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    388  1.112      para     struct vm_map *submap)
    389    1.8       mrg {
    390    1.8       mrg 	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
    391  1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    392    1.1       mrg 
    393   1.71      yamt 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    394   1.71      yamt 
    395    1.8       mrg 	size = round_page(size);	/* round up to pagesize */
    396    1.1       mrg 
    397    1.8       mrg 	/*
    398    1.8       mrg 	 * first allocate a blank spot in the parent map
    399    1.8       mrg 	 */
    400    1.8       mrg 
    401   1.82  christos 	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
    402    1.8       mrg 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    403   1.43       chs 	    UVM_ADV_RANDOM, mapflags)) != 0) {
    404  1.118      matt 		panic("%s: unable to allocate space in parent map", __func__);
    405    1.8       mrg 	}
    406    1.8       mrg 
    407    1.8       mrg 	/*
    408   1.82  christos 	 * set VM bounds (vmin is filled in by uvm_map)
    409    1.8       mrg 	 */
    410    1.1       mrg 
    411   1.82  christos 	*vmax = *vmin + size;
    412    1.5   thorpej 
    413    1.8       mrg 	/*
    414    1.8       mrg 	 * add references to pmap and create or init the submap
    415    1.8       mrg 	 */
    416    1.1       mrg 
    417    1.8       mrg 	pmap_reference(vm_map_pmap(map));
    418    1.8       mrg 	if (submap == NULL) {
    419  1.112      para 		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
    420    1.8       mrg 	}
    421  1.112      para 	uvm_map_setup(submap, *vmin, *vmax, flags);
    422  1.112      para 	submap->pmap = vm_map_pmap(map);
    423    1.1       mrg 
    424    1.8       mrg 	/*
     425    1.8       mrg 	 * now let uvm_map_submap plug it in...
    426    1.8       mrg 	 */
    427    1.1       mrg 
    428  1.112      para 	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
    429    1.8       mrg 		panic("uvm_km_suballoc: submap allocation failed");
    430    1.1       mrg 
    431  1.112      para 	return(submap);
    432    1.1       mrg }
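
/*
 * editorial usage sketch (hedged): roughly how a subsystem carves a
 * submap out of kernel_map at init time, in the style of pager_map;
 * the size macro and the static map store are illustrative.
 *
 *	static struct vm_map pager_map_store;
 *	struct vm_map *pager_map;
 *	vaddr_t sva, eva;
 *
 *	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva,
 *	    PAGER_MAP_SIZE, 0, false, &pager_map_store);
 */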
    433    1.1       mrg 
    434    1.1       mrg /*
    435  1.110      yamt  * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
    436    1.1       mrg  */
    437    1.1       mrg 
    438    1.8       mrg void
    439   1.83   thorpej uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
    440    1.1       mrg {
    441   1.95        ad 	struct uvm_object * const uobj = uvm_kernel_object;
    442   1.78      yamt 	const voff_t start = startva - vm_map_min(kernel_map);
    443   1.78      yamt 	const voff_t end = endva - vm_map_min(kernel_map);
    444   1.53       chs 	struct vm_page *pg;
    445   1.52       chs 	voff_t curoff, nextoff;
    446   1.53       chs 	int swpgonlydelta = 0;
    447  1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    448    1.1       mrg 
    449   1.78      yamt 	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
    450   1.78      yamt 	KASSERT(startva < endva);
    451   1.86      yamt 	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
    452   1.78      yamt 
    453  1.155        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
    454  1.110      yamt 	pmap_remove(pmap_kernel(), startva, endva);
    455   1.52       chs 	for (curoff = start; curoff < end; curoff = nextoff) {
    456   1.52       chs 		nextoff = curoff + PAGE_SIZE;
    457   1.52       chs 		pg = uvm_pagelookup(uobj, curoff);
    458   1.53       chs 		if (pg != NULL && pg->flags & PG_BUSY) {
    459   1.52       chs 			pg->flags |= PG_WANTED;
    460  1.155        ad 			UVM_UNLOCK_AND_WAIT_RW(pg, uobj->vmobjlock, 0,
    461   1.52       chs 				    "km_pgrm", 0);
    462  1.155        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
    463   1.52       chs 			nextoff = curoff;
    464    1.8       mrg 			continue;
    465   1.52       chs 		}
    466    1.8       mrg 
    467   1.52       chs 		/*
    468   1.52       chs 		 * free the swap slot, then the page.
    469   1.52       chs 		 */
    470    1.8       mrg 
    471   1.53       chs 		if (pg == NULL &&
    472   1.64        pk 		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
    473   1.53       chs 			swpgonlydelta++;
    474   1.53       chs 		}
    475   1.52       chs 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
    476   1.53       chs 		if (pg != NULL) {
    477   1.53       chs 			uvm_pagefree(pg);
    478   1.53       chs 		}
    479    1.8       mrg 	}
    480  1.155        ad 	rw_exit(uobj->vmobjlock);
    481    1.8       mrg 
    482   1.54       chs 	if (swpgonlydelta > 0) {
    483  1.149        ad 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    484  1.148        ad 		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
    485   1.54       chs 	}
    486   1.24   thorpej }
    487   1.24   thorpej 
    488   1.24   thorpej 
    489   1.24   thorpej /*
     490   1.78      yamt  * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
    491   1.78      yamt  *    regions.
    492   1.24   thorpej  *
    493   1.24   thorpej  * => when you unmap a part of anonymous kernel memory you want to toss
    494   1.52       chs  *    the pages right away.    (this is called from uvm_unmap_...).
    495   1.24   thorpej  * => none of the pages will ever be busy, and none of them will ever
    496   1.52       chs  *    be on the active or inactive queues (because they have no object).
    497   1.24   thorpej  */
    498   1.24   thorpej 
    499   1.24   thorpej void
    500  1.102        ad uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
    501   1.24   thorpej {
    502  1.122    bouyer #define __PGRM_BATCH 16
    503   1.52       chs 	struct vm_page *pg;
    504  1.122    bouyer 	paddr_t pa[__PGRM_BATCH];
    505  1.122    bouyer 	int npgrm, i;
    506  1.122    bouyer 	vaddr_t va, batch_vastart;
    507  1.122    bouyer 
    508  1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    509   1.24   thorpej 
    510  1.102        ad 	KASSERT(VM_MAP_IS_KERNEL(map));
    511  1.128      matt 	KASSERTMSG(vm_map_min(map) <= start,
    512  1.128      matt 	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
    513  1.128      matt 	    " (size=%#"PRIxVSIZE")",
    514  1.128      matt 	    vm_map_min(map), start, end - start);
    515   1.78      yamt 	KASSERT(start < end);
    516  1.102        ad 	KASSERT(end <= vm_map_max(map));
    517   1.78      yamt 
    518  1.122    bouyer 	for (va = start; va < end;) {
    519  1.122    bouyer 		batch_vastart = va;
    520  1.122    bouyer 		/* create a batch of at most __PGRM_BATCH pages to free */
    521  1.122    bouyer 		for (i = 0;
    522  1.122    bouyer 		     i < __PGRM_BATCH && va < end;
    523  1.122    bouyer 		     va += PAGE_SIZE) {
    524  1.122    bouyer 			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
    525  1.122    bouyer 				continue;
    526  1.122    bouyer 			}
    527  1.122    bouyer 			i++;
    528  1.122    bouyer 		}
    529  1.122    bouyer 		npgrm = i;
    530  1.122    bouyer 		/* now remove the mappings */
    531  1.124    bouyer 		pmap_kremove(batch_vastart, va - batch_vastart);
    532  1.122    bouyer 		/* and free the pages */
    533  1.122    bouyer 		for (i = 0; i < npgrm; i++) {
    534  1.122    bouyer 			pg = PHYS_TO_VM_PAGE(pa[i]);
    535  1.122    bouyer 			KASSERT(pg);
    536  1.122    bouyer 			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
    537  1.122    bouyer 			KASSERT((pg->flags & PG_BUSY) == 0);
    538  1.122    bouyer 			uvm_pagefree(pg);
    539   1.40       chs 		}
    540   1.24   thorpej 	}
    541  1.122    bouyer #undef __PGRM_BATCH
    542    1.1       mrg }
    543    1.1       mrg 
    544   1.78      yamt #if defined(DEBUG)
    545   1.78      yamt void
    546  1.102        ad uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
    547   1.78      yamt {
    548   1.78      yamt 	vaddr_t va;
    549  1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    550   1.78      yamt 
    551  1.102        ad 	KDASSERT(VM_MAP_IS_KERNEL(map));
    552  1.102        ad 	KDASSERT(vm_map_min(map) <= start);
    553   1.78      yamt 	KDASSERT(start < end);
    554  1.102        ad 	KDASSERT(end <= vm_map_max(map));
    555   1.78      yamt 
    556   1.78      yamt 	for (va = start; va < end; va += PAGE_SIZE) {
    557  1.152        ad 		paddr_t pa;
    558  1.152        ad 
    559   1.78      yamt 		if (pmap_extract(pmap_kernel(), va, &pa)) {
    560   1.81    simonb 			panic("uvm_km_check_empty: va %p has pa 0x%llx",
    561   1.81    simonb 			    (void *)va, (long long)pa);
    562   1.78      yamt 		}
    563  1.152        ad 		/*
    564  1.152        ad 		 * kernel_object should not have pages for the corresponding
    565  1.152        ad 		 * region.  check it.
    566  1.152        ad 		 *
    567  1.152        ad 		 * why trylock?  because:
    568  1.152        ad 		 * - caller might not want to block.
    569  1.152        ad 		 * - we can recurse when allocating radix_node for
    570  1.152        ad 		 *   kernel_object.
    571  1.152        ad 		 */
    572  1.155        ad 		if (rw_tryenter(uvm_kernel_object->vmobjlock, RW_WRITER)) {
    573  1.152        ad 			struct vm_page *pg;
    574  1.152        ad 
    575  1.152        ad 			pg = uvm_pagelookup(uvm_kernel_object,
    576  1.152        ad 			    va - vm_map_min(kernel_map));
    577  1.155        ad 			rw_exit(uvm_kernel_object->vmobjlock);
    578  1.152        ad 			if (pg) {
    579  1.152        ad 				panic("uvm_km_check_empty: "
    580  1.152        ad 				    "has page hashed at %p",
    581  1.152        ad 				    (const void *)va);
    582  1.152        ad 			}
    583   1.78      yamt 		}
    584   1.78      yamt 	}
    585   1.78      yamt }
    586   1.78      yamt #endif /* defined(DEBUG) */
    587    1.1       mrg 
    588    1.1       mrg /*
    589   1.78      yamt  * uvm_km_alloc: allocate an area of kernel memory.
    590    1.1       mrg  *
     591   1.78      yamt  * => NOTE: we can return 0 even when we can wait, if there is not enough
    592    1.1       mrg  *	free VM space in the map... caller should be prepared to handle
    593    1.1       mrg  *	this case.
    594    1.1       mrg  * => we return KVA of memory allocated
    595    1.1       mrg  */
    596    1.1       mrg 
    597   1.14       eeh vaddr_t
    598   1.83   thorpej uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    599    1.1       mrg {
    600   1.14       eeh 	vaddr_t kva, loopva;
    601   1.14       eeh 	vaddr_t offset;
    602   1.44   thorpej 	vsize_t loopsize;
    603    1.8       mrg 	struct vm_page *pg;
    604   1.78      yamt 	struct uvm_object *obj;
    605   1.78      yamt 	int pgaflags;
    606  1.141      maxv 	vm_prot_t prot, vaprot;
    607   1.78      yamt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    608    1.1       mrg 
    609   1.40       chs 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    610   1.78      yamt 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    611   1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    612   1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    613  1.111      matt 	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
    614  1.111      matt 	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);
    615    1.1       mrg 
    616    1.8       mrg 	/*
    617    1.8       mrg 	 * setup for call
    618    1.8       mrg 	 */
    619    1.8       mrg 
    620   1.78      yamt 	kva = vm_map_min(map);	/* hint */
    621    1.8       mrg 	size = round_page(size);
    622   1.95        ad 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
    623  1.144  pgoyette 	UVMHIST_LOG(maphist,"  (map=0x%#jx, obj=0x%#jx, size=0x%jx, flags=%jd)",
    624  1.144  pgoyette 	    (uintptr_t)map, (uintptr_t)obj, size, flags);
    625    1.1       mrg 
    626    1.8       mrg 	/*
    627    1.8       mrg 	 * allocate some virtual space
    628    1.8       mrg 	 */
    629    1.8       mrg 
    630  1.141      maxv 	vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW;
    631   1.78      yamt 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
    632  1.141      maxv 	    align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE,
    633   1.78      yamt 	    UVM_ADV_RANDOM,
    634  1.111      matt 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
    635  1.112      para 	     | UVM_KMF_COLORMATCH)))) != 0)) {
    636    1.8       mrg 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    637    1.8       mrg 		return(0);
    638    1.8       mrg 	}
    639    1.8       mrg 
    640    1.8       mrg 	/*
    641    1.8       mrg 	 * if all we wanted was VA, return now
    642    1.8       mrg 	 */
    643    1.8       mrg 
    644   1.78      yamt 	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
    645  1.144  pgoyette 		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%jx)", kva,0,0,0);
    646    1.8       mrg 		return(kva);
    647    1.8       mrg 	}
    648   1.40       chs 
    649    1.8       mrg 	/*
    650    1.8       mrg 	 * recover object offset from virtual address
    651    1.8       mrg 	 */
    652    1.8       mrg 
    653    1.8       mrg 	offset = kva - vm_map_min(kernel_map);
    654  1.144  pgoyette 	UVMHIST_LOG(maphist, "  kva=0x%jx, offset=0x%jx", kva, offset,0,0);
    655    1.8       mrg 
    656    1.8       mrg 	/*
    657    1.8       mrg 	 * now allocate and map in the memory... note that we are the only ones
     658    1.8       mrg 	 * who should ever get a handle on this area of VM.
    659    1.8       mrg 	 */
    660    1.8       mrg 
    661    1.8       mrg 	loopva = kva;
    662   1.44   thorpej 	loopsize = size;
    663   1.78      yamt 
    664  1.107      matt 	pgaflags = UVM_FLAG_COLORMATCH;
    665  1.103        ad 	if (flags & UVM_KMF_NOWAIT)
    666  1.103        ad 		pgaflags |= UVM_PGA_USERESERVE;
    667   1.78      yamt 	if (flags & UVM_KMF_ZERO)
    668   1.78      yamt 		pgaflags |= UVM_PGA_ZERO;
    669   1.89  drochner 	prot = VM_PROT_READ | VM_PROT_WRITE;
    670   1.89  drochner 	if (flags & UVM_KMF_EXEC)
    671   1.89  drochner 		prot |= VM_PROT_EXECUTE;
    672   1.44   thorpej 	while (loopsize) {
    673  1.114      matt 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
    674  1.114      matt 		    "loopva=%#"PRIxVADDR, loopva);
    675   1.78      yamt 
    676  1.107      matt 		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
    677  1.107      matt #ifdef UVM_KM_VMFREELIST
    678  1.107      matt 		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
    679  1.107      matt #else
    680  1.107      matt 		   UVM_PGA_STRAT_NORMAL, 0
    681  1.107      matt #endif
    682  1.107      matt 		   );
    683   1.47       chs 
    684    1.8       mrg 		/*
    685    1.8       mrg 		 * out of memory?
    686    1.8       mrg 		 */
    687    1.8       mrg 
    688   1.35   thorpej 		if (__predict_false(pg == NULL)) {
    689   1.58       chs 			if ((flags & UVM_KMF_NOWAIT) ||
    690   1.80      yamt 			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
    691    1.8       mrg 				/* free everything! */
    692   1.78      yamt 				uvm_km_free(map, kva, size,
    693   1.78      yamt 				    flags & UVM_KMF_TYPEMASK);
    694   1.58       chs 				return (0);
    695    1.8       mrg 			} else {
    696    1.8       mrg 				uvm_wait("km_getwait2");	/* sleep here */
    697    1.8       mrg 				continue;
    698    1.8       mrg 			}
    699    1.8       mrg 		}
    700   1.47       chs 
    701   1.78      yamt 		pg->flags &= ~PG_BUSY;	/* new page */
    702   1.78      yamt 		UVM_PAGE_OWN(pg, NULL);
    703   1.78      yamt 
    704    1.8       mrg 		/*
    705   1.52       chs 		 * map it in
    706    1.8       mrg 		 */
    707   1.40       chs 
    708  1.104    cegger 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    709  1.106    cegger 		    prot, PMAP_KMPAGE);
    710    1.8       mrg 		loopva += PAGE_SIZE;
    711    1.8       mrg 		offset += PAGE_SIZE;
    712   1.44   thorpej 		loopsize -= PAGE_SIZE;
    713    1.8       mrg 	}
    714   1.69  junyoung 
    715  1.112      para 	pmap_update(pmap_kernel());
    716   1.69  junyoung 
    717  1.146      maxv 	if ((flags & UVM_KMF_ZERO) == 0) {
    718  1.147      maxv 		kmsan_orig((void *)kva, size, KMSAN_TYPE_UVM, __RET_ADDR);
    719  1.147      maxv 		kmsan_mark((void *)kva, size, KMSAN_STATE_UNINIT);
    720  1.146      maxv 	}
    721  1.146      maxv 
    722  1.144  pgoyette 	UVMHIST_LOG(maphist,"<- done (kva=0x%jx)", kva,0,0,0);
    723    1.8       mrg 	return(kva);
    724    1.1       mrg }
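
/*
 * editorial usage sketch (hedged): allocate one wired, zeroed kernel
 * page, failing softly under memory pressure, and release it again.
 * note that the type flag passed to uvm_km_free() must match the one
 * used at allocation time.
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...					(use the page)
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */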
    725    1.1       mrg 
    726    1.1       mrg /*
    727  1.140      maxv  * uvm_km_protect: change the protection of an allocated area
    728  1.140      maxv  */
    729  1.140      maxv 
    730  1.140      maxv int
    731  1.140      maxv uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot)
    732  1.140      maxv {
    733  1.140      maxv 	return uvm_map_protect(map, addr, addr + round_page(size), prot, false);
    734  1.140      maxv }
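
/*
 * editorial usage sketch (hedged): after filling a wired allocation
 * with code, drop write permission.  "text_va" and "text_size" are
 * hypothetical; text_va is assumed to come from uvm_km_alloc() called
 * with UVM_KMF_WIRED | UVM_KMF_EXEC.
 *
 *	error = uvm_km_protect(kernel_map, text_va, text_size,
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 */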
    735  1.140      maxv 
    736  1.140      maxv /*
    737    1.1       mrg  * uvm_km_free: free an area of kernel memory
    738    1.1       mrg  */
    739    1.1       mrg 
    740    1.8       mrg void
    741   1.83   thorpej uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
    742    1.8       mrg {
    743  1.118      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    744    1.1       mrg 
    745   1.78      yamt 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    746   1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    747   1.78      yamt 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    748   1.78      yamt 	KASSERT((addr & PAGE_MASK) == 0);
    749   1.40       chs 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    750    1.1       mrg 
    751    1.8       mrg 	size = round_page(size);
    752    1.1       mrg 
    753   1.78      yamt 	if (flags & UVM_KMF_PAGEABLE) {
    754   1.78      yamt 		uvm_km_pgremove(addr, addr + size);
    755   1.78      yamt 	} else if (flags & UVM_KMF_WIRED) {
    756  1.109     rmind 		/*
     757  1.109     rmind 		 * Note: uvm_km_pgremove_intrsafe() uses the mappings to find
     758  1.109     rmind 		 * the pages, so it must run before the KVA is unmapped below.
    759  1.109     rmind 		 */
    760  1.102        ad 		uvm_km_pgremove_intrsafe(map, addr, addr + size);
    761    1.8       mrg 	}
    762   1.99      yamt 
    763   1.99      yamt 	/*
    764  1.109     rmind 	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
    765  1.109     rmind 	 * KVA becomes globally available.
    766   1.99      yamt 	 */
    767    1.8       mrg 
    768  1.112      para 	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
    769   1.66        pk }
    770   1.66        pk 
    771   1.10   thorpej /* Sanity; must specify both or none. */
    772   1.10   thorpej #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    773   1.10   thorpej     (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
    774   1.10   thorpej #error Must specify MAP and UNMAP together.
    775   1.10   thorpej #endif
    776   1.10   thorpej 
    777  1.153     skrll #if defined(PMAP_ALLOC_POOLPAGE) && \
    778  1.153     skrll     !defined(PMAP_MAP_POOLPAGE) && !defined(PMAP_UNMAP_POOLPAGE)
    779  1.153     skrll #error Must specify ALLOC with MAP and UNMAP
    780  1.153     skrll #endif
    781  1.153     skrll 
    782  1.112      para int
    783  1.112      para uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    784  1.112      para     vmem_addr_t *addr)
    785   1.72      yamt {
    786   1.72      yamt 	struct vm_page *pg;
    787  1.112      para 	vmem_addr_t va;
    788  1.112      para 	int rc;
    789  1.112      para 	vaddr_t loopva;
    790  1.112      para 	vsize_t loopsize;
    791   1.72      yamt 
    792  1.112      para 	size = round_page(size);
    793   1.72      yamt 
    794  1.112      para #if defined(PMAP_MAP_POOLPAGE)
    795  1.112      para 	if (size == PAGE_SIZE) {
    796   1.72      yamt again:
    797  1.112      para #ifdef PMAP_ALLOC_POOLPAGE
    798  1.112      para 		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
    799  1.112      para 		   0 : UVM_PGA_USERESERVE);
    800  1.112      para #else
    801  1.112      para 		pg = uvm_pagealloc(NULL, 0, NULL,
    802  1.112      para 		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
    803  1.112      para #endif /* PMAP_ALLOC_POOLPAGE */
    804  1.112      para 		if (__predict_false(pg == NULL)) {
    805  1.112      para 			if (flags & VM_SLEEP) {
    806  1.112      para 				uvm_wait("plpg");
    807  1.112      para 				goto again;
    808  1.112      para 			}
    809  1.123     rmind 			return ENOMEM;
    810  1.112      para 		}
    811  1.112      para 		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
    812  1.145   mlelstv 		KASSERT(va != 0);
    813  1.112      para 		*addr = va;
    814  1.112      para 		return 0;
    815   1.72      yamt 	}
    816  1.112      para #endif /* PMAP_MAP_POOLPAGE */
    817  1.112      para 
    818  1.112      para 	rc = vmem_alloc(vm, size, flags, &va);
    819  1.112      para 	if (rc != 0)
    820  1.112      para 		return rc;
    821   1.72      yamt 
    822  1.130      matt #ifdef PMAP_GROWKERNEL
    823  1.130      matt 	/*
    824  1.135      para 	 * These VA allocations happen independently of uvm_map
    825  1.135      para 	 * so this allocation must not extend beyond the current limit.
    826  1.135      para 	 */
    827  1.135      para 	KASSERTMSG(uvm_maxkaddr >= va + size,
    828  1.135      para 	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
    829  1.135      para 	    uvm_maxkaddr, va, size);
    830  1.130      matt #endif
    831  1.130      matt 
    832  1.112      para 	loopva = va;
    833  1.112      para 	loopsize = size;
    834   1.72      yamt 
    835  1.112      para 	while (loopsize) {
    836  1.142  riastrad 		paddr_t pa __diagused;
    837  1.128      matt 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
    838  1.128      matt 		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
    839  1.128      matt 		    " pa=%#"PRIxPADDR" vmem=%p",
    840  1.128      matt 		    loopva, loopsize, pa, vm);
    841  1.114      matt 
    842  1.114      matt 		pg = uvm_pagealloc(NULL, loopva, NULL,
    843  1.115      matt 		    UVM_FLAG_COLORMATCH
    844  1.114      matt 		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
    845  1.112      para 		if (__predict_false(pg == NULL)) {
    846  1.112      para 			if (flags & VM_SLEEP) {
    847  1.112      para 				uvm_wait("plpg");
    848  1.112      para 				continue;
    849  1.112      para 			} else {
    850  1.112      para 				uvm_km_pgremove_intrsafe(kernel_map, va,
    851  1.112      para 				    va + size);
    852  1.125      yamt 				vmem_free(vm, va, size);
    853  1.112      para 				return ENOMEM;
    854  1.112      para 			}
    855  1.112      para 		}
    856  1.123     rmind 
    857  1.112      para 		pg->flags &= ~PG_BUSY;	/* new page */
    858  1.112      para 		UVM_PAGE_OWN(pg, NULL);
    859  1.112      para 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    860  1.112      para 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
    861  1.107      matt 
    862  1.112      para 		loopva += PAGE_SIZE;
    863  1.112      para 		loopsize -= PAGE_SIZE;
    864   1.15   thorpej 	}
    865  1.112      para 	pmap_update(pmap_kernel());
    866  1.112      para 
    867  1.112      para 	*addr = va;
    868   1.16   thorpej 
    869  1.112      para 	return 0;
    870   1.10   thorpej }
    871   1.10   thorpej 
    872   1.10   thorpej void
    873  1.112      para uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
    874   1.72      yamt {
    875  1.112      para 
    876  1.112      para 	size = round_page(size);
    877   1.72      yamt #if defined(PMAP_UNMAP_POOLPAGE)
    878  1.112      para 	if (size == PAGE_SIZE) {
    879  1.112      para 		paddr_t pa;
    880   1.72      yamt 
    881  1.112      para 		pa = PMAP_UNMAP_POOLPAGE(addr);
    882  1.112      para 		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
    883   1.72      yamt 		return;
    884   1.72      yamt 	}
    885  1.112      para #endif /* PMAP_UNMAP_POOLPAGE */
    886  1.112      para 	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
    887  1.112      para 	pmap_update(pmap_kernel());
    888   1.72      yamt 
    889  1.112      para 	vmem_free(vm, addr, size);
    890   1.72      yamt }
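
/*
 * editorial usage sketch (hedged): the kmem(9) and pool(9) back-ends
 * sit on top of these two helpers; a minimal consumer could look like:
 *
 *	vmem_addr_t va;
 *
 *	if (uvm_km_kmem_alloc(kmem_va_arena, 4 * PAGE_SIZE,
 *	    VM_SLEEP, &va) == 0) {
 *		...			(4 wired pages are mapped at va)
 *		uvm_km_kmem_free(kmem_va_arena, va, 4 * PAGE_SIZE);
 *	}
 */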
    891   1.72      yamt 
    892  1.112      para bool
    893  1.112      para uvm_km_va_starved_p(void)
    894   1.10   thorpej {
    895  1.112      para 	vmem_size_t total;
    896  1.112      para 	vmem_size_t free;
    897  1.112      para 
    898  1.135      para 	if (kmem_arena == NULL)
    899  1.135      para 		return false;
    900  1.135      para 
    901  1.112      para 	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
    902  1.112      para 	free = vmem_size(kmem_arena, VMEM_FREE);
    903   1.10   thorpej 
    904  1.112      para 	return (free < (total / 10));
    905    1.1       mrg }
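
/*
 * editorial usage sketch (hedged): a drain hook for a KVA-hungry
 * cache might consult this predicate:
 *
 *	if (uvm_km_va_starved_p()) {
 *		...		(release cached VA back to kmem_va_arena)
 *	}
 */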