      1 /*	$NetBSD: uvm_km.c,v 1.144 2017/10/28 00:37:13 pgoyette Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6  *
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to Berkeley by
     10  * The Mach Operating System project at Carnegie-Mellon University.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
     37  * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
     38  *
     39  *
     40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     41  * All rights reserved.
     42  *
     43  * Permission to use, copy, modify and distribute this software and
     44  * its documentation is hereby granted, provided that both the copyright
     45  * notice and this permission notice appear in all copies of the
     46  * software, derivative works or modified versions, and any portions
     47  * thereof, and that both notices appear in supporting documentation.
     48  *
     49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     52  *
     53  * Carnegie Mellon requests users of this software to return to
     54  *
     55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     56  *  School of Computer Science
     57  *  Carnegie Mellon University
     58  *  Pittsburgh PA 15213-3890
     59  *
     60  * any improvements or extensions that they make and grant Carnegie the
     61  * rights to redistribute these changes.
     62  */
     63 
     64 /*
     65  * uvm_km.c: handle kernel memory allocation and management
     66  */
     67 
     68 /*
     69  * overview of kernel memory management:
     70  *
     71  * the kernel virtual address space is mapped by "kernel_map."   kernel_map
     72  * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
     73  * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
     74  *
     75  * the kernel_map has several "submaps."   submaps can only appear in
     76  * the kernel_map (user processes can't use them).   submaps "take over"
     77  * the management of a sub-range of the kernel's address space.  submaps
     78  * are typically allocated at boot time and are never released.   kernel
     79  * virtual address space that is mapped by a submap is locked by the
     80  * submap's lock -- not the kernel_map's lock.
     81  *
     82  * thus, the useful feature of submaps is that they allow us to break
     83  * up the locking and protection of the kernel address space into smaller
     84  * chunks.
     85  *
     86  * the vm system has several standard kernel submaps/arenas, including:
     87  *   kmem_arena => used for kmem/pool (memoryallocators(9))
     88  *   pager_map => used to map "buf" structures into kernel space
     89  *   exec_map => used during exec to handle exec args
     90  *   etc...
     91  *
     92  * The kmem_arena is a "special submap", as it lives in a fixed map entry
     93  * within the kernel_map and is controlled by vmem(9).
     94  *
     95  * the kernel allocates its private memory out of special uvm_objects whose
     96  * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
     97  * are "special" and never die).   all kernel objects should be thought of
     98  * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
     99  * object is equal to the size of kernel virtual address space (i.e. the
    100  * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
    101  *
    102  * note that just because a kernel object spans the entire kernel virtual
    103  * address space doesn't mean that it has to be mapped into the entire space.
    104  * large chunks of a kernel object's space go unused either because
    105  * that area of kernel VM is unmapped, or there is some other type of
     106  * object mapped into that range (e.g. a vnode).    for a submap's kernel
     107  * object, the only parts of the object that can ever be populated are the
     108  * offsets that are managed by the submap.
    109  *
    110  * note that the "offset" in a kernel object is always the kernel virtual
    111  * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
    112  * example:
    113  *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
    114  *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
    115  *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
    116  *   then that means that the page at offset 0x235000 in kernel_object is
    117  *   mapped at 0xf8235000.
    118  *
     119  * kernel objects have one other special property: when the kernel virtual
    120  * memory mapping them is unmapped, the backing memory in the object is
    121  * freed right away.   this is done with the uvm_km_pgremove() function.
    122  * this has to be done because there is no backing store for kernel pages
    123  * and no need to save them after they are no longer referenced.
    124  *
    125  * Generic arenas:
    126  *
    127  * kmem_arena:
    128  *	Main arena controlling the kernel KVA used by other arenas.
    129  *
    130  * kmem_va_arena:
     131  *	Implements quantum caching in order to speed up allocations and
     132  *	reduce fragmentation.  Used by the kmem(9) subsystem, and by
     133  *	pool(9) unless a pool is created with a custom meta-data allocator.
    134  *
    135  * Arenas for meta-data allocations are used by vmem(9) and pool(9).
     136  * These arenas cannot use quantum caching.  However, kmem_va_meta_arena
     137  * compensates for this by importing larger chunks from kmem_arena.
    138  *
    139  * kmem_va_meta_arena:
    140  *	Space for meta-data.
    141  *
    142  * kmem_meta_arena:
    143  *	Imports from kmem_va_meta_arena.  Allocations from this arena are
     144  *	backed with physical pages.
    145  *
    146  * Arena stacking:
    147  *
    148  *	kmem_arena
    149  *		kmem_va_arena
    150  *		kmem_va_meta_arena
    151  *			kmem_meta_arena
    152  */
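/*
 * The offset rule above can be illustrated with a small sketch (for
 * illustration only, not part of the build; example_kva_to_offset is a
 * hypothetical helper, not an existing kernel function):
 *
 *	static inline voff_t
 *	example_kva_to_offset(vaddr_t kva)
 *	{
 *		return (voff_t)(kva - vm_map_min(kernel_map));
 *	}
 *
 * with VM_MIN_KERNEL_ADDRESS == 0xf8000000, a kva of 0xf8235000 yields
 * offset 0x235000 in kernel_object, matching the example above.
 */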
    153 
    154 #include <sys/cdefs.h>
    155 __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.144 2017/10/28 00:37:13 pgoyette Exp $");
    156 
    157 #include "opt_uvmhist.h"
    158 
    159 #include "opt_kmempages.h"
    160 
    161 #ifndef NKMEMPAGES
    162 #define NKMEMPAGES 0
    163 #endif
    164 
    165 /*
     166  * Defaults for the lower and upper bounds on the kmem_arena page count.
    167  * Can be overridden by kernel config options.
    168  */
    169 #ifndef NKMEMPAGES_MIN
    170 #define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
    171 #endif
    172 
    173 #ifndef NKMEMPAGES_MAX
    174 #define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
    175 #endif
    176 
    177 
    178 #include <sys/param.h>
    179 #include <sys/systm.h>
    180 #include <sys/proc.h>
    181 #include <sys/pool.h>
    182 #include <sys/vmem.h>
    183 #include <sys/vmem_impl.h>
    184 #include <sys/kmem.h>
    185 
    186 #include <uvm/uvm.h>
    187 
    188 /*
    189  * global data structures
    190  */
    191 
    192 struct vm_map *kernel_map = NULL;
    193 
    194 /*
     195  * local data structures
    196  */
    197 
    198 static struct vm_map		kernel_map_store;
    199 static struct vm_map_entry	kernel_image_mapent_store;
    200 static struct vm_map_entry	kernel_kmem_mapent_store;
    201 
    202 int nkmempages = 0;
    203 vaddr_t kmembase;
    204 vsize_t kmemsize;
    205 
    206 static struct vmem kmem_arena_store;
    207 vmem_t *kmem_arena = NULL;
    208 static struct vmem kmem_va_arena_store;
    209 vmem_t *kmem_va_arena;
    210 
    211 /*
    212  * kmeminit_nkmempages: calculate the size of kmem_arena.
    213  */
    214 void
    215 kmeminit_nkmempages(void)
    216 {
    217 	int npages;
    218 
    219 	if (nkmempages != 0) {
    220 		/*
     221 		 * It's already been set (by us being here before);
     222 		 * bail out now.
    223 		 */
    224 		return;
    225 	}
    226 
    227 #if defined(PMAP_MAP_POOLPAGE)
    228 	npages = (physmem / 4);
    229 #else
    230 	npages = (physmem / 3) * 2;
    231 #endif /* defined(PMAP_MAP_POOLPAGE) */
    232 
    233 #ifndef NKMEMPAGES_MAX_UNLIMITED
    234 	if (npages > NKMEMPAGES_MAX)
    235 		npages = NKMEMPAGES_MAX;
    236 #endif
    237 
    238 	if (npages < NKMEMPAGES_MIN)
    239 		npages = NKMEMPAGES_MIN;
    240 
    241 	nkmempages = npages;
    242 }
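/*
 * Worked example (illustration only; the physmem value is an assumption):
 * with physmem = 262144 pages (1 GiB of 4 KiB pages) and PMAP_MAP_POOLPAGE
 * defined, npages starts at 262144 / 4 = 65536; it is then clamped to at
 * most NKMEMPAGES_MAX (unless NKMEMPAGES_MAX_UNLIMITED is defined) and
 * raised to at least NKMEMPAGES_MIN before being stored in nkmempages.
 */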
    243 
    244 /*
    245  * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
    246  * KVM already allocated for text, data, bss, and static data structures).
    247  *
    248  * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
     249  *    we assume that [VM_MIN_KERNEL_ADDRESS -> start] has already been
     250  *    allocated and that "end" is the end.
    251  */
    252 
    253 void
    254 uvm_km_bootstrap(vaddr_t start, vaddr_t end)
    255 {
    256 	bool kmem_arena_small;
    257 	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
    258 	struct uvm_map_args args;
    259 	int error;
    260 
    261 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    262 	UVMHIST_LOG(maphist, "start=%#jx end=%#jx", start, end, 0,0);
    263 
    264 	kmeminit_nkmempages();
    265 	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
    266 	kmem_arena_small = kmemsize < 64 * 1024 * 1024;
    267 
    268 	UVMHIST_LOG(maphist, "kmemsize=%#jx", kmemsize, 0,0,0);
    269 
    270 	/*
    271 	 * next, init kernel memory objects.
    272 	 */
    273 
    274 	/* kernel_object: for pageable anonymous kernel memory */
    275 	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
    276 				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
    277 
    278 	/*
    279 	 * init the map and reserve any space that might already
    280 	 * have been allocated kernel space before installing.
    281 	 */
    282 
    283 	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
    284 	kernel_map_store.pmap = pmap_kernel();
    285 	if (start != base) {
    286 		error = uvm_map_prepare(&kernel_map_store,
    287 		    base, start - base,
    288 		    NULL, UVM_UNKNOWN_OFFSET, 0,
    289 		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    290 		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    291 		if (!error) {
    292 			kernel_image_mapent_store.flags =
    293 			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    294 			error = uvm_map_enter(&kernel_map_store, &args,
    295 			    &kernel_image_mapent_store);
    296 		}
    297 
    298 		if (error)
    299 			panic(
    300 			    "uvm_km_bootstrap: could not reserve space for kernel");
    301 
    302 		kmembase = args.uma_start + args.uma_size;
    303 	} else {
    304 		kmembase = base;
    305 	}
    306 
    307 	error = uvm_map_prepare(&kernel_map_store,
    308 	    kmembase, kmemsize,
    309 	    NULL, UVM_UNKNOWN_OFFSET, 0,
    310 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    311 	    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    312 	if (!error) {
    313 		kernel_kmem_mapent_store.flags =
    314 		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    315 		error = uvm_map_enter(&kernel_map_store, &args,
    316 		    &kernel_kmem_mapent_store);
    317 	}
    318 
    319 	if (error)
    320 		panic("uvm_km_bootstrap: could not reserve kernel kmem");
    321 
    322 	/*
    323 	 * install!
    324 	 */
    325 
    326 	kernel_map = &kernel_map_store;
    327 
    328 	pool_subsystem_init();
    329 
    330 	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
    331 	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
    332 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    333 #ifdef PMAP_GROWKERNEL
    334 	/*
    335 	 * kmem_arena VA allocations happen independently of uvm_map.
    336 	 * grow kernel to accommodate the kmem_arena.
    337 	 */
    338 	if (uvm_maxkaddr < kmembase + kmemsize) {
    339 		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
    340 		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
    341 		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
    342 		    uvm_maxkaddr, kmembase, kmemsize);
    343 	}
    344 #endif
    345 
    346 	vmem_subsystem_init(kmem_arena);
    347 
     348 	UVMHIST_LOG(maphist, "kmem vmem created (base=%#jx, size=%#jx)",
    349 	    kmembase, kmemsize, 0,0);
    350 
    351 	kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
    352 	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
    353 	    (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
    354 	    VM_NOSLEEP, IPL_VM);
    355 
    356 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
    357 }
    358 
    359 /*
     360  * uvm_km_init: init the kernel map's virtual memory caches
    361  * and start the pool/kmem allocator.
    362  */
    363 void
    364 uvm_km_init(void)
    365 {
    366 	kmem_init();
    367 }
    368 
    369 /*
    370  * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
    371  * is allocated all references to that area of VM must go through it.  this
    372  * allows the locking of VAs in kernel_map to be broken up into regions.
    373  *
    374  * => if `fixed' is true, *vmin specifies where the region described
    376  *      by the submap must start
    377  * => if submap is non NULL we use that as the submap, otherwise we
    378  *	alloc a new map
    379  */
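/*
 * Example usage (a hedged sketch, not compiled; the size, the flags and
 * the NULL submap argument are illustrative assumptions):
 *
 *	vaddr_t vmin, vmax;
 *	struct vm_map *sm;
 *
 *	sm = uvm_km_suballoc(kernel_map, &vmin, &vmax, 16 * PAGE_SIZE,
 *	    VM_MAP_PAGEABLE, false, NULL);
 *
 * on return, [vmin, vmax) is the kernel VA range managed by the new submap.
 */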
    380 
    381 struct vm_map *
    382 uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    383     vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    384     struct vm_map *submap)
    385 {
    386 	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
    387 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    388 
    389 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    390 
    391 	size = round_page(size);	/* round up to pagesize */
    392 
    393 	/*
    394 	 * first allocate a blank spot in the parent map
    395 	 */
    396 
    397 	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
    398 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    399 	    UVM_ADV_RANDOM, mapflags)) != 0) {
    400 		panic("%s: unable to allocate space in parent map", __func__);
    401 	}
    402 
    403 	/*
    404 	 * set VM bounds (vmin is filled in by uvm_map)
    405 	 */
    406 
    407 	*vmax = *vmin + size;
    408 
    409 	/*
    410 	 * add references to pmap and create or init the submap
    411 	 */
    412 
    413 	pmap_reference(vm_map_pmap(map));
    414 	if (submap == NULL) {
    415 		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
    416 	}
    417 	uvm_map_setup(submap, *vmin, *vmax, flags);
    418 	submap->pmap = vm_map_pmap(map);
    419 
    420 	/*
     421 	 * now let uvm_map_submap plug it in...
    422 	 */
    423 
    424 	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
    425 		panic("uvm_km_suballoc: submap allocation failed");
    426 
    427 	return(submap);
    428 }
    429 
    430 /*
    431  * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
    432  */
    433 
    434 void
    435 uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
    436 {
    437 	struct uvm_object * const uobj = uvm_kernel_object;
    438 	const voff_t start = startva - vm_map_min(kernel_map);
    439 	const voff_t end = endva - vm_map_min(kernel_map);
    440 	struct vm_page *pg;
    441 	voff_t curoff, nextoff;
    442 	int swpgonlydelta = 0;
    443 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    444 
    445 	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
    446 	KASSERT(startva < endva);
    447 	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
    448 
    449 	mutex_enter(uobj->vmobjlock);
    450 	pmap_remove(pmap_kernel(), startva, endva);
    451 	for (curoff = start; curoff < end; curoff = nextoff) {
    452 		nextoff = curoff + PAGE_SIZE;
    453 		pg = uvm_pagelookup(uobj, curoff);
    454 		if (pg != NULL && pg->flags & PG_BUSY) {
    455 			pg->flags |= PG_WANTED;
    456 			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
    457 				    "km_pgrm", 0);
    458 			mutex_enter(uobj->vmobjlock);
    459 			nextoff = curoff;
    460 			continue;
    461 		}
    462 
    463 		/*
    464 		 * free the swap slot, then the page.
    465 		 */
    466 
    467 		if (pg == NULL &&
    468 		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
    469 			swpgonlydelta++;
    470 		}
    471 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
    472 		if (pg != NULL) {
    473 			mutex_enter(&uvm_pageqlock);
    474 			uvm_pagefree(pg);
    475 			mutex_exit(&uvm_pageqlock);
    476 		}
    477 	}
    478 	mutex_exit(uobj->vmobjlock);
    479 
    480 	if (swpgonlydelta > 0) {
    481 		mutex_enter(&uvm_swap_data_lock);
    482 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    483 		uvmexp.swpgonly -= swpgonlydelta;
    484 		mutex_exit(&uvm_swap_data_lock);
    485 	}
    486 }
    487 
    488 
    489 /*
     490  * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
    491  *    regions.
    492  *
    493  * => when you unmap a part of anonymous kernel memory you want to toss
    494  *    the pages right away.    (this is called from uvm_unmap_...).
    495  * => none of the pages will ever be busy, and none of them will ever
    496  *    be on the active or inactive queues (because they have no object).
    497  */
    498 
    499 void
    500 uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
    501 {
    502 #define __PGRM_BATCH 16
    503 	struct vm_page *pg;
    504 	paddr_t pa[__PGRM_BATCH];
    505 	int npgrm, i;
    506 	vaddr_t va, batch_vastart;
    507 
    508 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    509 
    510 	KASSERT(VM_MAP_IS_KERNEL(map));
    511 	KASSERTMSG(vm_map_min(map) <= start,
    512 	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
    513 	    " (size=%#"PRIxVSIZE")",
    514 	    vm_map_min(map), start, end - start);
    515 	KASSERT(start < end);
    516 	KASSERT(end <= vm_map_max(map));
    517 
    518 	for (va = start; va < end;) {
    519 		batch_vastart = va;
    520 		/* create a batch of at most __PGRM_BATCH pages to free */
    521 		for (i = 0;
    522 		     i < __PGRM_BATCH && va < end;
    523 		     va += PAGE_SIZE) {
    524 			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
    525 				continue;
    526 			}
    527 			i++;
    528 		}
    529 		npgrm = i;
    530 		/* now remove the mappings */
    531 		pmap_kremove(batch_vastart, va - batch_vastart);
    532 		/* and free the pages */
    533 		for (i = 0; i < npgrm; i++) {
    534 			pg = PHYS_TO_VM_PAGE(pa[i]);
    535 			KASSERT(pg);
    536 			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
    537 			KASSERT((pg->flags & PG_BUSY) == 0);
    538 			uvm_pagefree(pg);
    539 		}
    540 	}
    541 #undef __PGRM_BATCH
    542 }
    543 
    544 #if defined(DEBUG)
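/*
 * uvm_km_check_empty: DEBUG check that no pages are mapped or hashed in
 * the given kernel VA range; panics if any are found.
 */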
    545 void
    546 uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
    547 {
    548 	struct vm_page *pg;
    549 	vaddr_t va;
    550 	paddr_t pa;
    551 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    552 
    553 	KDASSERT(VM_MAP_IS_KERNEL(map));
    554 	KDASSERT(vm_map_min(map) <= start);
    555 	KDASSERT(start < end);
    556 	KDASSERT(end <= vm_map_max(map));
    557 
    558 	for (va = start; va < end; va += PAGE_SIZE) {
    559 		if (pmap_extract(pmap_kernel(), va, &pa)) {
    560 			panic("uvm_km_check_empty: va %p has pa 0x%llx",
    561 			    (void *)va, (long long)pa);
    562 		}
    563 		mutex_enter(uvm_kernel_object->vmobjlock);
    564 		pg = uvm_pagelookup(uvm_kernel_object,
    565 		    va - vm_map_min(kernel_map));
    566 		mutex_exit(uvm_kernel_object->vmobjlock);
    567 		if (pg) {
    568 			panic("uvm_km_check_empty: "
    569 			    "has page hashed at %p", (const void *)va);
    570 		}
    571 	}
    572 }
    573 #endif /* defined(DEBUG) */
    574 
    575 /*
    576  * uvm_km_alloc: allocate an area of kernel memory.
    577  *
     578  * => NOTE: we can return 0, even if we can wait, if there is not enough
    579  *	free VM space in the map... caller should be prepared to handle
    580  *	this case.
    581  * => we return KVA of memory allocated
    582  */
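/*
 * Example usage (a hedged sketch, not compiled; the 4-page size and the
 * flag combination are illustrative assumptions, and the error handling
 * is the caller's responsibility as noted above):
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, 4 * PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...
 *	uvm_km_free(kernel_map, va, 4 * PAGE_SIZE, UVM_KMF_WIRED);
 */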
    583 
    584 vaddr_t
    585 uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    586 {
    587 	vaddr_t kva, loopva;
    588 	vaddr_t offset;
    589 	vsize_t loopsize;
    590 	struct vm_page *pg;
    591 	struct uvm_object *obj;
    592 	int pgaflags;
    593 	vm_prot_t prot, vaprot;
    594 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    595 
    596 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    597 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    598 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    599 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    600 	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
    601 	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);
    602 
    603 	/*
    604 	 * setup for call
    605 	 */
    606 
    607 	kva = vm_map_min(map);	/* hint */
    608 	size = round_page(size);
    609 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
     610 	UVMHIST_LOG(maphist,"  (map=%#jx, obj=%#jx, size=%#jx, flags=%jd)",
    611 	    (uintptr_t)map, (uintptr_t)obj, size, flags);
    612 
    613 	/*
    614 	 * allocate some virtual space
    615 	 */
    616 
    617 	vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW;
    618 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
    619 	    align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE,
    620 	    UVM_ADV_RANDOM,
    621 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
    622 	     | UVM_KMF_COLORMATCH)))) != 0)) {
    623 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    624 		return(0);
    625 	}
    626 
    627 	/*
    628 	 * if all we wanted was VA, return now
    629 	 */
    630 
    631 	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
    632 		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%jx)", kva,0,0,0);
    633 		return(kva);
    634 	}
    635 
    636 	/*
    637 	 * recover object offset from virtual address
    638 	 */
    639 
    640 	offset = kva - vm_map_min(kernel_map);
    641 	UVMHIST_LOG(maphist, "  kva=0x%jx, offset=0x%jx", kva, offset,0,0);
    642 
    643 	/*
    644 	 * now allocate and map in the memory... note that we are the only ones
     645 	 * who should ever get a handle on this area of VM.
    646 	 */
    647 
    648 	loopva = kva;
    649 	loopsize = size;
    650 
    651 	pgaflags = UVM_FLAG_COLORMATCH;
    652 	if (flags & UVM_KMF_NOWAIT)
    653 		pgaflags |= UVM_PGA_USERESERVE;
    654 	if (flags & UVM_KMF_ZERO)
    655 		pgaflags |= UVM_PGA_ZERO;
    656 	prot = VM_PROT_READ | VM_PROT_WRITE;
    657 	if (flags & UVM_KMF_EXEC)
    658 		prot |= VM_PROT_EXECUTE;
    659 	while (loopsize) {
    660 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
    661 		    "loopva=%#"PRIxVADDR, loopva);
    662 
    663 		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
    664 #ifdef UVM_KM_VMFREELIST
    665 		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
    666 #else
    667 		   UVM_PGA_STRAT_NORMAL, 0
    668 #endif
    669 		   );
    670 
    671 		/*
    672 		 * out of memory?
    673 		 */
    674 
    675 		if (__predict_false(pg == NULL)) {
    676 			if ((flags & UVM_KMF_NOWAIT) ||
    677 			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
    678 				/* free everything! */
    679 				uvm_km_free(map, kva, size,
    680 				    flags & UVM_KMF_TYPEMASK);
    681 				return (0);
    682 			} else {
    683 				uvm_wait("km_getwait2");	/* sleep here */
    684 				continue;
    685 			}
    686 		}
    687 
    688 		pg->flags &= ~PG_BUSY;	/* new page */
    689 		UVM_PAGE_OWN(pg, NULL);
    690 
    691 		/*
    692 		 * map it in
    693 		 */
    694 
    695 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    696 		    prot, PMAP_KMPAGE);
    697 		loopva += PAGE_SIZE;
    698 		offset += PAGE_SIZE;
    699 		loopsize -= PAGE_SIZE;
    700 	}
    701 
    702 	pmap_update(pmap_kernel());
    703 
    704 	UVMHIST_LOG(maphist,"<- done (kva=0x%jx)", kva,0,0,0);
    705 	return(kva);
    706 }
    707 
    708 /*
    709  * uvm_km_protect: change the protection of an allocated area
    710  */
    711 
    712 int
    713 uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot)
    714 {
    715 	return uvm_map_protect(map, addr, addr + round_page(size), prot, false);
    716 }
    717 
    718 /*
    719  * uvm_km_free: free an area of kernel memory
    720  */
    721 
    722 void
    723 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
    724 {
    725 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    726 
    727 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    728 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    729 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    730 	KASSERT((addr & PAGE_MASK) == 0);
    731 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    732 
    733 	size = round_page(size);
    734 
    735 	if (flags & UVM_KMF_PAGEABLE) {
    736 		uvm_km_pgremove(addr, addr + size);
    737 	} else if (flags & UVM_KMF_WIRED) {
    738 		/*
     739 		 * Note: uvm_km_pgremove_intrsafe() extracts each mapping
     740 		 * before removing it.  See comment below about KVA visibility.
    741 		 */
    742 		uvm_km_pgremove_intrsafe(map, addr, addr + size);
    743 	}
    744 
    745 	/*
    746 	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
    747 	 * KVA becomes globally available.
    748 	 */
    749 
    750 	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
    751 }
    752 
    753 /* Sanity; must specify both or none. */
    754 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    755     (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
    756 #error Must specify MAP and UNMAP together.
    757 #endif
    758 
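/*
 * uvm_km_kmem_alloc: allocate wired kernel memory from the given vmem
 * arena and map it.  Single-page allocations take the PMAP_MAP_POOLPAGE
 * shortcut when the pmap provides one.
 */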
    759 int
    760 uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    761     vmem_addr_t *addr)
    762 {
    763 	struct vm_page *pg;
    764 	vmem_addr_t va;
    765 	int rc;
    766 	vaddr_t loopva;
    767 	vsize_t loopsize;
    768 
    769 	size = round_page(size);
    770 
    771 #if defined(PMAP_MAP_POOLPAGE)
    772 	if (size == PAGE_SIZE) {
    773 again:
    774 #ifdef PMAP_ALLOC_POOLPAGE
    775 		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
    776 		   0 : UVM_PGA_USERESERVE);
    777 #else
    778 		pg = uvm_pagealloc(NULL, 0, NULL,
    779 		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
    780 #endif /* PMAP_ALLOC_POOLPAGE */
    781 		if (__predict_false(pg == NULL)) {
    782 			if (flags & VM_SLEEP) {
    783 				uvm_wait("plpg");
    784 				goto again;
    785 			}
    786 			return ENOMEM;
    787 		}
    788 		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
    789 		if (__predict_false(va == 0)) {
    790 			uvm_pagefree(pg);
    791 			return ENOMEM;
    792 		}
    793 		*addr = va;
    794 		return 0;
    795 	}
    796 #endif /* PMAP_MAP_POOLPAGE */
    797 
    798 	rc = vmem_alloc(vm, size, flags, &va);
    799 	if (rc != 0)
    800 		return rc;
    801 
    802 #ifdef PMAP_GROWKERNEL
    803 	/*
    804 	 * These VA allocations happen independently of uvm_map
    805 	 * so this allocation must not extend beyond the current limit.
    806 	 */
    807 	KASSERTMSG(uvm_maxkaddr >= va + size,
    808 	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
    809 	    uvm_maxkaddr, va, size);
    810 #endif
    811 
    812 	loopva = va;
    813 	loopsize = size;
    814 
    815 	while (loopsize) {
    816 		paddr_t pa __diagused;
    817 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
    818 		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
    819 		    " pa=%#"PRIxPADDR" vmem=%p",
    820 		    loopva, loopsize, pa, vm);
    821 
    822 		pg = uvm_pagealloc(NULL, loopva, NULL,
    823 		    UVM_FLAG_COLORMATCH
    824 		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
    825 		if (__predict_false(pg == NULL)) {
    826 			if (flags & VM_SLEEP) {
    827 				uvm_wait("plpg");
    828 				continue;
    829 			} else {
    830 				uvm_km_pgremove_intrsafe(kernel_map, va,
    831 				    va + size);
    832 				vmem_free(vm, va, size);
    833 				return ENOMEM;
    834 			}
    835 		}
    836 
    837 		pg->flags &= ~PG_BUSY;	/* new page */
    838 		UVM_PAGE_OWN(pg, NULL);
    839 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    840 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
    841 
    842 		loopva += PAGE_SIZE;
    843 		loopsize -= PAGE_SIZE;
    844 	}
    845 	pmap_update(pmap_kernel());
    846 
    847 	*addr = va;
    848 
    849 	return 0;
    850 }
    851 
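/*
 * uvm_km_kmem_free: unmap and free memory obtained from uvm_km_kmem_alloc,
 * returning the virtual space to the given vmem arena.
 */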
    852 void
    853 uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
    854 {
    855 
    856 	size = round_page(size);
    857 #if defined(PMAP_UNMAP_POOLPAGE)
    858 	if (size == PAGE_SIZE) {
    859 		paddr_t pa;
    860 
    861 		pa = PMAP_UNMAP_POOLPAGE(addr);
    862 		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
    863 		return;
    864 	}
    865 #endif /* PMAP_UNMAP_POOLPAGE */
    866 	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
    867 	pmap_update(pmap_kernel());
    868 
    869 	vmem_free(vm, addr, size);
    870 }
    871 
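/*
 * uvm_km_va_starved_p: return true if less than 10% of the kmem arena's
 * kernel virtual address space is free.
 */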
    872 bool
    873 uvm_km_va_starved_p(void)
    874 {
    875 	vmem_size_t total;
    876 	vmem_size_t free;
    877 
    878 	if (kmem_arena == NULL)
    879 		return false;
    880 
    881 	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
    882 	free = vmem_size(kmem_arena, VMEM_FREE);
    883 
    884 	return (free < (total / 10));
    885 }
    886