uvm_km.c revision 1.120.2.2
      1 /*	$NetBSD: uvm_km.c,v 1.120.2.2 2012/03/17 17:29:34 bouyer Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6  *
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to Berkeley by
     10  * The Mach Operating System project at Carnegie-Mellon University.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
     37  * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
     38  *
     39  *
     40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     41  * All rights reserved.
     42  *
     43  * Permission to use, copy, modify and distribute this software and
     44  * its documentation is hereby granted, provided that both the copyright
     45  * notice and this permission notice appear in all copies of the
     46  * software, derivative works or modified versions, and any portions
     47  * thereof, and that both notices appear in supporting documentation.
     48  *
     49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     52  *
     53  * Carnegie Mellon requests users of this software to return to
     54  *
      55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     56  *  School of Computer Science
     57  *  Carnegie Mellon University
     58  *  Pittsburgh PA 15213-3890
     59  *
     60  * any improvements or extensions that they make and grant Carnegie the
     61  * rights to redistribute these changes.
     62  */
     63 
     64 /*
     65  * uvm_km.c: handle kernel memory allocation and management
     66  */
     67 
     68 /*
     69  * overview of kernel memory management:
     70  *
     71  * the kernel virtual address space is mapped by "kernel_map."   kernel_map
     72  * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
     73  * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
     74  *
     75  * the kernel_map has several "submaps."   submaps can only appear in
     76  * the kernel_map (user processes can't use them).   submaps "take over"
     77  * the management of a sub-range of the kernel's address space.  submaps
     78  * are typically allocated at boot time and are never released.   kernel
     79  * virtual address space that is mapped by a submap is locked by the
     80  * submap's lock -- not the kernel_map's lock.
     81  *
     82  * thus, the useful feature of submaps is that they allow us to break
     83  * up the locking and protection of the kernel address space into smaller
     84  * chunks.
     85  *
     86  * the vm system has several standard kernel submaps, including:
     87  *   pager_map => used to map "buf" structures into kernel space
     88  *   exec_map => used during exec to handle exec args
     89  *   etc...
     90  *
     91  * the kernel allocates its private memory out of special uvm_objects whose
     92  * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
     93  * are "special" and never die).   all kernel objects should be thought of
     94  * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
      95  * object is as large as the kernel virtual address space (i.e. the
     96  * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
     97  *
     98  * note that just because a kernel object spans the entire kernel virtual
     99  * address space doesn't mean that it has to be mapped into the entire space.
    100  * large chunks of a kernel object's space go unused either because
    101  * that area of kernel VM is unmapped, or there is some other type of
     102  * object mapped into that range (e.g. a vnode).    for a submap's kernel
     103  * object, the only offsets that can ever be populated are those
     104  * managed by the submap.
    105  *
    106  * note that the "offset" in a kernel object is always the kernel virtual
    107  * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
    108  * example:
    109  *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
    110  *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
    111  *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
    112  *   then that means that the page at offset 0x235000 in kernel_object is
    113  *   mapped at 0xf8235000.
    114  *
     115  * kernel objects have one other special property: when the kernel virtual
    116  * memory mapping them is unmapped, the backing memory in the object is
    117  * freed right away.   this is done with the uvm_km_pgremove() function.
    118  * this has to be done because there is no backing store for kernel pages
    119  * and no need to save them after they are no longer referenced.
    120  */
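/*
 * to make the offset rule above concrete, here is a small illustrative
 * sketch (not compiled into the kernel; the numeric addresses repeat the
 * hypothetical values from the example above):
 *
 *	vaddr_t kva;
 *	voff_t off;
 *
 *	kva = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_PAGEABLE);
 *	off = kva - vm_map_min(kernel_map);
 *
 * with VM_MIN_KERNEL_ADDRESS == 0xf8000000 and kva == 0xf8235000, off is
 * 0x235000: once a page has been faulted in, the page at that offset in
 * kernel_object backs the mapping at kva, and uvm_km_pgremove() frees it
 * as soon as the mapping goes away.
 */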
    121 
    122 #include <sys/cdefs.h>
    123 __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.120.2.2 2012/03/17 17:29:34 bouyer Exp $");
    124 
    125 #include "opt_uvmhist.h"
    126 
    127 #include "opt_kmempages.h"
    128 
    129 #ifndef NKMEMPAGES
    130 #define NKMEMPAGES 0
    131 #endif
    132 
    133 /*
     134  * Defaults for lower and upper bounds for the kmem_arena page count.
    135  * Can be overridden by kernel config options.
    136  */
    137 #ifndef NKMEMPAGES_MIN
    138 #define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
    139 #endif
    140 
    141 #ifndef NKMEMPAGES_MAX
    142 #define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
    143 #endif
    144 
    145 
    146 #include <sys/param.h>
    147 #include <sys/systm.h>
    148 #include <sys/proc.h>
    149 #include <sys/pool.h>
    150 #include <sys/vmem.h>
    151 #include <sys/kmem.h>
    152 
    153 #include <uvm/uvm.h>
    154 
    155 /*
    156  * global data structures
    157  */
    158 
    159 struct vm_map *kernel_map = NULL;
    160 
    161 /*
     162  * local data structures
    163  */
    164 
    165 static struct vm_map		kernel_map_store;
    166 static struct vm_map_entry	kernel_image_mapent_store;
    167 static struct vm_map_entry	kernel_kmem_mapent_store;
    168 
    169 int nkmempages = 0;
    170 vaddr_t kmembase;
    171 vsize_t kmemsize;
    172 
    173 vmem_t *kmem_arena;
    174 vmem_t *kmem_va_arena;
    175 
    176 /*
    177  * kmeminit_nkmempages: calculate the size of kmem_arena.
    178  */
    179 void
    180 kmeminit_nkmempages(void)
    181 {
    182 	int npages;
    183 
    184 	if (nkmempages != 0) {
    185 		/*
     186 		 * It's already been set (by us having been here before);
     187 		 * bail out now.
    188 		 */
    189 		return;
    190 	}
    191 
    192 #if defined(PMAP_MAP_POOLPAGE)
    193 	npages = (physmem / 4);
    194 #else
    195 	npages = (physmem / 3) * 2;
    196 #endif /* defined(PMAP_MAP_POOLPAGE) */
    197 
    198 #ifndef NKMEMPAGES_MAX_UNLIMITED
    199 	if (npages > NKMEMPAGES_MAX)
    200 		npages = NKMEMPAGES_MAX;
    201 #endif
    202 
    203 	if (npages < NKMEMPAGES_MIN)
    204 		npages = NKMEMPAGES_MIN;
    205 
    206 	nkmempages = npages;
    207 }
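/*
 * worked example (illustrative; assumes 4 KiB pages): on a machine with
 * 1 GiB of RAM, physmem is 262144.  with PMAP_MAP_POOLPAGE defined this
 * gives npages = 65536, i.e. a 256 MiB kmem_arena; without it, npages =
 * 174762 (about 682 MiB).  both results are then limited by the
 * NKMEMPAGES_MIN/NKMEMPAGES_MAX checks above.
 */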
    208 
    209 /*
    210  * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
    211  * KVM already allocated for text, data, bss, and static data structures).
    212  *
    213  * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
    214  *    we assume that [vmin -> start] has already been allocated and that
    215  *    "end" is the end.
    216  */
    217 
    218 void
    219 uvm_km_bootstrap(vaddr_t start, vaddr_t end)
    220 {
    221 	bool kmem_arena_small;
    222 	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
    223 	struct uvm_map_args args;
    224 	int error;
    225 
    226 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    227 	UVMHIST_LOG(maphist, "start=%"PRIxVADDR" end=%#"PRIxVADDR,
    228 	    start, end, 0,0);
    229 
    230 	kmeminit_nkmempages();
    231 	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
    232 	kmem_arena_small = kmemsize < 64 * 1024 * 1024;
    233 
    234 	UVMHIST_LOG(maphist, "kmemsize=%#"PRIxVSIZE, kmemsize, 0,0,0);
    235 
    236 	/*
    237 	 * next, init kernel memory objects.
    238 	 */
    239 
    240 	/* kernel_object: for pageable anonymous kernel memory */
    241 	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
    242 				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
    243 
    244 	/*
    245 	 * init the map and reserve any space that might already
    246 	 * have been allocated kernel space before installing.
    247 	 */
    248 
    249 	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
    250 	kernel_map_store.pmap = pmap_kernel();
    251 	if (start != base) {
    252 		error = uvm_map_prepare(&kernel_map_store,
    253 		    base, start - base,
    254 		    NULL, UVM_UNKNOWN_OFFSET, 0,
    255 		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    256 		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    257 		if (!error) {
    258 			kernel_image_mapent_store.flags =
    259 			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    260 			error = uvm_map_enter(&kernel_map_store, &args,
    261 			    &kernel_image_mapent_store);
    262 		}
    263 
    264 		if (error)
    265 			panic(
    266 			    "uvm_km_bootstrap: could not reserve space for kernel");
    267 
    268 		kmembase = args.uma_start + args.uma_size;
    269 	} else {
    270 		kmembase = base;
    271 	}
    272 
    273 	error = uvm_map_prepare(&kernel_map_store,
    274 	    kmembase, kmemsize,
    275 	    NULL, UVM_UNKNOWN_OFFSET, 0,
    276 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    277 	    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    278 	if (!error) {
    279 		kernel_kmem_mapent_store.flags =
    280 		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
    281 		error = uvm_map_enter(&kernel_map_store, &args,
    282 		    &kernel_kmem_mapent_store);
    283 	}
    284 
    285 	if (error)
    286 		panic("uvm_km_bootstrap: could not reserve kernel kmem");
    287 
    288 	/*
    289 	 * install!
    290 	 */
    291 
    292 	kernel_map = &kernel_map_store;
    293 
    294 	pool_subsystem_init();
    295 	vmem_bootstrap();
    296 
    297 	kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE,
    298 	    NULL, NULL, NULL,
    299 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    300 
    301 	vmem_init(kmem_arena);
    302 
    303 	UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
    304 	    ", size=%#"PRIxVSIZE, kmembase, kmemsize, 0,0);
    305 
    306 	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
    307 	    vmem_alloc, vmem_free, kmem_arena,
    308 	    (kmem_arena_small ? 4 : 8) * PAGE_SIZE,
    309 	    VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    310 
    311 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
    312 }
    313 
    314 /*
     315  * uvm_km_init: init the kernel map's virtual memory caches
    316  * and start the pool/kmem allocator.
    317  */
    318 void
    319 uvm_km_init(void)
    320 {
    321 
    322 	kmem_init();
    323 
    324 	kmeminit(); // killme
    325 }
    326 
    327 /*
    328  * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
    329  * is allocated all references to that area of VM must go through it.  this
    330  * allows the locking of VAs in kernel_map to be broken up into regions.
    331  *
    332  * => if `fixed' is true, *vmin specifies where the region described
    334  *      by the submap must start
     335  * => if submap is non-NULL we use that as the submap, otherwise we
     336  *	allocate a new map
    337  */
    338 
    339 struct vm_map *
    340 uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    341     vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    342     struct vm_map *submap)
    343 {
    344 	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
    345 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    346 
    347 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    348 
    349 	size = round_page(size);	/* round up to pagesize */
    350 
    351 	/*
    352 	 * first allocate a blank spot in the parent map
    353 	 */
    354 
    355 	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
    356 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    357 	    UVM_ADV_RANDOM, mapflags)) != 0) {
    358 		panic("%s: unable to allocate space in parent map", __func__);
    359 	}
    360 
    361 	/*
    362 	 * set VM bounds (vmin is filled in by uvm_map)
    363 	 */
    364 
    365 	*vmax = *vmin + size;
    366 
    367 	/*
    368 	 * add references to pmap and create or init the submap
    369 	 */
    370 
    371 	pmap_reference(vm_map_pmap(map));
    372 	if (submap == NULL) {
    373 		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
    374 		if (submap == NULL)
    375 			panic("uvm_km_suballoc: unable to create submap");
    376 	}
    377 	uvm_map_setup(submap, *vmin, *vmax, flags);
    378 	submap->pmap = vm_map_pmap(map);
    379 
    380 	/*
    381 	 * now let uvm_map_submap plug in it...
    382 	 */
    383 
    384 	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
    385 		panic("uvm_km_suballoc: submap allocation failed");
    386 
    387 	return(submap);
    388 }
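/*
 * a minimal usage sketch, modelled on how submaps such as pager_map are
 * set up elsewhere in uvm (the names and the size constant below are
 * illustrative only):
 *
 *	static struct vm_map example_map_store;
 *	struct vm_map *example_map;
 *	vaddr_t example_sva, example_eva;
 *
 *	example_map = uvm_km_suballoc(kernel_map, &example_sva, &example_eva,
 *	    16 * 1024 * 1024, 0, false, &example_map_store);
 *
 * afterwards all KVA in [example_sva, example_eva) is managed (and locked)
 * by example_map rather than by kernel_map itself.
 */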
    389 
    390 /*
    391  * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
    392  */
    393 
    394 void
    395 uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
    396 {
    397 	struct uvm_object * const uobj = uvm_kernel_object;
    398 	const voff_t start = startva - vm_map_min(kernel_map);
    399 	const voff_t end = endva - vm_map_min(kernel_map);
    400 	struct vm_page *pg;
    401 	voff_t curoff, nextoff;
    402 	int swpgonlydelta = 0;
    403 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    404 
    405 	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
    406 	KASSERT(startva < endva);
    407 	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
    408 
    409 	mutex_enter(uobj->vmobjlock);
    410 	pmap_remove(pmap_kernel(), startva, endva);
    411 	for (curoff = start; curoff < end; curoff = nextoff) {
    412 		nextoff = curoff + PAGE_SIZE;
    413 		pg = uvm_pagelookup(uobj, curoff);
    414 		if (pg != NULL && pg->flags & PG_BUSY) {
    415 			pg->flags |= PG_WANTED;
    416 			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
    417 				    "km_pgrm", 0);
    418 			mutex_enter(uobj->vmobjlock);
    419 			nextoff = curoff;
    420 			continue;
    421 		}
    422 
    423 		/*
    424 		 * free the swap slot, then the page.
    425 		 */
    426 
    427 		if (pg == NULL &&
    428 		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
    429 			swpgonlydelta++;
    430 		}
    431 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
    432 		if (pg != NULL) {
    433 			mutex_enter(&uvm_pageqlock);
    434 			uvm_pagefree(pg);
    435 			mutex_exit(&uvm_pageqlock);
    436 		}
    437 	}
    438 	mutex_exit(uobj->vmobjlock);
    439 
    440 	if (swpgonlydelta > 0) {
    441 		mutex_enter(&uvm_swap_data_lock);
    442 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    443 		uvmexp.swpgonly -= swpgonlydelta;
    444 		mutex_exit(&uvm_swap_data_lock);
    445 	}
    446 }
    447 
    448 
    449 /*
     450  * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
    451  *    regions.
    452  *
    453  * => when you unmap a part of anonymous kernel memory you want to toss
    454  *    the pages right away.    (this is called from uvm_unmap_...).
    455  * => none of the pages will ever be busy, and none of them will ever
    456  *    be on the active or inactive queues (because they have no object).
    457  */
    458 
    459 void
    460 uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
    461 {
    462 #define __PGRM_BATCH 16
    463 	struct vm_page *pg;
    464 	paddr_t pa[__PGRM_BATCH];
    465 	int npgrm, i;
    466 	vaddr_t va, batch_vastart;
    467 
    468 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    469 
    470 	KASSERT(VM_MAP_IS_KERNEL(map));
    471 	KASSERT(vm_map_min(map) <= start);
    472 	KASSERT(start < end);
    473 	KASSERT(end <= vm_map_max(map));
    474 
    475 	for (va = start; va < end;) {
    476 		batch_vastart = va;
    477 		/* create a batch of at most __PGRM_BATCH pages to free */
    478 		for (i = 0;
    479 		     i < __PGRM_BATCH && va < end;
    480 		     va += PAGE_SIZE) {
    481 			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
    482 				continue;
    483 			}
    484 			i++;
    485 		}
    486 		npgrm = i;
    487 		/* now remove the mappings */
     488 		pmap_kremove(batch_vastart, va - batch_vastart);
    489 		/* and free the pages */
    490 		for (i = 0; i < npgrm; i++) {
    491 			pg = PHYS_TO_VM_PAGE(pa[i]);
    492 			KASSERT(pg);
    493 			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
    494 			KASSERT((pg->flags & PG_BUSY) == 0);
    495 			uvm_pagefree(pg);
    496 		}
    497 	}
    498 #undef __PGRM_BATCH
    499 }
    500 
    501 #if defined(DEBUG)
    502 void
    503 uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
    504 {
    505 	struct vm_page *pg;
    506 	vaddr_t va;
    507 	paddr_t pa;
    508 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    509 
    510 	KDASSERT(VM_MAP_IS_KERNEL(map));
    511 	KDASSERT(vm_map_min(map) <= start);
    512 	KDASSERT(start < end);
    513 	KDASSERT(end <= vm_map_max(map));
    514 
    515 	for (va = start; va < end; va += PAGE_SIZE) {
    516 		if (pmap_extract(pmap_kernel(), va, &pa)) {
    517 			panic("uvm_km_check_empty: va %p has pa 0x%llx",
    518 			    (void *)va, (long long)pa);
    519 		}
    520 		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
    521 			mutex_enter(uvm_kernel_object->vmobjlock);
    522 			pg = uvm_pagelookup(uvm_kernel_object,
    523 			    va - vm_map_min(kernel_map));
    524 			mutex_exit(uvm_kernel_object->vmobjlock);
    525 			if (pg) {
    526 				panic("uvm_km_check_empty: "
    527 				    "has page hashed at %p", (const void *)va);
    528 			}
    529 		}
    530 	}
    531 }
    532 #endif /* defined(DEBUG) */
    533 
    534 /*
    535  * uvm_km_alloc: allocate an area of kernel memory.
    536  *
     537  * => NOTE: we can return 0 even if we are allowed to wait, if there is
     538  *	not enough free VM space in the map... caller should be prepared
     539  *	to handle this case.
    540  * => we return KVA of memory allocated
    541  */
    542 
    543 vaddr_t
    544 uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    545 {
    546 	vaddr_t kva, loopva;
    547 	vaddr_t offset;
    548 	vsize_t loopsize;
    549 	struct vm_page *pg;
    550 	struct uvm_object *obj;
    551 	int pgaflags;
    552 	vm_prot_t prot;
    553 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    554 
    555 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    556 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    557 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    558 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    559 	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
    561 
    562 	/*
    563 	 * setup for call
    564 	 */
    565 
    566 	kva = vm_map_min(map);	/* hint */
    567 	size = round_page(size);
    568 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
    569 	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
    570 		    map, obj, size, flags);
    571 
    572 	/*
    573 	 * allocate some virtual space
    574 	 */
    575 
    576 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
    577 	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    578 	    UVM_ADV_RANDOM,
    579 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
    580 	     | UVM_KMF_COLORMATCH)))) != 0)) {
    581 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    582 		return(0);
    583 	}
    584 
    585 	/*
    586 	 * if all we wanted was VA, return now
    587 	 */
    588 
    589 	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
    590 		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
    591 		return(kva);
    592 	}
    593 
    594 	/*
    595 	 * recover object offset from virtual address
    596 	 */
    597 
    598 	offset = kva - vm_map_min(kernel_map);
    599 	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);
    600 
    601 	/*
    602 	 * now allocate and map in the memory... note that we are the only ones
     603 	 * who should ever get a handle on this area of VM.
    604 	 */
    605 
    606 	loopva = kva;
    607 	loopsize = size;
    608 
    609 	pgaflags = UVM_FLAG_COLORMATCH;
    610 	if (flags & UVM_KMF_NOWAIT)
    611 		pgaflags |= UVM_PGA_USERESERVE;
    612 	if (flags & UVM_KMF_ZERO)
    613 		pgaflags |= UVM_PGA_ZERO;
    614 	prot = VM_PROT_READ | VM_PROT_WRITE;
    615 	if (flags & UVM_KMF_EXEC)
    616 		prot |= VM_PROT_EXECUTE;
    617 	while (loopsize) {
    618 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
    619 		    "loopva=%#"PRIxVADDR, loopva);
    620 
    621 		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
    622 #ifdef UVM_KM_VMFREELIST
    623 		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
    624 #else
    625 		   UVM_PGA_STRAT_NORMAL, 0
    626 #endif
    627 		   );
    628 
    629 		/*
    630 		 * out of memory?
    631 		 */
    632 
    633 		if (__predict_false(pg == NULL)) {
    634 			if ((flags & UVM_KMF_NOWAIT) ||
    635 			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
    636 				/* free everything! */
    637 				uvm_km_free(map, kva, size,
    638 				    flags & UVM_KMF_TYPEMASK);
    639 				return (0);
    640 			} else {
    641 				uvm_wait("km_getwait2");	/* sleep here */
    642 				continue;
    643 			}
    644 		}
    645 
    646 		pg->flags &= ~PG_BUSY;	/* new page */
    647 		UVM_PAGE_OWN(pg, NULL);
    648 
    649 		/*
    650 		 * map it in
    651 		 */
    652 
    653 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    654 		    prot, PMAP_KMPAGE);
    655 		loopva += PAGE_SIZE;
    656 		offset += PAGE_SIZE;
    657 		loopsize -= PAGE_SIZE;
    658 	}
    659 
    660 	pmap_update(pmap_kernel());
    661 
    662 	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
    663 	return(kva);
    664 }
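/*
 * a minimal usage sketch: one page of wired, zeroed kernel memory,
 * released later with a matching uvm_km_free().  with UVM_KMF_CANFAIL
 * the allocation returns 0 rather than sleeping forever when memory
 * cannot be reclaimed, so the caller must check for failure:
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */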
    665 
    666 /*
    667  * uvm_km_free: free an area of kernel memory
    668  */
    669 
    670 void
    671 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
    672 {
    673 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    674 
    675 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    676 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    677 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    678 	KASSERT((addr & PAGE_MASK) == 0);
    679 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    680 
    681 	size = round_page(size);
    682 
    683 	if (flags & UVM_KMF_PAGEABLE) {
    684 		uvm_km_pgremove(addr, addr + size);
    685 	} else if (flags & UVM_KMF_WIRED) {
    686 		/*
     687 		 * Note: uvm_km_pgremove_intrsafe() extracts each mapping and
     688 		 * then removes it.  See comment below about KVA visibility.
    689 		 */
    690 		uvm_km_pgremove_intrsafe(map, addr, addr + size);
    691 	}
    692 
    693 	/*
    694 	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
    695 	 * KVA becomes globally available.
    696 	 */
    697 
    698 	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
    699 }
    700 
    701 /* Sanity; must specify both or none. */
    702 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    703     (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
    704 #error Must specify MAP and UNMAP together.
    705 #endif
    706 
    707 int
    708 uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    709     vmem_addr_t *addr)
    710 {
    711 	struct vm_page *pg;
    712 	vmem_addr_t va;
    713 	int rc;
    714 	vaddr_t loopva;
    715 	vsize_t loopsize;
    716 
    717 	size = round_page(size);
    718 
    719 #if defined(PMAP_MAP_POOLPAGE)
    720 	if (size == PAGE_SIZE) {
    721 again:
    722 #ifdef PMAP_ALLOC_POOLPAGE
    723 		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
    724 		   0 : UVM_PGA_USERESERVE);
    725 #else
    726 		pg = uvm_pagealloc(NULL, 0, NULL,
    727 		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
    728 #endif /* PMAP_ALLOC_POOLPAGE */
    729 		if (__predict_false(pg == NULL)) {
    730 			if (flags & VM_SLEEP) {
    731 				uvm_wait("plpg");
    732 				goto again;
    733 			}
    734 			return ENOMEM;
    735 		}
    736 		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
    737 		if (__predict_false(va == 0)) {
    738 			uvm_pagefree(pg);
    739 			return ENOMEM;
    740 		}
    741 		*addr = va;
    742 		return 0;
    743 	}
    744 #endif /* PMAP_MAP_POOLPAGE */
    745 
    746 	rc = vmem_alloc(vm, size, flags, &va);
    747 	if (rc != 0)
    748 		return rc;
    749 
    750 	loopva = va;
    751 	loopsize = size;
    752 
    753 	while (loopsize) {
    754 		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
    755 		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE" vmem=%p",
    756 		    loopva, loopsize, vm);
    757 
    758 		pg = uvm_pagealloc(NULL, loopva, NULL,
    759 		    UVM_FLAG_COLORMATCH
    760 		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
    761 		if (__predict_false(pg == NULL)) {
    762 			if (flags & VM_SLEEP) {
    763 				uvm_wait("plpg");
    764 				continue;
    765 			} else {
    766 				uvm_km_pgremove_intrsafe(kernel_map, va,
    767 				    va + size);
     768 				vmem_free(vm, va, size);
    769 				return ENOMEM;
    770 			}
    771 		}
    772 
    773 		pg->flags &= ~PG_BUSY;	/* new page */
    774 		UVM_PAGE_OWN(pg, NULL);
    775 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    776 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
    777 
    778 		loopva += PAGE_SIZE;
    779 		loopsize -= PAGE_SIZE;
    780 	}
    781 	pmap_update(pmap_kernel());
    782 
    783 	*addr = va;
    784 
    785 	return 0;
    786 }
    787 
    788 void
    789 uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
    790 {
    791 
    792 	size = round_page(size);
    793 #if defined(PMAP_UNMAP_POOLPAGE)
    794 	if (size == PAGE_SIZE) {
    795 		paddr_t pa;
    796 
    797 		pa = PMAP_UNMAP_POOLPAGE(addr);
    798 		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
    799 		return;
    800 	}
    801 #endif /* PMAP_UNMAP_POOLPAGE */
    802 	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
    803 	pmap_update(pmap_kernel());
    804 
    805 	vmem_free(vm, addr, size);
    806 }
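/*
 * a minimal usage sketch of the vmem-backed interface above.  the
 * pool/kmem layers obtain their wired backing pages through this
 * interface on top of kmem_va_arena in roughly this way (a hedged
 * sketch, not the exact pool(9)/kmem(9) internals):
 *
 *	vmem_addr_t va;
 *
 *	if (uvm_km_kmem_alloc(kmem_va_arena, PAGE_SIZE, VM_SLEEP, &va) == 0) {
 *		... use (void *)va ...
 *		uvm_km_kmem_free(kmem_va_arena, va, PAGE_SIZE);
 *	}
 */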
    807 
    808 bool
    809 uvm_km_va_starved_p(void)
    810 {
    811 	vmem_size_t total;
    812 	vmem_size_t free;
    813 
    814 	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
    815 	free = vmem_size(kmem_arena, VMEM_FREE);
    816 
    817 	return (free < (total / 10));
    818 }
    819