      1 /*	$NetBSD: uvm_km.c,v 1.111 2011/09/01 06:40:28 matt Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6  *
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to Berkeley by
     10  * The Mach Operating System project at Carnegie-Mellon University.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
     37  * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
     38  *
     39  *
     40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     41  * All rights reserved.
     42  *
     43  * Permission to use, copy, modify and distribute this software and
     44  * its documentation is hereby granted, provided that both the copyright
     45  * notice and this permission notice appear in all copies of the
     46  * software, derivative works or modified versions, and any portions
     47  * thereof, and that both notices appear in supporting documentation.
     48  *
     49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     52  *
     53  * Carnegie Mellon requests users of this software to return to
     54  *
     55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     56  *  School of Computer Science
     57  *  Carnegie Mellon University
     58  *  Pittsburgh PA 15213-3890
     59  *
     60  * any improvements or extensions that they make and grant Carnegie the
     61  * rights to redistribute these changes.
     62  */
     63 
     64 /*
     65  * uvm_km.c: handle kernel memory allocation and management
     66  */
     67 
     68 /*
     69  * overview of kernel memory management:
     70  *
     71  * the kernel virtual address space is mapped by "kernel_map."   kernel_map
     72  * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
     73  * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
     74  *
     75  * the kernel_map has several "submaps."   submaps can only appear in
     76  * the kernel_map (user processes can't use them).   submaps "take over"
     77  * the management of a sub-range of the kernel's address space.  submaps
     78  * are typically allocated at boot time and are never released.   kernel
     79  * virtual address space that is mapped by a submap is locked by the
     80  * submap's lock -- not the kernel_map's lock.
     81  *
     82  * thus, the useful feature of submaps is that they allow us to break
     83  * up the locking and protection of the kernel address space into smaller
     84  * chunks.
     85  *
     86  * the vm system has several standard kernel submaps, including:
     87  *   kmem_map => contains only wired kernel memory for the kernel
     88  *		malloc.
     89  *   pager_map => used to map "buf" structures into kernel space
     90  *   exec_map => used during exec to handle exec args
     91  *   etc...
     92  *
     93  * the kernel allocates its private memory out of special uvm_objects whose
     94  * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
     95  * are "special" and never die).   all kernel objects should be thought of
     96  * as large, fixed-sized, sparsely populated uvm_objects.   the size of each
     97  * kernel object is equal to that of the kernel virtual address space (i.e. the
     98  * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
     99  *
    100  * note that just because a kernel object spans the entire kernel virtual
    101  * address space doesn't mean that it has to be mapped into the entire space.
    102  * large chunks of a kernel object's space go unused either because
    103  * that area of kernel VM is unmapped, or there is some other type of
    104  * object mapped into that range (e.g. a vnode).    for a submap's kernel
    105  * object, the only parts of the object that can ever be populated are the
    106  * offsets that are managed by the submap.
    107  *
    108  * note that the "offset" in a kernel object is always the kernel virtual
    109  * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
    110  * example:
    111  *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
    112  *   uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED) [allocate 1 wired down page in the
    113  *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
    114  *   then that means that the page at offset 0x235000 in kernel_object is
    115  *   mapped at 0xf8235000.
    116  *
    117  * kernel objects have one other special property: when the kernel virtual
    118  * memory mapping them is unmapped, the backing memory in the object is
    119  * freed right away.   this is done with the uvm_km_pgremove() function.
    120  * this has to be done because there is no backing store for kernel pages
    121  * and no need to save them after they are no longer referenced.
    122  */
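
/*
 * illustrative sketch (kept under #if 0, never compiled): the offset
 * relation described above, written out the way uvm_km_alloc() below
 * actually computes it.  the function name is made up for the example.
 */
#if 0
static voff_t
example_kva_to_kernel_object_offset(vaddr_t kva)
{

	/* offsets into uvm_kernel_object are relative to the start of KVA */
	return (voff_t)(kva - vm_map_min(kernel_map));
}
#endif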
    123 
    124 #include <sys/cdefs.h>
    125 __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.111 2011/09/01 06:40:28 matt Exp $");
    126 
    127 #include "opt_uvmhist.h"
    128 
    129 #include <sys/param.h>
    130 #include <sys/malloc.h>
    131 #include <sys/systm.h>
    132 #include <sys/proc.h>
    133 #include <sys/pool.h>
    134 
    135 #include <uvm/uvm.h>
    136 
    137 /*
    138  * global data structures
    139  */
    140 
    141 struct vm_map *kernel_map = NULL;
    142 
    143 /*
    144  * local data structures
    145  */
    146 
    147 static struct vm_map_kernel	kernel_map_store;
    148 static struct vm_map_entry	kernel_first_mapent_store;
    149 
    150 #if !defined(PMAP_MAP_POOLPAGE)
    151 
    152 /*
    153  * kva cache
    154  *
    155  * XXX maybe it's better to do this at the uvm_map layer.
    156  */
    157 
    158 #define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */
    159 
    160 static void *km_vacache_alloc(struct pool *, int);
    161 static void km_vacache_free(struct pool *, void *);
    162 static void km_vacache_init(struct vm_map *, const char *, size_t);
    163 
    164 /* XXX */
    165 #define	KM_VACACHE_POOL_TO_MAP(pp) \
    166 	((struct vm_map *)((char *)(pp) - \
    167 	    offsetof(struct vm_map_kernel, vmk_vacache)))
    168 
    169 static void *
    170 km_vacache_alloc(struct pool *pp, int flags)
    171 {
    172 	vaddr_t va;
    173 	size_t size;
    174 	struct vm_map *map;
    175 	size = pp->pr_alloc->pa_pagesz;
    176 
    177 	map = KM_VACACHE_POOL_TO_MAP(pp);
    178 
    179 	va = vm_map_min(map); /* hint */
    180 	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
    181 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    182 	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
    183 	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
    184 	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
    185 		return NULL;
    186 
    187 	return (void *)va;
    188 }
    189 
    190 static void
    191 km_vacache_free(struct pool *pp, void *v)
    192 {
    193 	vaddr_t va = (vaddr_t)v;
    194 	size_t size = pp->pr_alloc->pa_pagesz;
    195 	struct vm_map *map;
    196 
    197 	map = KM_VACACHE_POOL_TO_MAP(pp);
    198 	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
    199 }
    200 
    201 /*
    202  * km_vacache_init: initialize kva cache.
    203  */
    204 
    205 static void
    206 km_vacache_init(struct vm_map *map, const char *name, size_t size)
    207 {
    208 	struct vm_map_kernel *vmk;
    209 	struct pool *pp;
    210 	struct pool_allocator *pa;
    211 	int ipl;
    212 
    213 	KASSERT(VM_MAP_IS_KERNEL(map));
    214 	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */
    215 
    216 
    217 	vmk = vm_map_to_kernel(map);
    218 	pp = &vmk->vmk_vacache;
    219 	pa = &vmk->vmk_vacache_allocator;
    220 	memset(pa, 0, sizeof(*pa));
    221 	pa->pa_alloc = km_vacache_alloc;
    222 	pa->pa_free = km_vacache_free;
    223 	pa->pa_pagesz = (unsigned int)size;
    224 	pa->pa_backingmap = map;
    225 	pa->pa_backingmapptr = NULL;
    226 
    227 	if ((map->flags & VM_MAP_INTRSAFE) != 0)
    228 		ipl = IPL_VM;
    229 	else
    230 		ipl = IPL_NONE;
    231 
    232 	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
    233 	    ipl);
    234 }
    235 
    236 void
    237 uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
    238 {
    239 
    240 	map->flags |= VM_MAP_VACACHE;
    241 	if (size == 0)
    242 		size = KM_VACACHE_SIZE;
    243 	km_vacache_init(map, name, size);
    244 }
    245 
    246 #else /* !defined(PMAP_MAP_POOLPAGE) */
    247 
    248 void
    249 uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
    250 {
    251 
    252 	/* nothing */
    253 }
    254 
    255 #endif /* !defined(PMAP_MAP_POOLPAGE) */
    256 
    257 void
    258 uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
    259 {
    260 	struct vm_map_kernel *vmk = vm_map_to_kernel(map);
    261 
    262 	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
    263 }
    264 
    265 /*
    266  * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
    267  * KVM already allocated for text, data, bss, and static data structures).
    268  *
    269  * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
    270  *    we assume that [VM_MIN_KERNEL_ADDRESS -> start] has already been
    271  *    allocated and that "end" is the end.
    272  */
    273 
    274 void
    275 uvm_km_init(vaddr_t start, vaddr_t end)
    276 {
    277 	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
    278 
    279 	/*
    280 	 * next, init kernel memory objects.
    281 	 */
    282 
    283 	/* kernel_object: for pageable anonymous kernel memory */
    284 	uao_init();
    285 	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
    286 				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
    287 
    288 	/*
    289 	 * init the map and reserve any space that might already
    290 	 * have been allocated kernel space before installing.
    291 	 */
    292 
    293 	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
    294 	kernel_map_store.vmk_map.pmap = pmap_kernel();
    295 	if (start != base) {
    296 		int error;
    297 		struct uvm_map_args args;
    298 
    299 		error = uvm_map_prepare(&kernel_map_store.vmk_map,
    300 		    base, start - base,
    301 		    NULL, UVM_UNKNOWN_OFFSET, 0,
    302 		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    303 		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    304 		if (!error) {
    305 			kernel_first_mapent_store.flags =
    306 			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
    307 			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
    308 			    &kernel_first_mapent_store);
    309 		}
    310 
    311 		if (error)
    312 			panic(
    313 			    "uvm_km_init: could not reserve space for kernel");
    314 	}
    315 
    316 	/*
    317 	 * install!
    318 	 */
    319 
    320 	kernel_map = &kernel_map_store.vmk_map;
    321 	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
    322 }
    323 
    324 /*
    325  * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
    326  * is allocated all references to that area of VM must go through it.  this
    327  * allows the locking of VAs in kernel_map to be broken up into regions.
    328  *
    329  * => if `fixed' is true, *vmin specifies where the region described
    330  *      by the submap must start
    331  * => if submap is non NULL we use that as the submap, otherwise we
    332  *	alloc a new map
    333  */
    334 
    335 struct vm_map *
    336 uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    337     vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    338     struct vm_map_kernel *submap)
    339 {
    340 	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
    341 
    342 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    343 
    344 	size = round_page(size);	/* round up to pagesize */
    345 	size += uvm_mapent_overhead(size, flags);
    346 
    347 	/*
    348 	 * first allocate a blank spot in the parent map
    349 	 */
    350 
    351 	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
    352 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    353 	    UVM_ADV_RANDOM, mapflags)) != 0) {
    354 	       panic("uvm_km_suballoc: unable to allocate space in parent map");
    355 	}
    356 
    357 	/*
    358 	 * set VM bounds (vmin is filled in by uvm_map)
    359 	 */
    360 
    361 	*vmax = *vmin + size;
    362 
    363 	/*
    364 	 * add references to pmap and create or init the submap
    365 	 */
    366 
    367 	pmap_reference(vm_map_pmap(map));
    368 	if (submap == NULL) {
    369 		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
    370 		if (submap == NULL)
    371 			panic("uvm_km_suballoc: unable to create submap");
    372 	}
    373 	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
    374 	submap->vmk_map.pmap = vm_map_pmap(map);
    375 
    376 	/*
    377 	 * now let uvm_map_submap plug it in...
    378 	 */
    379 
    380 	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
    381 		panic("uvm_km_suballoc: submap allocation failed");
    382 
    383 	return(&submap->vmk_map);
    384 }
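
/*
 * illustrative sketch (kept under #if 0, never compiled): a boot-time
 * caller typically carves a submap out of kernel_map along these lines.
 * "foo_map", "foo_map_store", "foo_size" and the address variables are
 * made-up names for the example.
 */
#if 0
static struct vm_map_kernel foo_map_store;
struct vm_map *foo_map;

void
example_foo_map_init(vsize_t foo_size)
{
	vaddr_t foo_minva = 0, foo_maxva;

	/* both addresses are filled in for us, since fixed == false */
	foo_map = uvm_km_suballoc(kernel_map, &foo_minva, &foo_maxva,
	    foo_size, VM_MAP_PAGEABLE, false, &foo_map_store);
}
#endif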
    385 
    386 /*
    387  * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
    388  */
    389 
    390 void
    391 uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
    392 {
    393 	struct uvm_object * const uobj = uvm_kernel_object;
    394 	const voff_t start = startva - vm_map_min(kernel_map);
    395 	const voff_t end = endva - vm_map_min(kernel_map);
    396 	struct vm_page *pg;
    397 	voff_t curoff, nextoff;
    398 	int swpgonlydelta = 0;
    399 	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
    400 
    401 	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
    402 	KASSERT(startva < endva);
    403 	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
    404 
    405 	mutex_enter(uobj->vmobjlock);
    406 	pmap_remove(pmap_kernel(), startva, endva);
    407 	for (curoff = start; curoff < end; curoff = nextoff) {
    408 		nextoff = curoff + PAGE_SIZE;
    409 		pg = uvm_pagelookup(uobj, curoff);
    410 		if (pg != NULL && pg->flags & PG_BUSY) {
    411 			pg->flags |= PG_WANTED;
    412 			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
    413 				    "km_pgrm", 0);
    414 			mutex_enter(uobj->vmobjlock);
    415 			nextoff = curoff;
    416 			continue;
    417 		}
    418 
    419 		/*
    420 		 * free the swap slot, then the page.
    421 		 */
    422 
    423 		if (pg == NULL &&
    424 		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
    425 			swpgonlydelta++;
    426 		}
    427 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
    428 		if (pg != NULL) {
    429 			mutex_enter(&uvm_pageqlock);
    430 			uvm_pagefree(pg);
    431 			mutex_exit(&uvm_pageqlock);
    432 		}
    433 	}
    434 	mutex_exit(uobj->vmobjlock);
    435 
    436 	if (swpgonlydelta > 0) {
    437 		mutex_enter(&uvm_swap_data_lock);
    438 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    439 		uvmexp.swpgonly -= swpgonlydelta;
    440 		mutex_exit(&uvm_swap_data_lock);
    441 	}
    442 }
    443 
    444 
    445 /*
    446  * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
    447  *    regions.
    448  *
    449  * => when you unmap a part of anonymous kernel memory you want to toss
    450  *    the pages right away.    (this is called from uvm_unmap_...).
    451  * => none of the pages will ever be busy, and none of them will ever
    452  *    be on the active or inactive queues (because they have no object).
    453  */
    454 
    455 void
    456 uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
    457 {
    458 	struct vm_page *pg;
    459 	paddr_t pa;
    460 	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
    461 
    462 	KASSERT(VM_MAP_IS_KERNEL(map));
    463 	KASSERT(vm_map_min(map) <= start);
    464 	KASSERT(start < end);
    465 	KASSERT(end <= vm_map_max(map));
    466 
    467 	for (; start < end; start += PAGE_SIZE) {
    468 		if (!pmap_extract(pmap_kernel(), start, &pa)) {
    469 			continue;
    470 		}
    471 		pg = PHYS_TO_VM_PAGE(pa);
    472 		KASSERT(pg);
    473 		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
    474 		KASSERT((pg->flags & PG_BUSY) == 0);
    475 		uvm_pagefree(pg);
    476 	}
    477 }
    478 
    479 #if defined(DEBUG)
    480 void
    481 uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
    482 {
    483 	struct vm_page *pg;
    484 	vaddr_t va;
    485 	paddr_t pa;
    486 
    487 	KDASSERT(VM_MAP_IS_KERNEL(map));
    488 	KDASSERT(vm_map_min(map) <= start);
    489 	KDASSERT(start < end);
    490 	KDASSERT(end <= vm_map_max(map));
    491 
    492 	for (va = start; va < end; va += PAGE_SIZE) {
    493 		if (pmap_extract(pmap_kernel(), va, &pa)) {
    494 			panic("uvm_km_check_empty: va %p has pa 0x%llx",
    495 			    (void *)va, (long long)pa);
    496 		}
    497 		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
    498 			mutex_enter(uvm_kernel_object->vmobjlock);
    499 			pg = uvm_pagelookup(uvm_kernel_object,
    500 			    va - vm_map_min(kernel_map));
    501 			mutex_exit(uvm_kernel_object->vmobjlock);
    502 			if (pg) {
    503 				panic("uvm_km_check_empty: "
    504 				    "has page hashed at %p", (const void *)va);
    505 			}
    506 		}
    507 	}
    508 }
    509 #endif /* defined(DEBUG) */
    510 
    511 /*
    512  * uvm_km_alloc: allocate an area of kernel memory.
    513  *
    514  * => NOTE: we can return 0 even if we are able to wait, if there is not enough
    515  *	free VM space in the map... the caller should be prepared to handle
    516  *	this case.
    517  * => we return KVA of memory allocated
    518  */
    519 
    520 vaddr_t
    521 uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    522 {
    523 	vaddr_t kva, loopva;
    524 	vaddr_t offset;
    525 	vsize_t loopsize;
    526 	struct vm_page *pg;
    527 	struct uvm_object *obj;
    528 	int pgaflags;
    529 	vm_prot_t prot;
    530 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    531 
    532 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    533 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    534 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    535 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    536 	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
    537 	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);
    538 
    539 	/*
    540 	 * setup for call
    541 	 */
    542 
    543 	kva = vm_map_min(map);	/* hint */
    544 	size = round_page(size);
    545 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
    546 	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
    547 		    map, obj, size, flags);
    548 
    549 	/*
    550 	 * allocate some virtual space
    551 	 */
    552 
    553 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
    554 	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    555 	    UVM_ADV_RANDOM,
    556 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
    557 	     | UVM_KMF_COLORMATCH))
    558 	    | UVM_FLAG_QUANTUM)) != 0)) {
    559 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    560 		return(0);
    561 	}
    562 
    563 	/*
    564 	 * if all we wanted was VA, return now
    565 	 */
    566 
    567 	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
    568 		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
    569 		return(kva);
    570 	}
    571 
    572 	/*
    573 	 * recover object offset from virtual address
    574 	 */
    575 
    576 	offset = kva - vm_map_min(kernel_map);
    577 	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);
    578 
    579 	/*
    580 	 * now allocate and map in the memory... note that we are the only ones
    581 	 * who should ever get a handle on this area of VM.
    582 	 */
    583 
    584 	loopva = kva;
    585 	loopsize = size;
    586 
    587 	pgaflags = UVM_FLAG_COLORMATCH;
    588 	if (flags & UVM_KMF_NOWAIT)
    589 		pgaflags |= UVM_PGA_USERESERVE;
    590 	if (flags & UVM_KMF_ZERO)
    591 		pgaflags |= UVM_PGA_ZERO;
    592 	prot = VM_PROT_READ | VM_PROT_WRITE;
    593 	if (flags & UVM_KMF_EXEC)
    594 		prot |= VM_PROT_EXECUTE;
    595 	while (loopsize) {
    596 		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));
    597 
    598 		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
    599 #ifdef UVM_KM_VMFREELIST
    600 		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
    601 #else
    602 		   UVM_PGA_STRAT_NORMAL, 0
    603 #endif
    604 		   );
    605 
    606 		/*
    607 		 * out of memory?
    608 		 */
    609 
    610 		if (__predict_false(pg == NULL)) {
    611 			if ((flags & UVM_KMF_NOWAIT) ||
    612 			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
    613 				/* free everything! */
    614 				uvm_km_free(map, kva, size,
    615 				    flags & UVM_KMF_TYPEMASK);
    616 				return (0);
    617 			} else {
    618 				uvm_wait("km_getwait2");	/* sleep here */
    619 				continue;
    620 			}
    621 		}
    622 
    623 		pg->flags &= ~PG_BUSY;	/* new page */
    624 		UVM_PAGE_OWN(pg, NULL);
    625 
    626 		/*
    627 		 * map it in
    628 		 */
    629 
    630 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
    631 		    prot, PMAP_KMPAGE);
    632 		loopva += PAGE_SIZE;
    633 		offset += PAGE_SIZE;
    634 		loopsize -= PAGE_SIZE;
    635 	}
    636 
    637        	pmap_update(pmap_kernel());
    638 
    639 	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
    640 	return(kva);
    641 }
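
/*
 * illustrative sketch (kept under #if 0, never compiled): a typical
 * caller allocating, using and freeing wired, zeroed kernel memory.
 * the type flag passed to uvm_km_free() must match the one used at
 * allocation time.  the function name is made up for the example.
 */
#if 0
static int
example_wired_use(vsize_t sz)
{
	vaddr_t va;

	sz = round_page(sz);
	va = uvm_km_alloc(kernel_map, sz, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (va == 0)
		return ENOMEM;	/* can happen even when we are allowed to wait */

	/* ... use the wired, zero-filled memory at va ... */

	uvm_km_free(kernel_map, va, sz, UVM_KMF_WIRED);
	return 0;
}
#endif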
    642 
    643 /*
    644  * uvm_km_free: free an area of kernel memory
    645  */
    646 
    647 void
    648 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
    649 {
    650 
    651 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    652 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    653 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    654 	KASSERT((addr & PAGE_MASK) == 0);
    655 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    656 
    657 	size = round_page(size);
    658 
    659 	if (flags & UVM_KMF_PAGEABLE) {
    660 		uvm_km_pgremove(addr, addr + size);
    661 	} else if (flags & UVM_KMF_WIRED) {
    662 		/*
    663 		 * Note: uvm_km_pgremove_intrsafe() needs the mappings to look up the
    664 		 * pages, so remove them afterwards.  See comment below about KVA visibility.
    665 		 */
    666 		uvm_km_pgremove_intrsafe(map, addr, addr + size);
    667 		pmap_kremove(addr, size);
    668 	}
    669 
    670 	/*
    671 	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
    672 	 * KVA becomes globally available.
    673 	 */
    674 
    675 	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
    676 }
    677 
    678 /* Sanity; must specify both or none. */
    679 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    680     (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
    681 #error Must specify MAP and UNMAP together.
    682 #endif
    683 
    684 /*
    685  * uvm_km_alloc_poolpage: allocate a page for the pool allocator
    686  *
    687  * => if the pmap specifies an alternate mapping method, we use it.
    688  */
    689 
    690 /* ARGSUSED */
    691 vaddr_t
    692 uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
    693 {
    694 #if defined(PMAP_MAP_POOLPAGE)
    695 	return uvm_km_alloc_poolpage(map, waitok);
    696 #else
    697 	struct vm_page *pg;
    698 	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
    699 	vaddr_t va;
    700 
    701 	if ((map->flags & VM_MAP_VACACHE) == 0)
    702 		return uvm_km_alloc_poolpage(map, waitok);
    703 
    704 	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
    705 	if (va == 0)
    706 		return 0;
    707 	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
    708 again:
    709 	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
    710 	if (__predict_false(pg == NULL)) {
    711 		if (waitok) {
    712 			uvm_wait("plpg");
    713 			goto again;
    714 		} else {
    715 			pool_put(pp, (void *)va);
    716 			return 0;
    717 		}
    718 	}
    719 	pg->flags &= ~PG_BUSY;	/* new page */
    720 	UVM_PAGE_OWN(pg, NULL);
    721 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
    722 	    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
    723 	pmap_update(pmap_kernel());
    724 
    725 	return va;
    726 #endif /* PMAP_MAP_POOLPAGE */
    727 }
    728 
    729 vaddr_t
    730 uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
    731 {
    732 #if defined(PMAP_MAP_POOLPAGE)
    733 	struct vm_page *pg;
    734 	vaddr_t va;
    735 
    736 
    737  again:
    738 #ifdef PMAP_ALLOC_POOLPAGE
    739 	pg = PMAP_ALLOC_POOLPAGE(waitok ? 0 : UVM_PGA_USERESERVE);
    740 #else
    741 	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
    742 #endif
    743 	if (__predict_false(pg == NULL)) {
    744 		if (waitok) {
    745 			uvm_wait("plpg");
    746 			goto again;
    747 		} else
    748 			return (0);
    749 	}
    750 	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
    751 	if (__predict_false(va == 0))
    752 		uvm_pagefree(pg);
    753 	return (va);
    754 #else
    755 	vaddr_t va;
    756 
    757 	va = uvm_km_alloc(map, PAGE_SIZE, 0,
    758 	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
    759 	return (va);
    760 #endif /* PMAP_MAP_POOLPAGE */
    761 }
    762 
    763 /*
    764  * uvm_km_free_poolpage: free a previously allocated pool page
    765  *
    766  * => if the pmap specifies an alternate unmapping method, we use it.
    767  */
    768 
    769 /* ARGSUSED */
    770 void
    771 uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
    772 {
    773 #if defined(PMAP_UNMAP_POOLPAGE)
    774 	uvm_km_free_poolpage(map, addr);
    775 #else
    776 	struct pool *pp;
    777 
    778 	if ((map->flags & VM_MAP_VACACHE) == 0) {
    779 		uvm_km_free_poolpage(map, addr);
    780 		return;
    781 	}
    782 
    783 	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
    784 	uvm_km_pgremove_intrsafe(map, addr, addr + PAGE_SIZE);
    785 	pmap_kremove(addr, PAGE_SIZE);
    786 #if defined(DEBUG)
    787 	pmap_update(pmap_kernel());
    788 #endif
    789 	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
    790 	pp = &vm_map_to_kernel(map)->vmk_vacache;
    791 	pool_put(pp, (void *)addr);
    792 #endif
    793 }
    794 
    795 /* ARGSUSED */
    796 void
    797 uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
    798 {
    799 #if defined(PMAP_UNMAP_POOLPAGE)
    800 	paddr_t pa;
    801 
    802 	pa = PMAP_UNMAP_POOLPAGE(addr);
    803 	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
    804 #else
    805 	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
    806 #endif /* PMAP_UNMAP_POOLPAGE */
    807 }
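
/*
 * illustrative sketch (kept under #if 0, never compiled): a pool page
 * allocator backend, in the style of the ones in subr_pool.c, would
 * pair the two *_cache routines above roughly like this.  the function
 * names are made up, and kmem_map is assumed to be the usual kernel
 * malloc submap.
 */
#if 0
static void *
example_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
	    (flags & PR_WAITOK) != 0);
}

static void
example_poolpage_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
}
#endif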
    808