      1 /*	$NetBSD: uvm_km.c,v 1.89.6.1 2006/10/22 06:07:52 yamt Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6  *
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to Berkeley by
     10  * The Mach Operating System project at Carnegie-Mellon University.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by Charles D. Cranor,
     23  *      Washington University, the University of California, Berkeley and
     24  *      its contributors.
     25  * 4. Neither the name of the University nor the names of its contributors
     26  *    may be used to endorse or promote products derived from this software
     27  *    without specific prior written permission.
     28  *
     29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     39  * SUCH DAMAGE.
     40  *
     41  *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
     42  * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
     43  *
     44  *
     45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     46  * All rights reserved.
     47  *
     48  * Permission to use, copy, modify and distribute this software and
     49  * its documentation is hereby granted, provided that both the copyright
     50  * notice and this permission notice appear in all copies of the
     51  * software, derivative works or modified versions, and any portions
     52  * thereof, and that both notices appear in supporting documentation.
     53  *
     54  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     55  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     56  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     57  *
     58  * Carnegie Mellon requests users of this software to return to
     59  *
     60  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     61  *  School of Computer Science
     62  *  Carnegie Mellon University
     63  *  Pittsburgh PA 15213-3890
     64  *
     65  * any improvements or extensions that they make and grant Carnegie the
     66  * rights to redistribute these changes.
     67  */
     68 
     69 /*
     70  * uvm_km.c: handle kernel memory allocation and management
     71  */
     72 
     73 /*
     74  * overview of kernel memory management:
     75  *
     76  * the kernel virtual address space is mapped by "kernel_map."   kernel_map
     77  * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
     78  * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
     79  *
     80  * the kernel_map has several "submaps."   submaps can only appear in
     81  * the kernel_map (user processes can't use them).   submaps "take over"
     82  * the management of a sub-range of the kernel's address space.  submaps
     83  * are typically allocated at boot time and are never released.   kernel
     84  * virtual address space that is mapped by a submap is locked by the
     85  * submap's lock -- not the kernel_map's lock.
     86  *
     87  * thus, the useful feature of submaps is that they allow us to break
     88  * up the locking and protection of the kernel address space into smaller
     89  * chunks.
     90  *
     91  * the vm system has several standard kernel submaps, including:
     92  *   kmem_map => contains only wired kernel memory for the kernel
     93  *		malloc.   *** access to kmem_map must be protected
     94  *		by splvm() because we are allowed to call malloc()
     95  *		at interrupt time ***
     96  *   mb_map => memory for large mbufs,  *** protected by splvm ***
     97  *   pager_map => used to map "buf" structures into kernel space
     98  *   exec_map => used during exec to handle exec args
     99  *   etc...
    100  *
    101  * the kernel allocates its private memory out of special uvm_objects whose
    102  * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
    103  * are "special" and never die).   all kernel objects should be thought of
    104  * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
    105  * object is equal to the size of kernel virtual address space (i.e. the
    106  * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
    107  *
    108  * note that just because a kernel object spans the entire kernel virtual
    109  * address space doesn't mean that it has to be mapped into the entire space.
    110  * large chunks of a kernel object's space go unused either because
    111  * that area of kernel VM is unmapped, or there is some other type of
    112  * object mapped into that range (e.g. a vnode).    for submaps' kernel
    113  * objects, the only part of the object that can ever be populated is the
    114  * offsets that are managed by the submap.
    115  *
    116  * note that the "offset" in a kernel object is always the kernel virtual
    117  * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
    118  * example:
    119  *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
    120  *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
    121  *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
    122  *   then that means that the page at offset 0x235000 in kernel_object is
    123  *   mapped at 0xf8235000.
    124  *
    125  * kernel objects have one other special property: when the kernel virtual
    126  * memory mapping them is unmapped, the backing memory in the object is
    127  * freed right away.   this is done with the uvm_km_pgremove() function.
    128  * this has to be done because there is no backing store for kernel pages
    129  * and no need to save them after they are no longer referenced.
    130  */
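
        /*
         * Illustrative sketch (not part of this file; the helper name is made
         * up): the offset rule above is just a subtraction.  uvm_km_pgremove()
         * and uvm_km_alloc() below do the same arithmetic inline.
         *
         *	voff_t
         *	kernel_object_offset(vaddr_t kva)
         *	{
         *
         *		KASSERT(kva >= vm_map_min(kernel_map));
         *		return (voff_t)(kva - vm_map_min(kernel_map));
         *	}
         */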
    131 
    132 #include <sys/cdefs.h>
    133 __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.89.6.1 2006/10/22 06:07:52 yamt Exp $");
    134 
    135 #include "opt_uvmhist.h"
    136 
    137 #include <sys/param.h>
    138 #include <sys/malloc.h>
    139 #include <sys/systm.h>
    140 #include <sys/proc.h>
    141 #include <sys/pool.h>
    142 
    143 #include <uvm/uvm.h>
    144 
    145 /*
    146  * global data structures
    147  */
    148 
    149 struct vm_map *kernel_map = NULL;
    150 
    151 /*
    152  * local data structures
    153  */
    154 
    155 static struct vm_map_kernel	kernel_map_store;
    156 static struct vm_map_entry	kernel_first_mapent_store;
    157 
    158 #if !defined(PMAP_MAP_POOLPAGE)
    159 
    160 /*
    161  * kva cache
    162  *
    163  * XXX maybe it's better to do this at the uvm_map layer.
    164  */
    165 
    166 #define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */
    167 
    168 static void *km_vacache_alloc(struct pool *, int);
    169 static void km_vacache_free(struct pool *, void *);
    170 static void km_vacache_init(struct vm_map *, const char *, size_t);
    171 
    172 /* XXX */
    173 #define	KM_VACACHE_POOL_TO_MAP(pp) \
    174 	((struct vm_map *)((char *)(pp) - \
    175 	    offsetof(struct vm_map_kernel, vmk_vacache)))
    176 
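        /*
         * km_vacache_alloc: pool page allocator for the kva cache.  allocates
         * a bare chunk of kernel va (no object, no physical pages) from the
         * backing map.
         */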
    177 static void *
    178 km_vacache_alloc(struct pool *pp, int flags)
    179 {
    180 	vaddr_t va;
    181 	size_t size;
    182 	struct vm_map *map;
    183 	size = pp->pr_alloc->pa_pagesz;
    184 
    185 	map = KM_VACACHE_POOL_TO_MAP(pp);
    186 
    187 	va = vm_map_min(map); /* hint */
    188 	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
    189 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    190 	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
    191 	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
    192 	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
    193 		return NULL;
    194 
    195 	return (void *)va;
    196 }
    197 
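        /*
         * km_vacache_free: return a cached chunk of kernel va to the backing map.
         */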
    198 static void
    199 km_vacache_free(struct pool *pp, void *v)
    200 {
    201 	vaddr_t va = (vaddr_t)v;
    202 	size_t size = pp->pr_alloc->pa_pagesz;
    203 	struct vm_map *map;
    204 
    205 	map = KM_VACACHE_POOL_TO_MAP(pp);
    206 	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
    207 }
    208 
    209 /*
    210  * km_vacache_init: initialize kva cache.
    211  */
    212 
    213 static void
    214 km_vacache_init(struct vm_map *map, const char *name, size_t size)
    215 {
    216 	struct vm_map_kernel *vmk;
    217 	struct pool *pp;
    218 	struct pool_allocator *pa;
    219 
    220 	KASSERT(VM_MAP_IS_KERNEL(map));
    221 	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */
    222 
    223 	vmk = vm_map_to_kernel(map);
    224 	pp = &vmk->vmk_vacache;
    225 	pa = &vmk->vmk_vacache_allocator;
    226 	memset(pa, 0, sizeof(*pa));
    227 	pa->pa_alloc = km_vacache_alloc;
    228 	pa->pa_free = km_vacache_free;
    229 	pa->pa_pagesz = (unsigned int)size;
    230 	pa->pa_backingmap = map;
    231 	pa->pa_backingmapptr = NULL;
    232 	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);
    233 }
    234 
    235 void
    236 uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
    237 {
    238 
    239 	map->flags |= VM_MAP_VACACHE;
    240 	if (size == 0)
    241 		size = KM_VACACHE_SIZE;
    242 	km_vacache_init(map, name, size);
    243 }
    244 
    245 #else /* !defined(PMAP_MAP_POOLPAGE) */
    246 
    247 void
    248 uvm_km_vacache_init(struct vm_map *map __unused, const char *name __unused,
    249 		    size_t size __unused)
    250 {
    251 
    252 	/* nothing */
    253 }
    254 
    255 #endif /* !defined(PMAP_MAP_POOLPAGE) */
    256 
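        /*
         * uvm_km_va_drain: run the map's registered reclaim callbacks so that
         * cached kernel va is released when the map runs short of space.
         */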
    257 void
    258 uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags __unused)
    259 {
    260 	struct vm_map_kernel *vmk = vm_map_to_kernel(map);
    261 	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
    262 	int s = 0xdeadbeaf; /* XXX: gcc */
    263 
    264 	if (intrsafe) {
    265 		s = splvm();
    266 	}
    267 	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
    268 	if (intrsafe) {
    269 		splx(s);
    270 	}
    271 }
    272 
    273 /*
    274  * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
    275  * KVM already allocated for text, data, bss, and static data structures).
    276  *
    277  * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
    278  *    we assume that [vmin -> start] has already been allocated and that
    279  *    "end" is the end.
    280  */
    281 
    282 void
    283 uvm_km_init(vaddr_t start, vaddr_t end)
    284 {
    285 	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
    286 
    287 	/*
    288 	 * first, init kernel memory objects.
    289 	 */
    290 
    291 	/* kernel_object: for pageable anonymous kernel memory */
    292 	uao_init();
    293 	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
    294 				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
    295 
    296 	/*
    297 	 * init the map and reserve any space that might already
    298 	 * have been allocated kernel space before installing.
    299 	 */
    300 
    301 	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
    302 	kernel_map_store.vmk_map.pmap = pmap_kernel();
    303 	if (start != base) {
    304 		int error;
    305 		struct uvm_map_args args;
    306 
    307 		error = uvm_map_prepare(&kernel_map_store.vmk_map,
    308 		    base, start - base,
    309 		    NULL, UVM_UNKNOWN_OFFSET, 0,
    310 		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    311 		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
    312 		if (!error) {
    313 			kernel_first_mapent_store.flags =
    314 			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
    315 			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
    316 			    &kernel_first_mapent_store);
    317 		}
    318 
    319 		if (error)
    320 			panic(
    321 			    "uvm_km_init: could not reserve space for kernel");
    322 	}
    323 
    324 	/*
    325 	 * install!
    326 	 */
    327 
    328 	kernel_map = &kernel_map_store.vmk_map;
    329 	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
    330 }
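
        /*
         * Illustrative call sketch (argument names are assumed, not taken from
         * this file): the UVM bootstrap code invokes this exactly once, passing
         * the first unused kernel virtual address and the end of the usable
         * kernel virtual address range:
         *
         *	uvm_km_init(first_unused_kva, kernel_va_end);
         */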
    331 
    332 /*
    333  * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
    334  * is allocated all references to that area of VM must go through it.  this
    335  * allows the locking of VAs in kernel_map to be broken up into regions.
    336  *
    337  * => if `fixed' is true, *vmin specifies where the region described
    338  *      by the submap must start
    339  * => if submap is non NULL we use that as the submap, otherwise we
    340  *	alloc a new map
    341  */
    342 
    343 struct vm_map *
    344 uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    345     vaddr_t *vmax /* OUT */, vsize_t size, int flags, boolean_t fixed,
    346     struct vm_map_kernel *submap)
    347 {
    348 	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
    349 
    350 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    351 
    352 	size = round_page(size);	/* round up to pagesize */
    353 	size += uvm_mapent_overhead(size, flags);
    354 
    355 	/*
    356 	 * first allocate a blank spot in the parent map
    357 	 */
    358 
    359 	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
    360 	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    361 	    UVM_ADV_RANDOM, mapflags)) != 0) {
    362 	       panic("uvm_km_suballoc: unable to allocate space in parent map");
    363 	}
    364 
    365 	/*
    366 	 * set VM bounds (vmin is filled in by uvm_map)
    367 	 */
    368 
    369 	*vmax = *vmin + size;
    370 
    371 	/*
    372 	 * add references to pmap and create or init the submap
    373 	 */
    374 
    375 	pmap_reference(vm_map_pmap(map));
    376 	if (submap == NULL) {
    377 		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
    378 		if (submap == NULL)
    379 			panic("uvm_km_suballoc: unable to create submap");
    380 	}
    381 	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
    382 	submap->vmk_map.pmap = vm_map_pmap(map);
    383 
    384 	/*
    385 	 * now let uvm_map_submap plug it in...
    386 	 */
    387 
    388 	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
    389 		panic("uvm_km_suballoc: submap allocation failed");
    390 
    391 	return(&submap->vmk_map);
    392 }
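
        /*
         * Illustrative sketch of a typical boot-time caller (the submap name and
         * size macro are assumed, not taken from this file).  with "fixed" FALSE,
         * minaddr is only a hint on input and is overwritten with the chosen
         * start address:
         *
         *	vaddr_t minaddr = 0, maxaddr;
         *	struct vm_map *example_map;
         *
         *	example_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
         *	    EXAMPLE_SUBMAP_SIZE, VM_MAP_PAGEABLE, FALSE, NULL);
         */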
    393 
    394 /*
    395  * uvm_km_pgremove: remove pages from a kernel uvm_object.
    396  *
    397  * => when you unmap a part of anonymous kernel memory you want to toss
    398  *    the pages right away.    (this gets called from uvm_unmap_...).
    399  */
    400 
    401 void
    402 uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
    403 {
    404 	struct uvm_object * const uobj = uvm.kernel_object;
    405 	const voff_t start = startva - vm_map_min(kernel_map);
    406 	const voff_t end = endva - vm_map_min(kernel_map);
    407 	struct vm_page *pg;
    408 	voff_t curoff, nextoff;
    409 	int swpgonlydelta = 0;
    410 	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
    411 
    412 	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
    413 	KASSERT(startva < endva);
    414 	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
    415 
    416 	simple_lock(&uobj->vmobjlock);
    417 
    418 	for (curoff = start; curoff < end; curoff = nextoff) {
    419 		nextoff = curoff + PAGE_SIZE;
    420 		pg = uvm_pagelookup(uobj, curoff);
    421 		if (pg != NULL && pg->flags & PG_BUSY) {
    422 			pg->flags |= PG_WANTED;
    423 			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
    424 				    "km_pgrm", 0);
    425 			simple_lock(&uobj->vmobjlock);
    426 			nextoff = curoff;
    427 			continue;
    428 		}
    429 
    430 		/*
    431 		 * free the swap slot, then the page.
    432 		 */
    433 
    434 		if (pg == NULL &&
    435 		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
    436 			swpgonlydelta++;
    437 		}
    438 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
    439 		if (pg != NULL) {
    440 			uvm_lock_pageq();
    441 			uvm_pagefree(pg);
    442 			uvm_unlock_pageq();
    443 		}
    444 	}
    445 	simple_unlock(&uobj->vmobjlock);
    446 
    447 	if (swpgonlydelta > 0) {
    448 		simple_lock(&uvm.swap_data_lock);
    449 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    450 		uvmexp.swpgonly -= swpgonlydelta;
    451 		simple_unlock(&uvm.swap_data_lock);
    452 	}
    453 }
    454 
    455 
    456 /*
    457  * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed
    458  *    regions.
    459  *
    460  * => when you unmap a part of anonymous kernel memory you want to toss
    461  *    the pages right away.    (this is called from uvm_unmap_...).
    462  * => none of the pages will ever be busy, and none of them will ever
    463  *    be on the active or inactive queues (because they have no object).
    464  */
    465 
    466 void
    467 uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
    468 {
    469 	struct vm_page *pg;
    470 	paddr_t pa;
    471 	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
    472 
    473 	KASSERT(VM_MIN_KERNEL_ADDRESS <= start);
    474 	KASSERT(start < end);
    475 	KASSERT(end <= VM_MAX_KERNEL_ADDRESS);
    476 
    477 	for (; start < end; start += PAGE_SIZE) {
    478 		if (!pmap_extract(pmap_kernel(), start, &pa)) {
    479 			continue;
    480 		}
    481 		pg = PHYS_TO_VM_PAGE(pa);
    482 		KASSERT(pg);
    483 		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
    484 		uvm_pagefree(pg);
    485 	}
    486 }
    487 
    488 #if defined(DEBUG)
    489 void
    490 uvm_km_check_empty(vaddr_t start, vaddr_t end, boolean_t intrsafe)
    491 {
    492 	vaddr_t va;
    493 	paddr_t pa;
    494 
    495 	KDASSERT(VM_MIN_KERNEL_ADDRESS <= start);
    496 	KDASSERT(start < end);
    497 	KDASSERT(end <= VM_MAX_KERNEL_ADDRESS);
    498 
    499 	for (va = start; va < end; va += PAGE_SIZE) {
    500 		if (pmap_extract(pmap_kernel(), va, &pa)) {
    501 			panic("uvm_km_check_empty: va %p has pa 0x%llx",
    502 			    (void *)va, (long long)pa);
    503 		}
    504 		if (!intrsafe) {
    505 			const struct vm_page *pg;
    506 
    507 			simple_lock(&uvm.kernel_object->vmobjlock);
    508 			pg = uvm_pagelookup(uvm.kernel_object,
    509 			    va - vm_map_min(kernel_map));
    510 			simple_unlock(&uvm.kernel_object->vmobjlock);
    511 			if (pg) {
    512 				panic("uvm_km_check_empty: "
    513 				    "has page hashed at %p", (const void *)va);
    514 			}
    515 		}
    516 	}
    517 }
    518 #endif /* defined(DEBUG) */
    519 
    520 /*
    521  * uvm_km_alloc: allocate an area of kernel memory.
    522  *
    523  * => NOTE: we can return 0, even when waiting is allowed, if there is not enough
    524  *	free VM space in the map... caller should be prepared to handle
    525  *	this case.
    526  * => we return KVA of memory allocated
    527  */
    528 
    529 vaddr_t
    530 uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    531 {
    532 	vaddr_t kva, loopva;
    533 	vaddr_t offset;
    534 	vsize_t loopsize;
    535 	struct vm_page *pg;
    536 	struct uvm_object *obj;
    537 	int pgaflags;
    538 	vm_prot_t prot;
    539 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    540 
    541 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    542 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    543 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    544 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    545 
    546 	/*
    547 	 * setup for call
    548 	 */
    549 
    550 	kva = vm_map_min(map);	/* hint */
    551 	size = round_page(size);
    552 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm.kernel_object : NULL;
    553 	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
    554 		    map, obj, size, flags);
    555 
    556 	/*
    557 	 * allocate some virtual space
    558 	 */
    559 
    560 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
    561 	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
    562 	    UVM_ADV_RANDOM,
    563 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
    564 	    | UVM_FLAG_QUANTUM)) != 0)) {
    565 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    566 		return(0);
    567 	}
    568 
    569 	/*
    570 	 * if all we wanted was VA, return now
    571 	 */
    572 
    573 	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
    574 		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
    575 		return(kva);
    576 	}
    577 
    578 	/*
    579 	 * recover object offset from virtual address
    580 	 */
    581 
    582 	offset = kva - vm_map_min(kernel_map);
    583 	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);
    584 
    585 	/*
    586 	 * now allocate and map in the memory... note that we are the only ones
    587 	 * who should ever get a handle on this area of VM.
    588 	 */
    589 
    590 	loopva = kva;
    591 	loopsize = size;
    592 
    593 	pgaflags = UVM_PGA_USERESERVE;
    594 	if (flags & UVM_KMF_ZERO)
    595 		pgaflags |= UVM_PGA_ZERO;
    596 	prot = VM_PROT_READ | VM_PROT_WRITE;
    597 	if (flags & UVM_KMF_EXEC)
    598 		prot |= VM_PROT_EXECUTE;
    599 	while (loopsize) {
    600 		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));
    601 
    602 		pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);
    603 
    604 		/*
    605 		 * out of memory?
    606 		 */
    607 
    608 		if (__predict_false(pg == NULL)) {
    609 			if ((flags & UVM_KMF_NOWAIT) ||
    610 			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
    611 				/* free everything! */
    612 				uvm_km_free(map, kva, size,
    613 				    flags & UVM_KMF_TYPEMASK);
    614 				return (0);
    615 			} else {
    616 				uvm_wait("km_getwait2");	/* sleep here */
    617 				continue;
    618 			}
    619 		}
    620 
    621 		pg->flags &= ~PG_BUSY;	/* new page */
    622 		UVM_PAGE_OWN(pg, NULL);
    623 
    624 		/*
    625 		 * map it in
    626 		 */
    627 
    628 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), prot);
    629 		loopva += PAGE_SIZE;
    630 		offset += PAGE_SIZE;
    631 		loopsize -= PAGE_SIZE;
    632 	}
    633 
    634        	pmap_update(pmap_kernel());
    635 
    636 	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
    637 	return(kva);
    638 }
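
        /*
         * Illustrative usage sketch ("len" stands for the caller's requested
         * size; error handling shortened): allocate wired, zeroed kernel memory
         * and release it later.  the type flag handed to uvm_km_free() must
         * match the one used for the allocation.
         *
         *	vaddr_t va;
         *
         *	va = uvm_km_alloc(kernel_map, round_page(len), 0,
         *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
         *	if (va == 0)
         *		return ENOMEM;
         *	...
         *	uvm_km_free(kernel_map, va, round_page(len), UVM_KMF_WIRED);
         */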
    639 
    640 /*
    641  * uvm_km_free: free an area of kernel memory
    642  */
    643 
    644 void
    645 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
    646 {
    647 
    648 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
    649 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
    650 		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
    651 	KASSERT((addr & PAGE_MASK) == 0);
    652 	KASSERT(vm_map_pmap(map) == pmap_kernel());
    653 
    654 	size = round_page(size);
    655 
    656 	if (flags & UVM_KMF_PAGEABLE) {
    657 		uvm_km_pgremove(addr, addr + size);
    658 		pmap_remove(pmap_kernel(), addr, addr + size);
    659 	} else if (flags & UVM_KMF_WIRED) {
    660 		uvm_km_pgremove_intrsafe(addr, addr + size);
    661 		pmap_kremove(addr, size);
    662 	}
    663 
    664 	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
    665 }
    666 
    667 /* Sanity; must specify both or none. */
    668 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    669     (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
    670 #error Must specify MAP and UNMAP together.
    671 #endif
    672 
    673 /*
    674  * uvm_km_alloc_poolpage: allocate a page for the pool allocator
    675  *
    676  * => if the pmap specifies an alternate mapping method, we use it.
    677  */
    678 
    679 /* ARGSUSED */
    680 vaddr_t
    681 uvm_km_alloc_poolpage_cache(struct vm_map *map, boolean_t waitok)
    682 {
    683 #if defined(PMAP_MAP_POOLPAGE)
    684 	return uvm_km_alloc_poolpage(map, waitok);
    685 #else
    686 	struct vm_page *pg;
    687 	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
    688 	vaddr_t va;
    689 	int s = 0xdeadbeaf; /* XXX: gcc */
    690 	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
    691 
    692 	if ((map->flags & VM_MAP_VACACHE) == 0)
    693 		return uvm_km_alloc_poolpage(map, waitok);
    694 
    695 	if (intrsafe)
    696 		s = splvm();
    697 	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
    698 	if (intrsafe)
    699 		splx(s);
    700 	if (va == 0)
    701 		return 0;
    702 	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
    703 again:
    704 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
    705 	if (__predict_false(pg == NULL)) {
    706 		if (waitok) {
    707 			uvm_wait("plpg");
    708 			goto again;
    709 		} else {
    710 			if (intrsafe)
    711 				s = splvm();
    712 			pool_put(pp, (void *)va);
    713 			if (intrsafe)
    714 				splx(s);
    715 			return 0;
    716 		}
    717 	}
    718 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
    719 	pmap_update(pmap_kernel());
    720 
    721 	return va;
    722 #endif /* PMAP_MAP_POOLPAGE */
    723 }
    724 
    725 vaddr_t
    726 uvm_km_alloc_poolpage(struct vm_map *map, boolean_t waitok)
    727 {
    728 #if defined(PMAP_MAP_POOLPAGE)
    729 	struct vm_page *pg;
    730 	vaddr_t va;
    731 
    732  again:
    733 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
    734 	if (__predict_false(pg == NULL)) {
    735 		if (waitok) {
    736 			uvm_wait("plpg");
    737 			goto again;
    738 		} else
    739 			return (0);
    740 	}
    741 	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
    742 	if (__predict_false(va == 0))
    743 		uvm_pagefree(pg);
    744 	return (va);
    745 #else
    746 	vaddr_t va;
    747 	int s = 0xdeadbeaf; /* XXX: gcc */
    748 	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
    749 
    750 	if (intrsafe)
    751 		s = splvm();
    752 	va = uvm_km_alloc(map, PAGE_SIZE, 0,
    753 	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
    754 	if (intrsafe)
    755 		splx(s);
    756 	return (va);
    757 #endif /* PMAP_MAP_POOLPAGE */
    758 }
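
        /*
         * Illustrative sketch (wrapper names are assumed): pool(9) can use
         * uvm_km_alloc_poolpage_cache() above and uvm_km_free_poolpage_cache()
         * below as its page-level backend through a struct pool_allocator:
         *
         *	static void *
         *	example_page_alloc(struct pool *pp, int prflags)
         *	{
         *
         *		return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
         *		    (prflags & PR_WAITOK) != 0);
         *	}
         *
         *	static void
         *	example_page_free(struct pool *pp, void *v)
         *	{
         *
         *		uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
         *	}
         */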
    759 
    760 /*
    761  * uvm_km_free_poolpage: free a previously allocated pool page
    762  *
    763  * => if the pmap specifies an alternate unmapping method, we use it.
    764  */
    765 
    766 /* ARGSUSED */
    767 void
    768 uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
    769 {
    770 #if defined(PMAP_UNMAP_POOLPAGE)
    771 	uvm_km_free_poolpage(map, addr);
    772 #else
    773 	struct pool *pp;
    774 	int s = 0xdeadbeaf; /* XXX: gcc */
    775 	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
    776 
    777 	if ((map->flags & VM_MAP_VACACHE) == 0) {
    778 		uvm_km_free_poolpage(map, addr);
    779 		return;
    780 	}
    781 
    782 	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
    783 	uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
    784 	pmap_kremove(addr, PAGE_SIZE);
    785 #if defined(DEBUG)
    786 	pmap_update(pmap_kernel());
    787 #endif
    788 	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
    789 	pp = &vm_map_to_kernel(map)->vmk_vacache;
    790 	if (intrsafe)
    791 		s = splvm();
    792 	pool_put(pp, (void *)addr);
    793 	if (intrsafe)
    794 		splx(s);
    795 #endif
    796 }
    797 
    798 /* ARGSUSED */
    799 void
    800 uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
    801 {
    802 #if defined(PMAP_UNMAP_POOLPAGE)
    803 	paddr_t pa;
    804 
    805 	pa = PMAP_UNMAP_POOLPAGE(addr);
    806 	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
    807 #else
    808 	int s = 0xdeadbeaf; /* XXX: gcc */
    809 	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
    810 
    811 	if (intrsafe)
    812 		s = splvm();
    813 	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
    814 	if (intrsafe)
    815 		splx(s);
    816 #endif /* PMAP_UNMAP_POOLPAGE */
    817 }
    818