/*	$NetBSD: uvm_km.c,v 1.138 2013/01/29 21:29:40 para Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps/arenas, including:
 *   kmem_arena => used for kmem/pool (memoryallocators(9))
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * The kmem_arena is a "special submap", as it lives in a fixed map entry
 * within the kernel_map and is controlled by vmem(9).
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for submap's kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 *
 * Generic arenas:
 *
 * kmem_arena:
 *	Main arena controlling the kernel KVA used by other arenas.
 *
 * kmem_va_arena:
 *	Implements quantum caching in order to speed up allocations and
 *	reduce fragmentation.  The kmem(9) subsystem and pool(9), unless
 *	created with a custom meta-data allocator, use this arena.
 *
 * Arenas for meta-data allocations are used by vmem(9) and pool(9).
 * These arenas cannot use quantum caching.  However, kmem_va_meta_arena
 * compensates for this by importing larger chunks from kmem_arena.
 *
 * kmem_va_meta_arena:
 *	Space for meta-data.
 *
 * kmem_meta_arena:
 *	Imports from kmem_va_meta_arena.  Allocations from this arena are
 *	backed with pages.
 *
 * Arena stacking:
 *
 *	kmem_arena
 *		kmem_va_arena
 *		kmem_va_meta_arena
 *			kmem_meta_arena
 */
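
/*
 * Example (sketch only, never compiled): the offset invariant described
 * above.  A pageable allocation from kernel_map is backed by kernel_object
 * at the offset "va - vm_map_min(kernel_map)".
 */
#if 0
	vaddr_t va;
	voff_t off;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_PAGEABLE);
	if (va != 0) {
		/* the page lives at this offset in kernel_object */
		off = va - vm_map_min(kernel_map);
		uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_PAGEABLE);
	}
#endif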

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.138 2013/01/29 21:29:40 para Exp $");

#include "opt_uvmhist.h"

#include "opt_kmempages.h"

#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif

/*
 * Defaults for the lower and upper bounds of the kmem_arena page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif
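
/*
 * For example, a kernel config file could clamp the arena with something
 * like the following (the values here are illustrative only):
 *
 *	options NKMEMPAGES_MIN=2048
 *	options NKMEMPAGES_MAX=65536
 *
 * or fix the size outright with "options NKMEMPAGES=<n>".
 */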


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_image_mapent_store;
static struct vm_map_entry	kernel_kmem_mapent_store;

int nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

static struct vmem kmem_arena_store;
vmem_t *kmem_arena = NULL;
static struct vmem kmem_va_arena_store;
vmem_t *kmem_va_arena;

/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before);
		 * bail out now.
		 */
		return;
	}

#if defined(PMAP_MAP_POOLPAGE)
	npages = (physmem / 4);
#else
	npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

#ifndef NKMEMPAGES_MAX_UNLIMITED
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;
#endif

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}
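
/*
 * Worked example (illustrative numbers only): on a machine with 1 GiB of
 * RAM and 4 KiB pages, physmem is 262144 pages.  With PMAP_MAP_POOLPAGE
 * the arena gets physmem / 4 = 65536 pages (256 MiB of KVA); without it,
 * (physmem / 3) * 2 = 174762 pages, subject to the MIN/MAX clamps above.
 */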

/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
	bool kmem_arena_small;
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
	struct uvm_map_args args;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "start=%#"PRIxVADDR" end=%#"PRIxVADDR,
	    start, end, 0,0);

	kmeminit_nkmempages();
	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
	kmem_arena_small = kmemsize < 64 * 1024 * 1024;

	UVMHIST_LOG(maphist, "kmemsize=%#"PRIxVSIZE, kmemsize, 0,0,0);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated as kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		error = uvm_map_prepare(&kernel_map_store,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_image_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
			error = uvm_map_enter(&kernel_map_store, &args,
			    &kernel_image_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_bootstrap: could not reserve space for kernel");

		kmembase = args.uma_start + args.uma_size;
	} else {
		kmembase = base;
	}

	error = uvm_map_prepare(&kernel_map_store,
	    kmembase, kmemsize,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
	if (!error) {
		kernel_kmem_mapent_store.flags =
		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
		error = uvm_map_enter(&kernel_map_store, &args,
		    &kernel_kmem_mapent_store);
	}

	if (error)
		panic("uvm_km_bootstrap: could not reserve kernel kmem");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;

	pool_subsystem_init();

	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
#ifdef PMAP_GROWKERNEL
	/*
	 * kmem_arena VA allocations happen independently of uvm_map.
	 * grow kernel to accommodate the kmem_arena.
	 */
	if (uvm_maxkaddr < kmembase + kmemsize) {
		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
		    uvm_maxkaddr, kmembase, kmemsize);
	}
#endif

	vmem_subsystem_init(kmem_arena);

	UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
	    ", size=%#"PRIxVSIZE")", kmembase, kmemsize, 0,0);

	kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
	    (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
	    VM_NOSLEEP, IPL_VM);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}
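
/*
 * Sketch (hypothetical machine-dependent code, not part of this file):
 * the MD pmap bootstrap calls uvm_km_bootstrap() once, passing the first
 * free kernel VA after the image and early static allocations.  The name
 * "kernel_end_va" here is illustrative only.
 */
#if 0
	uvm_km_bootstrap(kernel_end_va, VM_MAX_KERNEL_ADDRESS);
#endif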

/*
 * uvm_km_init: init the kernel map's virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{

	kmem_init();

	kmeminit(); // killme
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *      by the submap must start
 * => if submap is non-NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("%s: unable to allocate space in parent map", __func__);
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup(submap, *vmin, *vmax, flags);
	submap->pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap() plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
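
/*
 * Example usage (sketch only; the variable names are hypothetical):
 * carving a pageable submap out of kernel_map at boot, the way
 * subsystems such as pager_map and exec_map are set up.
 */
#if 0
	struct vm_map *example_map;
	vaddr_t minaddr = 0, maxaddr;

	/* fixed == false: let uvm_map() choose where the submap lands */
	example_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE, VM_MAP_PAGEABLE, false, NULL);
	/* uvm_km_suballoc() panics on failure, so example_map is valid */
#endif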

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(uobj->vmobjlock);
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
				    "km_pgrm", 0);
			mutex_enter(uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for regions
 *    that are not backed by a uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
#define __PGRM_BATCH 16
	struct vm_page *pg;
	paddr_t pa[__PGRM_BATCH];
	int npgrm, i;
	vaddr_t va, batch_vastart;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERTMSG(vm_map_min(map) <= start,
	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
	    " (size=%#"PRIxVSIZE")",
	    vm_map_min(map), start, end - start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (va = start; va < end;) {
		batch_vastart = va;
		/* create a batch of at most __PGRM_BATCH pages to free */
		for (i = 0;
		     i < __PGRM_BATCH && va < end;
		     va += PAGE_SIZE) {
			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
				continue;
			}
			i++;
		}
		npgrm = i;
		/* now remove the mappings */
		pmap_kremove(batch_vastart, va - batch_vastart);
		/* and free the pages */
		for (i = 0; i < npgrm; i++) {
			pg = PHYS_TO_VM_PAGE(pa[i]);
			KASSERT(pg);
			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
	}
#undef __PGRM_BATCH
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		mutex_enter(uvm_kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm_kernel_object,
		    va - vm_map_min(kernel_map));
		mutex_exit(uvm_kernel_object->vmobjlock);
		if (pg) {
			panic("uvm_km_check_empty: "
			    "has page hashed at %p", (const void *)va);
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
		    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
	     | UVM_KMF_COLORMATCH)))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR, loopva);

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		   UVM_PGA_STRAT_NORMAL, 0
#endif
		   );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
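
/*
 * Example usage (sketch only): allocate wired, zeroed kernel memory and
 * later free it.  With UVM_KMF_CANFAIL the call may return 0 instead of
 * sleeping forever, so the result must be checked.
 */
#if 0
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, 4 * PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (va == 0)
		return ENOMEM;
	/* ... use the memory at va ... */
	uvm_km_free(kernel_map, va, 4 * PAGE_SIZE, UVM_KMF_WIRED);
#endif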

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		/*
		 * Note: uvm_km_pgremove_intrsafe() extracts the mappings
		 * before freeing the pages, so the KVA must be removed
		 * only afterwards.  See the comment below about KVA
		 * visibility.
		 */
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
	}

	/*
	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
	 * KVA becomes globally available.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	struct vm_page *pg;
	vmem_addr_t va;
	int rc;
	vaddr_t loopva;
	vsize_t loopsize;

	size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
	if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
		   0 : UVM_PGA_USERESERVE);
#else
		pg = uvm_pagealloc(NULL, 0, NULL,
		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				goto again;
			}
			return ENOMEM;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		if (__predict_false(va == 0)) {
			uvm_pagefree(pg);
			return ENOMEM;
		}
		*addr = va;
		return 0;
	}
#endif /* PMAP_MAP_POOLPAGE */

	rc = vmem_alloc(vm, size, flags, &va);
	if (rc != 0)
		return rc;

#ifdef PMAP_GROWKERNEL
	/*
	 * These VA allocations happen independently of uvm_map
	 * so this allocation must not extend beyond the current limit.
	 */
	KASSERTMSG(uvm_maxkaddr >= va + size,
	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
	    uvm_maxkaddr, va, size);
#endif

	loopva = va;
	loopsize = size;

	while (loopsize) {
#ifdef DIAGNOSTIC
		paddr_t pa;
#endif
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
		    " pa=%#"PRIxPADDR" vmem=%p",
		    loopva, loopsize, pa, vm);

		pg = uvm_pagealloc(NULL, loopva, NULL,
		    UVM_FLAG_COLORMATCH
		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(vm, va, size);
				return ENOMEM;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	*addr = va;

	return 0;
}

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

	size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
	if (size == PAGE_SIZE) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE(addr);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif /* PMAP_UNMAP_POOLPAGE */
	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
	pmap_update(pmap_kernel());

	vmem_free(vm, addr, size);
}
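
/*
 * Example usage (sketch only): pulling one page of wired KVA directly
 * out of kmem_va_arena with the two helpers above, then releasing it.
 */
#if 0
	vmem_addr_t va;

	if (uvm_km_kmem_alloc(kmem_va_arena, PAGE_SIZE, VM_SLEEP,
	    &va) == 0) {
		/* ... use the wired page at (vaddr_t)va ... */
		uvm_km_kmem_free(kmem_va_arena, va, PAGE_SIZE);
	}
#endif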

bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	if (kmem_arena == NULL)
		return false;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	return (free < (total / 10));
}
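
/*
 * Example usage (sketch only; the function name is hypothetical): a
 * cache-drain routine might poll uvm_km_va_starved_p() and release
 * cached objects once less than 10% of the kmem_arena KVA is free.
 */
#if 0
static void
example_drain(void)
{

	if (uvm_km_va_starved_p()) {
		/* ... free cached kernel memory back to kmem(9) ... */
	}
}
#endif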