/*	$NetBSD: uvm_km.c,v 1.104.2.3 2010/04/30 14:44:38 uebayasi Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for a submap's kernel
 * object, the only offsets that can ever be populated are the ones
 * managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
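
/*
 * example (illustrative only; the addresses are hypothetical): the
 * offset arithmetic described above can be seen directly in code:
 *
 *	vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_PAGEABLE);
 *	voff_t off = kva - vm_map_min(kernel_map);
 *
 * with VM_MIN_KERNEL_ADDRESS == 0xf8000000 and kva == 0xf8235000,
 * "off" is 0x235000: the backing page lives at offset 0x235000 in
 * uvm_kernel_object.
 */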

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.104.2.3 2010/04/30 14:44:38 uebayasi Exp $");

#include "opt_uvmhist.h"
#include "opt_device_page.h"
#include "opt_xip.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/once.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel	kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX container-of: recover the kernel map that embeds this vacache pool */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	    offsetof(struct vm_map_kernel, vmk_vacache)))

static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;

	size = pp->pr_alloc->pa_pagesz;
	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;
	int ipl;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pa->pa_backingmap = map;
	pa->pa_backingmapptr = NULL;

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		ipl = IPL_VM;
	else
		ipl = IPL_NONE;

	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
	    ipl);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{
	struct vm_map_kernel *vmk = vm_map_to_kernel(map);

	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [VM_MIN_KERNEL_ADDRESS -> start] has already been
 *    allocated and that "end" is the last kernel virtual address.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *      by the submap must start
 * => if submap is non-NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */
	size += uvm_mapent_overhead(size, flags);

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
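
/*
 * example (illustrative only; "example_map" and its storage are
 * hypothetical names): boot-time creation of a submap, in the style of
 * pager_map or exec_map:
 *
 *	static struct vm_map_kernel example_map_store;
 *	vaddr_t emin = 0, emax;
 *	struct vm_map *example_map;
 *
 *	example_map = uvm_km_suballoc(kernel_map, &emin, &emax,
 *	    16 * PAGE_SIZE, VM_MAP_PAGEABLE, false, &example_map_store);
 *
 * afterwards, allocations in [emin, emax) go through example_map and
 * take its lock rather than kernel_map's.
 */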

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "km_pgrm", 0);
			mutex_enter(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for regions
 *    that are not backed by any object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(vm_map_min(map) <= start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
			mutex_enter(&uvm_kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			mutex_exit(&uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even when we can wait, if there is not enough
 *	free VM space in the map... the caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
		    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
	    | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = 0;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot | PMAP_KMPAGE, 0);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
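
/*
 * example (illustrative only): allocating and releasing one page of
 * wired, zeroed kernel memory, handling the 0 return noted above:
 *
 *	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */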

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
		pmap_kremove(addr, size);
	}

	/*
	 * uvm_unmap_remove calls pmap_update for us.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage(map, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage(map, waitok);

	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			pool_put(pp, (void *)va);
			return 0;
		}
	}
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE, 0);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;

	va = uvm_km_alloc(map, PAGE_SIZE, 0,
	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
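
/*
 * example (illustrative only): these routines are the backend that the
 * pool(9) page allocators sit on; a pool page is obtained and released
 * roughly like this:
 *
 *	vaddr_t va = uvm_km_alloc_poolpage_cache(kmem_map, true);
 *	if (va == 0)
 *		...fail or retry...
 *	...
 *	uvm_km_free_poolpage_cache(kmem_map, va);
 */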

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage(map, addr);
#else
	struct pool *pp;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(map, addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	pool_put(pp, (void *)addr);
#endif
}

/* ARGSUSED */
void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
#endif /* PMAP_UNMAP_POOLPAGE */
}

#ifdef XIP
/*
 * uvm_pageofzero_xip: return a read-only page filled with zeroes.
 *
 * XXXUEBS Need better names.
 */

static void *uvm_pageofzero_xip_vaddr;
static paddr_t uvm_pageofzero_xip_paddr;
static int uvm_pageofzero_xip_init(void);

/*
 * share a single once-control between the two accessors so that the
 * init function cannot run (and allocate a page) twice.
 */
static ONCE_DECL(uvm_pageofzero_xip_inited);

void *
uvm_pageofzero_xip(void)
{

	RUN_ONCE(&uvm_pageofzero_xip_inited, uvm_pageofzero_xip_init);
	return uvm_pageofzero_xip_vaddr;
}

paddr_t
uvm_pageofzero_xip_phys_addr(void)
{

	RUN_ONCE(&uvm_pageofzero_xip_inited, uvm_pageofzero_xip_init);
	return uvm_pageofzero_xip_paddr;
}
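
/*
 * example (illustrative only; the enter flags are a guess for a
 * typical caller): an XIP-aware pager can map the shared zero page
 * wherever a file hole would otherwise need a private zeroed page:
 *
 *	paddr_t pa = uvm_pageofzero_xip_phys_addr();
 *	pmap_enter(pmap, va, pa, VM_PROT_READ, PMAP_WIRED);
 */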

static int
uvm_pageofzero_xip_init(void)
{
	bool rv;

	ASSERT_SLEEPABLE();

	uvm_pageofzero_xip_vaddr =
	    (void *)uvm_km_alloc_poolpage(kernel_map, true);
	KASSERT(uvm_pageofzero_xip_vaddr != NULL);

	rv = pmap_extract(pmap_kernel(), (vaddr_t)uvm_pageofzero_xip_vaddr,
	    &uvm_pageofzero_xip_paddr);
	KASSERT(rv == true && uvm_pageofzero_xip_paddr != 0);

	return 0;
}
#endif /* XIP */
    845