      1 /*	$NetBSD: uvm_glue.c,v 1.153 2012/01/27 19:48:41 para Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6  *
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to Berkeley by
     10  * The Mach Operating System project at Carnegie-Mellon University.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
     37  * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
     38  *
     39  *
     40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     41  * All rights reserved.
     42  *
     43  * Permission to use, copy, modify and distribute this software and
     44  * its documentation is hereby granted, provided that both the copyright
     45  * notice and this permission notice appear in all copies of the
     46  * software, derivative works or modified versions, and any portions
     47  * thereof, and that both notices appear in supporting documentation.
     48  *
     49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     52  *
     53  * Carnegie Mellon requests users of this software to return to
     54  *
      55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     56  *  School of Computer Science
     57  *  Carnegie Mellon University
     58  *  Pittsburgh PA 15213-3890
     59  *
     60  * any improvements or extensions that they make and grant Carnegie the
     61  * rights to redistribute these changes.
     62  */
     63 
     64 #include <sys/cdefs.h>
     65 __KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.153 2012/01/27 19:48:41 para Exp $");
     66 
     67 #include "opt_kgdb.h"
     68 #include "opt_kstack.h"
     69 #include "opt_uvmhist.h"
     70 
     71 /*
     72  * uvm_glue.c: glue functions
     73  */
     74 
     75 #include <sys/param.h>
     76 #include <sys/kernel.h>
     77 
     78 #include <sys/systm.h>
     79 #include <sys/proc.h>
     80 #include <sys/resourcevar.h>
     81 #include <sys/buf.h>
     82 #include <sys/syncobj.h>
     83 #include <sys/cpu.h>
     84 #include <sys/atomic.h>
     85 #include <sys/lwp.h>
     86 
     87 #include <uvm/uvm.h>
     88 
     89 /*
     90  * uvm_kernacc: test if kernel can access a memory region.
     91  *
     92  * => Currently used only by /dev/kmem driver (dev/mm.c).
     93  */
     94 bool
     95 uvm_kernacc(void *addr, size_t len, vm_prot_t prot)
     96 {
     97 	vaddr_t saddr = trunc_page((vaddr_t)addr);
     98 	vaddr_t eaddr = round_page(saddr + len);
     99 	bool rv;
    100 
    101 	vm_map_lock_read(kernel_map);
    102 	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
    103 	vm_map_unlock_read(kernel_map);
    104 
    105 	return rv;
    106 }
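
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * hypothetical /dev/kmem-style read path would check the region with
 * uvm_kernacc() before touching it.  The helper name example_kmem_read()
 * is an assumption; the real consumer is dev/mm.c.
 */
#if 0
static int
example_kmem_read(void *kaddr, void *uaddr, size_t len)
{

	if (!uvm_kernacc(kaddr, len, VM_PROT_READ))
		return EFAULT;			/* region not kernel-readable */
	return copyout(kaddr, uaddr, len);	/* copy to the user buffer */
}
#endif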
    107 
    108 #ifdef KGDB
    109 /*
    110  * Change protections on kernel pages from addr to addr+len
    111  * (presumably so debugger can plant a breakpoint).
    112  *
     113  * We force the protection change at the pmap level.  If we were
     114  * to use vm_map_protect(), a change to allow writing would be applied
     115  * lazily, meaning we would still take a protection fault, which is
     116  * something we really don't want.  It would also fragment the kernel
     117  * map unnecessarily.  We cannot use pmap_protect() either, since it
     118  * won't enforce a write-enable request.  Using pmap_enter() is the
     119  * only way we can ensure the change takes place properly.
    120  */
    121 void
    122 uvm_chgkprot(void *addr, size_t len, int rw)
    123 {
    124 	vm_prot_t prot;
    125 	paddr_t pa;
    126 	vaddr_t sva, eva;
    127 
    128 	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
    129 	eva = round_page((vaddr_t)addr + len);
    130 	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
    131 		/*
    132 		 * Extract physical address for the page.
    133 		 */
    134 		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
    135 			panic("%s: invalid page", __func__);
    136 		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
    137 	}
    138 	pmap_update(pmap_kernel());
    139 }
    140 #endif
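
/*
 * Illustrative sketch (not compiled): how a KGDB-style caller might use
 * uvm_chgkprot() to plant a breakpoint in read-only kernel text and then
 * restore the original protection.  The helper name and the bkpt_insn
 * parameter are assumptions; only uvm_chgkprot() itself is real, and it
 * exists only under "options KGDB".
 */
#if 0
static void
example_plant_breakpoint(vaddr_t va, uint32_t bkpt_insn)
{

	uvm_chgkprot((void *)va, sizeof(bkpt_insn), B_WRITE);	/* force writable */
	*(uint32_t *)va = bkpt_insn;				/* patch the text */
	uvm_chgkprot((void *)va, sizeof(bkpt_insn), B_READ);	/* back to read-only */
}
#endif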
    141 
    142 /*
    143  * uvm_vslock: wire user memory for I/O
    144  *
    145  * - called from physio and sys___sysctl
    146  * - XXXCDC: consider nuking this (or making it a macro?)
    147  */
    148 
    149 int
    150 uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
    151 {
    152 	struct vm_map *map;
    153 	vaddr_t start, end;
    154 	int error;
    155 
    156 	map = &vs->vm_map;
    157 	start = trunc_page((vaddr_t)addr);
    158 	end = round_page((vaddr_t)addr + len);
    159 	error = uvm_fault_wire(map, start, end, access_type, 0);
    160 	return error;
    161 }
    162 
    163 /*
    164  * uvm_vsunlock: unwire user memory wired by uvm_vslock()
    165  *
    166  * - called from physio and sys___sysctl
    167  * - XXXCDC: consider nuking this (or making it a macro?)
    168  */
    169 
    170 void
    171 uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
    172 {
    173 	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
    174 		round_page((vaddr_t)addr + len));
    175 }
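
/*
 * Illustrative sketch (not compiled): the wire/access/unwire pattern that
 * physio-style callers follow.  The helper name example_wired_copyin() is
 * an assumption; the real callers are physio() and sys___sysctl().
 */
#if 0
static int
example_wired_copyin(struct vmspace *vs, void *uaddr, void *kaddr, size_t len)
{
	int error;

	/* Fault in and wire the user pages so they stay resident. */
	error = uvm_vslock(vs, uaddr, len, VM_PROT_READ);
	if (error)
		return error;
	error = copyin(uaddr, kaddr, len);	/* safe: pages cannot be paged out */
	uvm_vsunlock(vs, uaddr, len);		/* always undo the wiring */
	return error;
}
#endif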
    176 
    177 /*
    178  * uvm_proc_fork: fork a virtual address space
    179  *
    180  * - the address space is copied as per parent map's inherit values
    181  */
    182 void
    183 uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
    184 {
    185 
    186 	if (shared == true) {
    187 		p2->p_vmspace = NULL;
    188 		uvmspace_share(p1, p2);
    189 	} else {
    190 		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
    191 	}
    192 
    193 	cpu_proc_fork(p1, p2);
    194 }
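
/*
 * Illustrative sketch (not compiled): a fork1()-style caller picks between
 * sharing and copying the parent's address space; vfork(2) passes
 * FORK_SHAREVM and therefore shared == true.  Treat the exact call site as
 * an assumption.
 */
#if 0
	/* somewhere in a fork1()-like path, after p2 has been created */
	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) != 0);
#endif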
    195 
    196 /*
    197  * uvm_lwp_fork: fork a thread
    198  *
    199  * - a new PCB structure is allocated for the child process,
     200  *	and filled in by the MD layer
    201  * - if specified, the child gets a new user stack described by
    202  *	stack and stacksize
    203  * - NOTE: the kernel stack may be at a different location in the child
    204  *	process, and thus addresses of automatic variables may be invalid
    205  *	after cpu_lwp_fork returns in the child process.  We do nothing here
    206  *	after cpu_lwp_fork returns.
    207  */
    208 void
    209 uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    210     void (*func)(void *), void *arg)
    211 {
    212 
    213 	/* Fill stack with magic number. */
    214 	kstack_setup_magic(l2);
    215 
    216 	/*
     217 	 * cpu_lwp_fork() copies and updates the PCB, and makes the child
     218 	 * ready to run.  If this is a normal user fork, the child will exit
    219 	 * directly to user mode via child_return() on its first time
    220 	 * slice and will not return here.  If this is a kernel thread,
    221 	 * the specified entry point will be executed.
    222 	 */
    223 	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
    224 
     225 	/* Mark the emap inactive for the new LWP. */
    226 	l2->l_emap_gen = UVM_EMAP_INACTIVE;
    227 }
    228 
    229 #ifndef USPACE_ALIGN
    230 #define	USPACE_ALIGN	0
    231 #endif
    232 
    233 static pool_cache_t uvm_uarea_cache;
    234 #if defined(__HAVE_CPU_UAREA_ROUTINES)
    235 static pool_cache_t uvm_uarea_system_cache;
    236 #else
    237 #define uvm_uarea_system_cache uvm_uarea_cache
    238 #endif
    239 
    240 static void *
    241 uarea_poolpage_alloc(struct pool *pp, int flags)
    242 {
    243 	if (USPACE_ALIGN == 0) {
    244 		int rc;
    245 		vmem_addr_t va;
    246 
    247 		rc = uvm_km_kmem_alloc(kmem_va_arena, USPACE,
    248 		    ((flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP) |
    249 		    VM_INSTANTFIT, &va);
    250 		return (rc != 0) ? NULL : (void *)va;
    251 	}
    252 #if defined(__HAVE_CPU_UAREA_ROUTINES)
    253 	void *va = cpu_uarea_alloc(false);
    254 	if (va)
    255 		return (void *)va;
    256 #endif
    257 	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
    258 	    USPACE_ALIGN, UVM_KMF_WIRED |
    259 	    ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
    260 	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
    261 }
    262 
    263 static void
    264 uarea_poolpage_free(struct pool *pp, void *addr)
    265 {
    266 	if (USPACE_ALIGN == 0) {
    267 		uvm_km_kmem_free(kmem_va_arena, (vmem_addr_t)addr, USPACE);
    268 		return;
    269 	}
    270 #if defined(__HAVE_CPU_UAREA_ROUTINES)
    271 	if (cpu_uarea_free(addr))
    272 		return;
    273 #endif
    274 	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
    275 	    UVM_KMF_WIRED);
    276 }
    277 
    278 static struct pool_allocator uvm_uarea_allocator = {
    279 	.pa_alloc = uarea_poolpage_alloc,
    280 	.pa_free = uarea_poolpage_free,
    281 	.pa_pagesz = USPACE,
    282 };
    283 
    284 #if defined(__HAVE_CPU_UAREA_ROUTINES)
    285 static void *
    286 uarea_system_poolpage_alloc(struct pool *pp, int flags)
    287 {
    288 	void * const va = cpu_uarea_alloc(true);
    289 	if (va != NULL)
    290 		return va;
    291 
    292 	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
    293 	    USPACE_ALIGN, UVM_KMF_WIRED |
    294 	    ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
    295 	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
    296 }
    297 
    298 static void
    299 uarea_system_poolpage_free(struct pool *pp, void *addr)
    300 {
    301 	if (!cpu_uarea_free(addr))
    302 		panic("%s: failed to free uarea %p", __func__, addr);
    303 }
    304 
    305 static struct pool_allocator uvm_uarea_system_allocator = {
    306 	.pa_alloc = uarea_system_poolpage_alloc,
    307 	.pa_free = uarea_system_poolpage_free,
    308 	.pa_pagesz = USPACE,
    309 };
    310 #endif /* __HAVE_CPU_UAREA_ROUTINES */
    311 
    312 void
    313 uvm_uarea_init(void)
    314 {
    315 	int flags = PR_NOTOUCH;
    316 
    317 	/*
    318 	 * specify PR_NOALIGN unless the alignment provided by
    319 	 * the backend (USPACE_ALIGN) is sufficient to provide
     320 	 * pool page size (USPACE) alignment.
    321 	 */
    322 
    323 	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
    324 	    (USPACE_ALIGN % USPACE) != 0) {
    325 		flags |= PR_NOALIGN;
    326 	}
    327 
    328 	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
    329 	    "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL);
    330 #if defined(__HAVE_CPU_UAREA_ROUTINES)
    331 	uvm_uarea_system_cache = pool_cache_init(USPACE, USPACE_ALIGN,
    332 	    0, flags, "uareasys", &uvm_uarea_system_allocator,
    333 	    IPL_NONE, NULL, NULL, NULL);
    334 #endif
    335 }
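
/*
 * Worked example for the PR_NOALIGN decision above (values are assumptions):
 * on a port where USPACE is 2 * PAGE_SIZE and USPACE_ALIGN is 0, the
 * kmem_va_arena backend only guarantees page alignment, so USPACE alignment
 * cannot be assumed and PR_NOALIGN is set.  With USPACE == PAGE_SIZE (and
 * USPACE_ALIGN 0), or with a non-zero USPACE_ALIGN that is a multiple of
 * USPACE, the flag stays clear.
 */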
    336 
    337 /*
    338  * uvm_uarea_alloc: allocate a u-area
    339  */
    340 
    341 vaddr_t
    342 uvm_uarea_alloc(void)
    343 {
    344 
    345 	return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
    346 }
    347 
    348 vaddr_t
    349 uvm_uarea_system_alloc(void)
    350 {
    351 
    352 	return (vaddr_t)pool_cache_get(uvm_uarea_system_cache, PR_WAITOK);
    353 }
    354 
    355 /*
    356  * uvm_uarea_free: free a u-area
    357  */
    358 
    359 void
    360 uvm_uarea_free(vaddr_t uaddr)
    361 {
    362 
    363 	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
    364 }
    365 
    366 void
    367 uvm_uarea_system_free(vaddr_t uaddr)
    368 {
    369 
    370 	pool_cache_put(uvm_uarea_system_cache, (void *)uaddr);
    371 }
    372 
    373 vaddr_t
    374 uvm_lwp_getuarea(lwp_t *l)
    375 {
    376 
    377 	return (vaddr_t)l->l_addr - UAREA_PCB_OFFSET;
    378 }
    379 
    380 void
    381 uvm_lwp_setuarea(lwp_t *l, vaddr_t addr)
    382 {
    383 
    384 	l->l_addr = (void *)(addr + UAREA_PCB_OFFSET);
    385 }
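
/*
 * Illustrative sketch (not compiled): the u-area lifecycle for an ordinary
 * (non-system) LWP, tying together uvm_uarea_alloc(), uvm_lwp_setuarea(),
 * uvm_lwp_getuarea() and uvm_uarea_free().  The helper name
 * example_uarea_cycle() is an assumption; the real callers are in the LWP
 * creation and teardown paths (e.g. kern_lwp.c).
 */
#if 0
static void
example_uarea_cycle(lwp_t *l)
{
	vaddr_t uaddr;

	uaddr = uvm_uarea_alloc();		/* may sleep (PR_WAITOK) */
	uvm_lwp_setuarea(l, uaddr);		/* l->l_addr now points at the PCB */
	KASSERT(uvm_lwp_getuarea(l) == uaddr);

	/* ... the LWP runs ... */

	uvm_uarea_free(uvm_lwp_getuarea(l));	/* at LWP teardown */
}
#endif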
    386 
    387 /*
    388  * uvm_proc_exit: exit a virtual address space
    389  *
    390  * - borrow proc0's address space because freeing the vmspace
    391  *   of the dead process may block.
    392  */
    393 
    394 void
    395 uvm_proc_exit(struct proc *p)
    396 {
    397 	struct lwp *l = curlwp; /* XXX */
    398 	struct vmspace *ovm;
    399 
    400 	KASSERT(p == l->l_proc);
    401 	ovm = p->p_vmspace;
    402 
    403 	/*
    404 	 * borrow proc0's address space.
    405 	 */
    406 	KPREEMPT_DISABLE(l);
    407 	pmap_deactivate(l);
    408 	p->p_vmspace = proc0.p_vmspace;
    409 	pmap_activate(l);
    410 	KPREEMPT_ENABLE(l);
    411 
    412 	uvmspace_free(ovm);
    413 }
    414 
    415 void
    416 uvm_lwp_exit(struct lwp *l)
    417 {
    418 	vaddr_t va = uvm_lwp_getuarea(l);
    419 	bool system = (l->l_flag & LW_SYSTEM) != 0;
    420 
    421 	if (system)
    422 		uvm_uarea_system_free(va);
    423 	else
    424 		uvm_uarea_free(va);
    425 #ifdef DIAGNOSTIC
    426 	uvm_lwp_setuarea(l, (vaddr_t)NULL);
    427 #endif
    428 }
    429 
    430 /*
     431  * uvm_init_limits: init per-process VM limits
    432  *
    433  * - called for process 0 and then inherited by all others.
    434  */
    435 
    436 void
    437 uvm_init_limits(struct proc *p)
    438 {
    439 
    440 	/*
    441 	 * Set up the initial limits on process VM.  Set the maximum
    442 	 * resident set size to be all of (reasonably) available memory.
    443 	 * This causes any single, large process to start random page
    444 	 * replacement once it fills memory.
    445 	 */
    446 
    447 	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
    448 	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
    449 	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
    450 	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
    451 	p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
    452 	p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
    453 	p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN(
    454 	    VM_MAXUSER_ADDRESS, ctob((rlim_t)uvmexp.free));
    455 }
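
/*
 * Worked example (values are assumptions): with 4 KiB pages and
 * uvmexp.free == 262144 free pages, ctob(262144) is 1 GiB, so the initial
 * RLIMIT_RSS soft limit becomes MIN(VM_MAXUSER_ADDRESS, 1 GiB), i.e. 1 GiB
 * on any port whose user address space is larger than that.
 */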
    456 
    457 /*
    458  * uvm_scheduler: process zero main loop.
    459  */
    460 
    461 extern struct loadavg averunnable;
    462 
    463 void
    464 uvm_scheduler(void)
    465 {
    466 	lwp_t *l = curlwp;
    467 
    468 	lwp_lock(l);
    469 	l->l_priority = PRI_VM;
    470 	l->l_class = SCHED_FIFO;
    471 	lwp_unlock(l);
    472 
    473 	for (;;) {
    474 		sched_pstats();
    475 		(void)kpause("uvm", false, hz, NULL);
    476 	}
    477 }
    478