/*	$NetBSD: uvm_glue.c,v 1.44.2.3 2001/04/09 01:59:14 nathanw Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_sysv.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>

#include <machine/cpu.h>

/*
 * local prototypes
 */

static void uvm_swapout __P((struct lwp *));

/*
 * XXXCDC: do these really belong here?
 */

int readbuffers = 0;		/* allow KGDB to read kern buffer pool */
				/* XXX: see uvm_kernacc */


/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
 */

boolean_t
uvm_kernacc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	boolean_t rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there is.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vaddr_t)buffers &&
			     saddr < (vaddr_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return(rv);
}
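
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * /dev/kmem-style read routine might use uvm_kernacc() as a sanity
 * check before touching kernel memory.  The function name below is
 * made up for illustration.
 */
#if 0
int
example_kmem_read(addr, len)
	caddr_t addr;
	size_t len;
{

	/* refuse the copy if the region is not readable kernel memory */
	if (!uvm_kernacc(addr, len, B_READ))
		return (EFAULT);
	/* ... the copyout of [addr, addr+len) would follow here ... */
	return (0);
}
#endif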

/*
 * uvm_useracc: can the user access it?
 *
 * - called from physio() and sys___sysctl().
 */

boolean_t
uvm_useracc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	vm_map_t map;
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/* XXX curproc */
	map = &curproc->l_proc->p_vmspace->vm_map;

	vm_map_lock_read(map);
	rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len), prot);
	vm_map_unlock_read(map);

	return(rv);
}
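
/*
 * Example (illustrative sketch): a driver about to do raw I/O on a
 * user buffer might validate it with uvm_useracc() first, in the
 * style of physio().  The helper name below is hypothetical.
 */
#if 0
int
example_check_user_buf(buf, len, rw)
	caddr_t buf;
	size_t len;
	int rw;		/* B_READ or B_WRITE */
{

	/* reject the request before starting I/O on an invalid buffer */
	if (!uvm_useracc(buf, len, rw))
		return (EFAULT);
	return (0);
}
#endif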

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 * We use a cheezy hack to differentiate physical
		 * page 0 from an invalid mapping, not that it
		 * really matters...
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(p, addr, len, access_type)
	struct proc *p;
	caddr_t	addr;
	size_t	len;
	vm_prot_t access_type;
{
	vm_map_t map;
	vaddr_t start, end;
	int error;

	map = &p->p_vmspace->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(p, addr, len)
	struct proc *p;
	caddr_t	addr;
	size_t	len;
{
	uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
		round_page((vaddr_t)addr + len));
}
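
/*
 * Example (illustrative sketch): the intended calling pattern is to
 * bracket the actual transfer with uvm_vslock()/uvm_vsunlock(), as
 * physio() does, so the pages cannot be paged out mid-transfer.  The
 * helper below is hypothetical.
 */
#if 0
int
example_locked_io(p, addr, len)
	struct proc *p;
	caddr_t addr;
	size_t len;
{
	int error;

	/* wire the user pages; fail cleanly if they cannot be faulted in */
	error = uvm_vslock(p, addr, len, VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return (error);
	/* ... perform the transfer on the now-wired pages ... */
	uvm_vsunlock(p, addr, len);
	return (0);
}
#endif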

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(p1, p2, shared)
	struct proc *p1, *p2;
	boolean_t shared;
{

	if (shared == TRUE) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);			/* share vmspace */
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */
	}
}
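
/*
 * Example (illustrative sketch): a fork path might choose between a
 * shared and a copied vmspace based on a vfork-style flag.
 * EXAMPLE_FORK_SHAREVM is a made-up name, not a real kernel flag.
 */
#if 0
	/* inside a hypothetical fork1()-like routine: */
	uvm_proc_fork(p1, p2,
	    (flags & EXAMPLE_FORK_SHAREVM) ? TRUE : FALSE);
#endif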


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_fork returns in the child process.  We do nothing here
 *	after cpu_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(l1, l2, stack, stacksize, func, arg)
	struct lwp *l1, *l2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct user *up = l2->l_addr;
	int error;

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in p->p_flag's
	 * P_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.
	 *
	 * Note the kernel stack gets read/write accesses right off
	 * the bat.
	 */
	error = uvm_fault_wire(kernel_map, (vaddr_t)up,
	    (vaddr_t)up + USPACE, VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error);

	/*
	 * cpu_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first
	 * time slice and will not return here.  If this is a kernel
	 * thread, the specified entry point will be executed.
	 */
	cpu_fork(l1, l2, stack, stacksize, func, arg);
}
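
/*
 * Example (illustrative sketch): for a kernel thread, "func" is the
 * thread's entry point and "arg" its argument; the child runs func
 * instead of returning to user mode.  All names below are made up.
 */
#if 0
static void
example_kthread_main(arg)
	void *arg;
{
	struct example_softc *sc = arg;

	for (;;) {
		/* ... do periodic work on sc ... */
		tsleep(sc, PWAIT, "exwork", hz);
	}
	/* NOTREACHED */
}

	/* caller: uvm_lwp_fork(l1, l2, NULL, 0, example_kthread_main, sc); */
#endif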

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - we must run in a separate thread because freeing the vmspace
 *   of the dead process may block.
 */
void
uvm_proc_exit(p)
	struct proc *p;
{
	uvmspace_free(p->p_vmspace);
}

/*
 * uvm_lwp_exit: release an exiting lwp's wired u-area
 */
void
uvm_lwp_exit(l)
	struct lwp *l;
{
	vaddr_t va = (vaddr_t)l->l_addr;

	uvm_fault_unwire(kernel_map, va, va + USPACE);
	uvm_km_free(kernel_map, va, USPACE);

	l->l_flag &= ~L_INMEM;
	l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */
void
uvm_init_limits(p)
	struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 */

void
uvm_swapin(l)
	struct lwp *l;
{
	vaddr_t addr;
	int s;

	addr = (vaddr_t)l->l_addr;
	/* make L_INMEM true */
	uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g.  see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(l);
	SCHED_LOCK(s);
	if (l->l_stat == LSRUN)
		setrunqueue(l);
	l->l_flag |= L_INMEM;
	SCHED_UNLOCK(s);
	l->l_swtime = 0;
	++uvmexp.swapins;
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *	priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler()
{
	struct lwp *l, *ll;
	int pri;
	int ppri;

loop:
#ifdef DEBUG
	while (!enableswap)
		tsleep(&proc0, PVM, "noswap", 0);
#endif
	ll = NULL;		/* lwp to choose */
	ppri = INT_MIN;	/* its priority */
	proclist_lock_read();

	LIST_FOREACH(l, &alllwp, l_list) {
		/* is it a runnable swapped out lwp? */
		if (l->l_stat == LSRUN && (l->l_flag & L_INMEM) == 0) {
			pri = l->l_swtime + l->l_slptime -
			    (l->l_proc->p_nice - NZERO) * 8;
			if (pri > ppri) {   /* higher priority?  remember it. */
				ll = l;
				ppri = pri;
			}
		}
	}
	/*
	 * XXXSMP: possible unlock/sleep race between here and the
	 * "scheduler" tsleep below..
	 */
	proclist_unlock_read();

#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: running, procp %p pri %d\n", ll, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((l = ll) == NULL) {
		tsleep(&proc0, PVM, "scheduler", 0);
		goto loop;
	}

	/*
	 * we have found a swapped-out process which we would like to
	 * bring back in.
	 *
	 * XXX: this part is really bogus cuz we could deadlock on memory
	 * despite our feeble check
	 */
	if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%p, pri %d free %d\n",
			    l->l_proc->p_pid, l->l_proc->p_comm, l->l_addr,
			    ppri, uvmexp.free);
#endif
		uvm_swapin(l);
		goto loop;
	}
	/*
	 * not enough memory, jab the pageout daemon and wait til the coast
	 * is clear
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: no room for pid %d(%s), free %d\n",
		    l->l_proc->p_pid, l->l_proc->p_comm, uvmexp.free);
#endif
	uvm_wait("schedpwait");
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: room again, free %d\n", uvmexp.free);
#endif
	goto loop;
}
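
/*
 * Worked example of the swapin priority computation above (the values
 * are made up): an lwp swapped out for 10 seconds, asleep for 5, whose
 * process runs at p_nice = NZERO + 2, ranks at 10 + 5 - 2 * 8 = -1.
 * A long-swapped lwp at the default nice (NZERO) with the same times
 * ranks at 10 + 5 - 0 = 15, so nicer processes are brought back in
 * later than normal ones with comparable swap and sleep times.
 */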

/*
 * swappable: is LWP "l" swappable?
 */

#define	swappable(l)							\
	(((l)->l_flag & (L_INMEM)) &&					\
	 ((((l)->l_proc->p_flag) & (P_SYSTEM | P_WEXIT)) == 0) &&	\
	 (l)->l_holdcnt == 0)

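/*
 * Example (illustrative sketch): an equivalent function form of the
 * swappable() macro, for readability only.  An lwp is a swap candidate
 * only if its u-area is resident, its process is neither a system
 * process nor exiting, and nothing holds the u-area in core.  The
 * function name is made up.
 */
#if 0
static boolean_t
example_is_swappable(l)
	struct lwp *l;
{

	return ((l->l_flag & L_INMEM) != 0 &&
	    (l->l_proc->p_flag & (P_SYSTEM | P_WEXIT)) == 0 &&
	    l->l_holdcnt == 0);
}
#endif
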
/*
 * swapout_threads: find threads that can be swapped and unwire their
 *	u-areas.
 *
 * - called by the pagedaemon
 * - try and swap at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */
void
uvm_swapout_threads()
{
	struct lwp *l;
	struct lwp *outl, *outl2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
	 * outl2/outpri2: the longest resident thread (its swap time)
	 */
	outl = outl2 = NULL;
	outpri = outpri2 = 0;
	proclist_lock_read();
	LIST_FOREACH(l, &alllwp, l_list) {
		if (!swappable(l))
			continue;
		switch (l->l_stat) {
		case LSRUN:
		case LSONPROC:
			if (l->l_swtime > outpri2) {
				outl2 = l;
				outpri2 = l->l_swtime;
			}
			continue;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_slptime >= maxslp) {
				uvm_swapout(l);
				didswap++;
			} else if (l->l_slptime > outpri) {
				outl = l;
				outpri = l->l_slptime;
			}
			continue;
		}
	}
	proclist_unlock_read();

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((l = outl) == NULL)
			l = outl2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %p\n", l);
#endif
		if (l)
			uvm_swapout(l);
	}
	pmap_update();
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(l)
	struct lwp *l;
{
	vaddr_t addr;
	int s;
	struct proc *p = l->l_proc;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%p, stat %x slptime %d free %d\n",
		    p->p_pid, p->p_comm, l->l_addr, l->l_stat,
		    l->l_slptime, uvmexp.free);
#endif

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(l);

	/*
	 * Mark it as (potentially) swapped out.  Note that SCHED_LOCK()
	 * saves the spl in "s"; a stray splstatclock() here would
	 * clobber it before SCHED_UNLOCK() restores it.
	 */
	SCHED_LOCK(s);
	l->l_flag &= ~L_INMEM;
	if (l->l_stat == LSRUN)
		remrunqueue(l);
	SCHED_UNLOCK(s);
	l->l_swtime = 0;
	++uvmexp.swapouts;

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	addr = (vaddr_t)l->l_addr;
	uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
}