uvm_glue.c revision 1.108
      1 /*	$NetBSD: uvm_glue.c,v 1.108 2007/07/14 22:27:15 ad Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6  *
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to Berkeley by
     10  * The Mach Operating System project at Carnegie-Mellon University.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by Charles D. Cranor,
     23  *      Washington University, the University of California, Berkeley and
     24  *      its contributors.
     25  * 4. Neither the name of the University nor the names of its contributors
     26  *    may be used to endorse or promote products derived from this software
     27  *    without specific prior written permission.
     28  *
     29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     39  * SUCH DAMAGE.
     40  *
     41  *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
     42  * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
     43  *
     44  *
     45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     46  * All rights reserved.
     47  *
     48  * Permission to use, copy, modify and distribute this software and
     49  * its documentation is hereby granted, provided that both the copyright
     50  * notice and this permission notice appear in all copies of the
     51  * software, derivative works or modified versions, and any portions
     52  * thereof, and that both notices appear in supporting documentation.
     53  *
     54  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     55  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     56  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     57  *
     58  * Carnegie Mellon requests users of this software to return to
     59  *
      60  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     61  *  School of Computer Science
     62  *  Carnegie Mellon University
     63  *  Pittsburgh PA 15213-3890
     64  *
     65  * any improvements or extensions that they make and grant Carnegie the
     66  * rights to redistribute these changes.
     67  */
     68 
     69 #include <sys/cdefs.h>
     70 __KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.108 2007/07/14 22:27:15 ad Exp $");
     71 
     72 #include "opt_coredump.h"
     73 #include "opt_kgdb.h"
     74 #include "opt_kstack.h"
     75 #include "opt_uvmhist.h"
     76 
     77 /*
     78  * uvm_glue.c: glue functions
     79  */
     80 
     81 #include <sys/param.h>
     82 #include <sys/systm.h>
     83 #include <sys/proc.h>
     84 #include <sys/resourcevar.h>
     85 #include <sys/buf.h>
     86 #include <sys/user.h>
     87 #include <sys/syncobj.h>
     88 
     89 #include <uvm/uvm.h>
     90 
     91 #include <machine/cpu.h>
     92 
     93 /*
     94  * local prototypes
     95  */
     96 
     97 static void uvm_swapout(struct lwp *);
     98 
     99 #define UVM_NUAREA_MAX 16
    100 static vaddr_t uvm_uareas;
    101 static int uvm_nuarea;
    102 kmutex_t uvm_uareas_lock;
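         /*
          * Free u-areas are cached on a singly-linked list threaded through
          * the u-areas themselves: UAREA_NEXTFREE() treats the vaddr_t stored
          * at UAREA_TO_USER() of a cached u-area as the link to the next free
          * one (see uvm_uarea_alloc()/uvm_uarea_free() below).
          */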
    103 #define	UAREA_NEXTFREE(uarea)	(*(vaddr_t *)(UAREA_TO_USER(uarea)))
    104 
    105 void uvm_uarea_free(vaddr_t);
    106 
    107 /*
    108  * XXXCDC: do these really belong here?
    109  */
    110 
    111 /*
    112  * uvm_kernacc: can the kernel access a region of memory
    113  *
    114  * - used only by /dev/kmem driver (mem.c)
    115  */
    116 
    117 bool
    118 uvm_kernacc(void *addr, size_t len, int rw)
    119 {
    120 	bool rv;
    121 	vaddr_t saddr, eaddr;
    122 	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
    123 
    124 	saddr = trunc_page((vaddr_t)addr);
    125 	eaddr = round_page((vaddr_t)addr + len);
    126 	vm_map_lock_read(kernel_map);
    127 	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
    128 	vm_map_unlock_read(kernel_map);
    129 
    130 	return(rv);
    131 }
    132 
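         /*
          * Illustrative (hypothetical) caller in the style of the /dev/kmem
          * driver: check that a kernel-space transfer is permitted before
          * copying.  "v", "c" and "uio" are placeholder names.
          *
          *	if (!uvm_kernacc((void *)v, c,
          *	    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
          *		return EFAULT;
          */
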
    133 #ifdef KGDB
    134 /*
    135  * Change protections on kernel pages from addr to addr+len
    136  * (presumably so debugger can plant a breakpoint).
    137  *
    138  * We force the protection change at the pmap level.  If we were
    139  * to use vm_map_protect a change to allow writing would be lazily-
    140  * applied meaning we would still take a protection fault, something
    141  * we really don't want to do.  It would also fragment the kernel
    142  * map unnecessarily.  We cannot use pmap_protect since it also won't
    143  * enforce a write-enable request.  Using pmap_enter is the only way
    144  * we can ensure the change takes place properly.
    145  */
    146 void
    147 uvm_chgkprot(void *addr, size_t len, int rw)
    148 {
    149 	vm_prot_t prot;
    150 	paddr_t pa;
    151 	vaddr_t sva, eva;
    152 
    153 	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
    154 	eva = round_page((vaddr_t)addr + len);
    155 	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
    156 		/*
    157 		 * Extract physical address for the page.
    158 		 */
    159 		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
    160 			panic("chgkprot: invalid page");
    161 		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
    162 	}
    163 	pmap_update(pmap_kernel());
    164 }
    165 #endif
    166 
    167 /*
    168  * uvm_vslock: wire user memory for I/O
    169  *
    170  * - called from physio and sys___sysctl
    171  * - XXXCDC: consider nuking this (or making it a macro?)
    172  */
    173 
    174 int
    175 uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
    176 {
    177 	struct vm_map *map;
    178 	vaddr_t start, end;
    179 	int error;
    180 
    181 	map = &vs->vm_map;
    182 	start = trunc_page((vaddr_t)addr);
    183 	end = round_page((vaddr_t)addr + len);
    184 	error = uvm_fault_wire(map, start, end, access_type, 0);
    185 	return error;
    186 }
    187 
    188 /*
    189  * uvm_vsunlock: unwire user memory wired by uvm_vslock()
    190  *
    191  * - called from physio and sys___sysctl
    192  * - XXXCDC: consider nuking this (or making it a macro?)
    193  */
    194 
    195 void
    196 uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
    197 {
    198 	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
    199 		round_page((vaddr_t)addr + len));
    200 }
    201 
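         /*
          * Illustrative (hypothetical) use of the pair above: wire a user
          * buffer around an I/O transfer and unwire it afterwards, much as
          * physio-style code does.  "buf" and "len" are placeholder names.
          *
          *	error = uvm_vslock(p->p_vmspace, buf, len,
          *	    VM_PROT_READ | VM_PROT_WRITE);
          *	if (error)
          *		return error;
          *	... transfer data to/from "buf" ...
          *	uvm_vsunlock(p->p_vmspace, buf, len);
          */
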
    202 /*
    203  * uvm_proc_fork: fork a virtual address space
    204  *
    205  * - the address space is copied as per parent map's inherit values
    206  */
    207 void
    208 uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
    209 {
    210 
    211 	if (shared == true) {
    212 		p2->p_vmspace = NULL;
    213 		uvmspace_share(p1, p2);
    214 	} else {
    215 		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
    216 	}
    217 
    218 	cpu_proc_fork(p1, p2);
    219 }
    220 
    221 
    222 /*
    223  * uvm_lwp_fork: fork a thread
    224  *
    225  * - a new "user" structure is allocated for the child process
    226  *	[filled in by MD layer...]
    227  * - if specified, the child gets a new user stack described by
    228  *	stack and stacksize
    229  * - NOTE: the kernel stack may be at a different location in the child
    230  *	process, and thus addresses of automatic variables may be invalid
    231  *	after cpu_lwp_fork returns in the child process.  We do nothing here
    232  *	after cpu_lwp_fork returns.
    233  * - XXXCDC: we need a way for this to return a failure value rather
    234  *   than just hang
    235  */
    236 void
    237 uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    238     void (*func)(void *), void *arg)
    239 {
    240 	int error;
    241 
    242 	/*
    243 	 * Wire down the U-area for the process, which contains the PCB
    244 	 * and the kernel stack.  Wired state is stored in l->l_flag's
     245 	 * LW_INMEM bit rather than in the vm_map_entry's wired count
    246 	 * to prevent kernel_map fragmentation.  If we reused a cached U-area,
     247 	 * LW_INMEM will already be set and we don't need to do anything.
    248 	 *
    249 	 * Note the kernel stack gets read/write accesses right off the bat.
    250 	 */
    251 
    252 	if ((l2->l_flag & LW_INMEM) == 0) {
    253 		vaddr_t uarea = USER_TO_UAREA(l2->l_addr);
    254 
    255 		error = uvm_fault_wire(kernel_map, uarea,
    256 		    uarea + USPACE, VM_PROT_READ | VM_PROT_WRITE, 0);
    257 		if (error)
    258 			panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error);
    259 #ifdef PMAP_UAREA
    260 		/* Tell the pmap this is a u-area mapping */
    261 		PMAP_UAREA(uarea);
    262 #endif
    263 		l2->l_flag |= LW_INMEM;
    264 	}
    265 
    266 #ifdef KSTACK_CHECK_MAGIC
    267 	/*
    268 	 * fill stack with magic number
    269 	 */
    270 	kstack_setup_magic(l2);
    271 #endif
    272 
    273 	/*
     274 	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
     275 	 * ready to run.  If this is a normal user fork, the child will exit
    276 	 * directly to user mode via child_return() on its first time
    277 	 * slice and will not return here.  If this is a kernel thread,
    278 	 * the specified entry point will be executed.
    279 	 */
    280 	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
    281 }
    282 
    283 /*
    284  * uvm_uarea_alloc: allocate a u-area
    285  */
    286 
    287 bool
    288 uvm_uarea_alloc(vaddr_t *uaddrp)
    289 {
    290 	vaddr_t uaddr;
    291 
    292 #ifndef USPACE_ALIGN
    293 #define USPACE_ALIGN    0
    294 #endif
    295 
    296 	mutex_enter(&uvm_uareas_lock);
    297 	if (uvm_nuarea > 0) {
    298 		uaddr = uvm_uareas;
    299 		uvm_uareas = UAREA_NEXTFREE(uaddr);
    300 		uvm_nuarea--;
    301 		mutex_exit(&uvm_uareas_lock);
    302 		*uaddrp = uaddr;
    303 		return true;
    304 	} else {
    305 		mutex_exit(&uvm_uareas_lock);
    306 		*uaddrp = uvm_km_alloc(kernel_map, USPACE, USPACE_ALIGN,
    307 		    UVM_KMF_PAGEABLE);
    308 		return false;
    309 	}
    310 }
    311 
    312 /*
    313  * uvm_uarea_free: free a u-area; never blocks
    314  */
    315 
    316 void
    317 uvm_uarea_free(vaddr_t uaddr)
    318 {
    319 	mutex_enter(&uvm_uareas_lock);
    320 	UAREA_NEXTFREE(uaddr) = uvm_uareas;
    321 	uvm_uareas = uaddr;
    322 	uvm_nuarea++;
    323 	mutex_exit(&uvm_uareas_lock);
    324 }
    325 
    326 /*
    327  * uvm_uarea_drain: return memory of u-areas over limit
    328  * back to system
    329  */
    330 
    331 void
    332 uvm_uarea_drain(bool empty)
    333 {
    334 	int leave = empty ? 0 : UVM_NUAREA_MAX;
    335 	vaddr_t uaddr;
    336 
    337 	if (uvm_nuarea <= leave)
    338 		return;
    339 
    340 	mutex_enter(&uvm_uareas_lock);
     341 	while (uvm_nuarea > leave) {
    342 		uaddr = uvm_uareas;
    343 		uvm_uareas = UAREA_NEXTFREE(uaddr);
    344 		uvm_nuarea--;
    345 		mutex_exit(&uvm_uareas_lock);
    346 		uvm_km_free(kernel_map, uaddr, USPACE, UVM_KMF_PAGEABLE);
    347 		mutex_enter(&uvm_uareas_lock);
    348 	}
    349 	mutex_exit(&uvm_uareas_lock);
    350 }
    351 
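         /*
          * Note: uvm_uarea_drain(false) trims the cache back to at most
          * UVM_NUAREA_MAX entries, while uvm_uarea_drain(true) releases
          * every cached u-area back to the kernel map.
          */
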
    352 /*
    353  * uvm_exit: exit a virtual address space
    354  *
    355  * - the process passed to us is a dead (pre-zombie) process; we
    356  *   are running on a different context now (the reaper).
    357  * - borrow proc0's address space because freeing the vmspace
    358  *   of the dead process may block.
    359  */
    360 
    361 void
    362 uvm_proc_exit(struct proc *p)
    363 {
    364 	struct lwp *l = curlwp; /* XXX */
    365 	struct vmspace *ovm;
    366 
    367 	KASSERT(p == l->l_proc);
    368 	ovm = p->p_vmspace;
    369 
    370 	/*
    371 	 * borrow proc0's address space.
    372 	 */
    373 	pmap_deactivate(l);
    374 	p->p_vmspace = proc0.p_vmspace;
    375 	pmap_activate(l);
    376 
    377 	uvmspace_free(ovm);
    378 }
    379 
    380 void
    381 uvm_lwp_exit(struct lwp *l)
    382 {
    383 	vaddr_t va = USER_TO_UAREA(l->l_addr);
    384 
    385 	l->l_flag &= ~LW_INMEM;
    386 	uvm_uarea_free(va);
    387 	l->l_addr = NULL;
    388 }
    389 
    390 /*
     391  * uvm_init_limits: init per-process VM limits
    392  *
    393  * - called for process 0 and then inherited by all others.
    394  */
    395 
    396 void
    397 uvm_init_limits(struct proc *p)
    398 {
    399 
    400 	/*
    401 	 * Set up the initial limits on process VM.  Set the maximum
    402 	 * resident set size to be all of (reasonably) available memory.
    403 	 * This causes any single, large process to start random page
    404 	 * replacement once it fills memory.
    405 	 */
    406 
    407 	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
    408 	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
    409 	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
    410 	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
    411 	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
    412 }
    413 
    414 #ifdef DEBUG
    415 int	enableswap = 1;
    416 int	swapdebug = 0;
    417 #define	SDB_FOLLOW	1
    418 #define SDB_SWAPIN	2
    419 #define SDB_SWAPOUT	4
    420 #endif
    421 
    422 /*
    423  * uvm_swapin: swap in an lwp's u-area.
    424  *
    425  * - must be called with the LWP's swap lock held.
    426  * - naturally, must not be called with l == curlwp
    427  */
    428 
    429 void
    430 uvm_swapin(struct lwp *l)
    431 {
    432 	vaddr_t addr;
    433 	int error;
    434 
    435 	KASSERT(mutex_owned(&l->l_swaplock));
    436 	KASSERT(l != curlwp);
    437 
    438 	addr = USER_TO_UAREA(l->l_addr);
     439 	/* make LW_INMEM true */
    440 	error = uvm_fault_wire(kernel_map, addr, addr + USPACE,
    441 	    VM_PROT_READ | VM_PROT_WRITE, 0);
    442 	if (error) {
    443 		panic("uvm_swapin: rewiring stack failed: %d", error);
    444 	}
    445 
    446 	/*
    447 	 * Some architectures need to be notified when the user area has
    448 	 * moved to new physical page(s) (e.g.  see mips/mips/vm_machdep.c).
    449 	 */
    450 	cpu_swapin(l);
    451 	lwp_lock(l);
    452 	if (l->l_stat == LSRUN)
    453 		sched_enqueue(l, false);
    454 	l->l_flag |= LW_INMEM;
    455 	l->l_swtime = 0;
    456 	lwp_unlock(l);
    457 	++uvmexp.swapins;
    458 }
    459 
    460 /*
    461  * uvm_kick_scheduler: kick the scheduler into action if not running.
    462  *
    463  * - called when swapped out processes have been awoken.
    464  */
    465 
    466 void
    467 uvm_kick_scheduler(void)
    468 {
    469 
    470 	if (uvm.swap_running == false)
    471 		return;
    472 
    473 	mutex_enter(&uvm_scheduler_mutex);
    474 	uvm.scheduler_kicked = true;
    475 	cv_signal(&uvm.scheduler_cv);
    476 	mutex_exit(&uvm_scheduler_mutex);
    477 }
    478 
    479 /*
    480  * uvm_scheduler: process zero main loop
    481  *
     482  * - attempt to swap in every swapped-out, runnable process in order of
    483  *	priority.
    484  * - if not enough memory, wake the pagedaemon and let it clear space.
    485  */
    486 
    487 void
    488 uvm_scheduler(void)
    489 {
    490 	struct lwp *l, *ll;
    491 	int pri;
    492 	int ppri;
    493 
    494 	l = curlwp;
    495 	lwp_lock(l);
    496 	l->l_priority = PVM;
    497 	l->l_usrpri = PVM;
    498 	lwp_unlock(l);
    499 
    500 	for (;;) {
    501 #ifdef DEBUG
    502 		mutex_enter(&uvm_scheduler_mutex);
    503 		while (!enableswap)
    504 			cv_wait(&uvm.scheduler_cv, &uvm_scheduler_mutex);
    505 		mutex_exit(&uvm_scheduler_mutex);
    506 #endif
    507 		ll = NULL;		/* process to choose */
    508 		ppri = INT_MIN;		/* its priority */
    509 
    510 		mutex_enter(&proclist_lock);
    511 		LIST_FOREACH(l, &alllwp, l_list) {
    512 			/* is it a runnable swapped out process? */
    513 			if (l->l_stat == LSRUN && !(l->l_flag & LW_INMEM)) {
    514 				pri = l->l_swtime + l->l_slptime -
    515 				    (l->l_proc->p_nice - NZERO) * 8;
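         				/*
         				 * For example, with the default nice
         				 * value (p_nice == NZERO) the last term
         				 * is zero, so an LWP swapped out for 10
         				 * seconds that has also slept 5 seconds
         				 * scores pri = 15; each nice level above
         				 * the default lowers the score by 8.
         				 */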
    516 				if (pri > ppri) {   /* higher priority? */
    517 					ll = l;
    518 					ppri = pri;
    519 				}
    520 			}
    521 		}
    522 #ifdef DEBUG
    523 		if (swapdebug & SDB_FOLLOW)
    524 			printf("scheduler: running, procp %p pri %d\n", ll,
    525 			    ppri);
    526 #endif
    527 		/*
    528 		 * Nothing to do, back to sleep
    529 		 */
    530 		if ((l = ll) == NULL) {
    531 			mutex_exit(&proclist_lock);
    532 			mutex_enter(&uvm_scheduler_mutex);
    533 			if (uvm.scheduler_kicked == false)
    534 				cv_wait(&uvm.scheduler_cv,
    535 				    &uvm_scheduler_mutex);
    536 			uvm.scheduler_kicked = false;
    537 			mutex_exit(&uvm_scheduler_mutex);
    538 			continue;
    539 		}
    540 
    541 		/*
     542 		 * we have found a swapped-out process which we would like
    543 		 * to bring back in.
    544 		 *
     545 		 * XXX: this part is really bogus because we could deadlock
    546 		 * on memory despite our feeble check
    547 		 */
    548 		if (uvmexp.free > atop(USPACE)) {
    549 #ifdef DEBUG
    550 			if (swapdebug & SDB_SWAPIN)
    551 				printf("swapin: pid %d(%s)@%p, pri %d "
    552 				    "free %d\n", l->l_proc->p_pid,
    553 				    l->l_proc->p_comm, l->l_addr, ppri,
    554 				    uvmexp.free);
    555 #endif
    556 			mutex_enter(&l->l_swaplock);
    557 			mutex_exit(&proclist_lock);
    558 			uvm_swapin(l);
    559 			mutex_exit(&l->l_swaplock);
    560 			continue;
    561 		} else {
    562 			/*
    563 			 * not enough memory, jab the pageout daemon and
     564 			 * wait until the coast is clear
    565 			 */
    566 			mutex_exit(&proclist_lock);
    567 #ifdef DEBUG
    568 			if (swapdebug & SDB_FOLLOW)
    569 				printf("scheduler: no room for pid %d(%s),"
    570 				    " free %d\n", l->l_proc->p_pid,
    571 				    l->l_proc->p_comm, uvmexp.free);
    572 #endif
    573 			uvm_wait("schedpwait");
    574 #ifdef DEBUG
    575 			if (swapdebug & SDB_FOLLOW)
    576 				printf("scheduler: room again, free %d\n",
    577 				    uvmexp.free);
    578 #endif
    579 		}
    580 	}
    581 }
    582 
    583 /*
    584  * swappable: is LWP "l" swappable?
    585  */
    586 
    587 static bool
    588 swappable(struct lwp *l)
    589 {
    590 
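         	/*
         	 * To be swappable an LWP must be resident (LW_INMEM) but not
         	 * currently running, not a system LWP, not exiting, not held
         	 * via uvm_lwp_hold(), and not blocked on a kernel rwlock or
         	 * mutex.
         	 */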
    591 	if ((l->l_flag & (LW_INMEM|LW_RUNNING|LW_SYSTEM|LW_WEXIT)) != LW_INMEM)
    592 		return false;
    593 	if (l->l_holdcnt != 0)
    594 		return false;
    595 	if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
    596 		return false;
    597 	return true;
    598 }
    599 
    600 /*
     601  * uvm_swapout_threads: find threads that can be swapped and unwire their
    602  *	u-areas.
    603  *
    604  * - called by the pagedaemon
     605  * - try to swap out at least one process
    606  * - processes that are sleeping or stopped for maxslp or more seconds
    607  *   are swapped... otherwise the longest-sleeping or stopped process
    608  *   is swapped, otherwise the longest resident process...
    609  */
    610 
    611 void
    612 uvm_swapout_threads(void)
    613 {
    614 	struct lwp *l;
    615 	struct lwp *outl, *outl2;
    616 	int outpri, outpri2;
    617 	int didswap = 0;
    618 	extern int maxslp;
    619 	bool gotit;
    620 
    621 	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */
    622 
    623 #ifdef DEBUG
    624 	if (!enableswap)
    625 		return;
    626 #endif
    627 
    628 	/*
    629 	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
    630 	 * outl2/outpri2: the longest resident thread (its swap time)
    631 	 */
    632 	outl = outl2 = NULL;
    633 	outpri = outpri2 = 0;
    634 
    635  restart:
    636 	mutex_enter(&proclist_lock);
    637 	LIST_FOREACH(l, &alllwp, l_list) {
    638 		KASSERT(l->l_proc != NULL);
    639 		if (!mutex_tryenter(&l->l_swaplock))
    640 			continue;
    641 		if (!swappable(l)) {
    642 			mutex_exit(&l->l_swaplock);
    643 			continue;
    644 		}
    645 		switch (l->l_stat) {
    646 		case LSONPROC:
    647 			break;
    648 
    649 		case LSRUN:
    650 			if (l->l_swtime > outpri2) {
    651 				outl2 = l;
    652 				outpri2 = l->l_swtime;
    653 			}
    654 			break;
    655 
    656 		case LSSLEEP:
    657 		case LSSTOP:
    658 			if (l->l_slptime >= maxslp) {
    659 				mutex_exit(&proclist_lock);
    660 				uvm_swapout(l);
    661 				/*
    662 				 * Locking in the wrong direction -
    663 				 * try to prevent the LWP from exiting.
    664 				 */
    665 				gotit = mutex_tryenter(&proclist_lock);
    666 				mutex_exit(&l->l_swaplock);
    667 				didswap++;
    668 				if (!gotit)
    669 					goto restart;
    670 				continue;
    671 			} else if (l->l_slptime > outpri) {
    672 				outl = l;
    673 				outpri = l->l_slptime;
    674 			}
    675 			break;
    676 		}
    677 		mutex_exit(&l->l_swaplock);
    678 	}
    679 
    680 	/*
    681 	 * If we didn't get rid of any real duds, toss out the next most
    682 	 * likely sleeping/stopped or running candidate.  We only do this
    683 	 * if we are real low on memory since we don't gain much by doing
    684 	 * it (USPACE bytes).
    685 	 */
    686 	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
    687 		if ((l = outl) == NULL)
    688 			l = outl2;
    689 #ifdef DEBUG
    690 		if (swapdebug & SDB_SWAPOUT)
    691 			printf("swapout_threads: no duds, try procp %p\n", l);
    692 #endif
    693 		if (l) {
    694 			mutex_enter(&l->l_swaplock);
    695 			mutex_exit(&proclist_lock);
    696 			if (swappable(l))
    697 				uvm_swapout(l);
    698 			mutex_exit(&l->l_swaplock);
    699 			return;
    700 		}
    701 	}
    702 
    703 	mutex_exit(&proclist_lock);
    704 }
    705 
    706 /*
    707  * uvm_swapout: swap out lwp "l"
    708  *
    709  * - currently "swapout" means "unwire U-area" and "pmap_collect()"
    710  *   the pmap.
    711  * - must be called with l->l_swaplock held.
    712  * - XXXCDC: should deactivate all process' private anonymous memory
    713  */
    714 
    715 static void
    716 uvm_swapout(struct lwp *l)
    717 {
    718 	vaddr_t addr;
    719 	struct proc *p = l->l_proc;
    720 
    721 	KASSERT(mutex_owned(&l->l_swaplock));
    722 
    723 #ifdef DEBUG
    724 	if (swapdebug & SDB_SWAPOUT)
    725 		printf("swapout: lid %d.%d(%s)@%p, stat %x pri %d free %d\n",
    726 	   p->p_pid, l->l_lid, p->p_comm, l->l_addr, l->l_stat,
    727 	   l->l_slptime, uvmexp.free);
    728 #endif
    729 
    730 	/*
    731 	 * Mark it as (potentially) swapped out.
    732 	 */
    733 	lwp_lock(l);
    734 	if (!swappable(l)) {
    735 		KDASSERT(l->l_cpu != curcpu());
    736 		lwp_unlock(l);
    737 		return;
    738 	}
    739 	l->l_flag &= ~LW_INMEM;
    740 	l->l_swtime = 0;
    741 	if (l->l_stat == LSRUN)
    742 		sched_dequeue(l);
    743 	lwp_unlock(l);
    744 	p->p_stats->p_ru.ru_nswap++;	/* XXXSMP */
    745 	++uvmexp.swapouts;
    746 
    747 	/*
    748 	 * Do any machine-specific actions necessary before swapout.
    749 	 * This can include saving floating point state, etc.
    750 	 */
    751 	cpu_swapout(l);
    752 
    753 	/*
    754 	 * Unwire the to-be-swapped process's user struct and kernel stack.
    755 	 */
    756 	addr = USER_TO_UAREA(l->l_addr);
    757 	uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !L_INMEM */
    758 	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
    759 }
    760 
    761 /*
     762  * uvm_lwp_hold: prevent lwp "l" from being swapped out, and bring it
     763  * back into memory if it is currently swapped out.
    764  */
    765 
    766 void
    767 uvm_lwp_hold(struct lwp *l)
    768 {
    769 
    770 	/* XXXSMP mutex_enter(&l->l_swaplock); */
    771 	if (l->l_holdcnt++ == 0 && (l->l_flag & LW_INMEM) == 0)
    772 		uvm_swapin(l);
    773 	/* XXXSMP mutex_exit(&l->l_swaplock); */
    774 }
    775 
    776 /*
     777  * uvm_lwp_rele: release a hold on lwp "l".  When the hold count
     778  * drops to zero, it's eligible to be swapped.
    779  */
    780 
    781 void
    782 uvm_lwp_rele(struct lwp *l)
    783 {
    784 
    785 	KASSERT(l->l_holdcnt != 0);
    786 
    787 	/* XXXSMP mutex_enter(&l->l_swaplock); */
    788 	l->l_holdcnt--;
    789 	/* XXXSMP mutex_exit(&l->l_swaplock); */
    790 }
    791 
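         /*
          * Illustrative (hypothetical) pairing of the two functions above:
          * pin another LWP's u-area while examining its PCB, then drop the
          * hold.
          *
          *	uvm_lwp_hold(l);
          *	... inspect l->l_addr (the u-area / PCB) ...
          *	uvm_lwp_rele(l);
          */
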
    792 #ifdef COREDUMP
    793 /*
    794  * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
    795  * a core file.
    796  */
    797 
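         /*
          * The callback contract, as implemented below: "func" is invoked
          * once per dumpable region with a uvm_coredump_state describing it;
          * the map lock is dropped around each call and a non-zero return
          * aborts the walk.  A purely illustrative (hypothetical) callback
          * skeleton:
          *
          *	static int
          *	dump_one_segment(struct proc *p, void *iocookie,
          *	    struct uvm_coredump_state *st)
          *	{
          *		... write [st->start, st->realend) to the core file,
          *		    honouring st->prot and the UVM_COREDUMP_STACK
          *		    flag ...
          *		return 0;
          *	}
          */
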
    798 int
    799 uvm_coredump_walkmap(struct proc *p, void *iocookie,
    800     int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    801     void *cookie)
    802 {
    803 	struct uvm_coredump_state state;
    804 	struct vmspace *vm = p->p_vmspace;
    805 	struct vm_map *map = &vm->vm_map;
    806 	struct vm_map_entry *entry;
    807 	int error;
    808 
    809 	entry = NULL;
    810 	vm_map_lock_read(map);
    811 	state.end = 0;
    812 	for (;;) {
    813 		if (entry == NULL)
    814 			entry = map->header.next;
    815 		else if (!uvm_map_lookup_entry(map, state.end, &entry))
    816 			entry = entry->next;
    817 		if (entry == &map->header)
    818 			break;
    819 
    820 		state.cookie = cookie;
    821 		if (state.end > entry->start) {
    822 			state.start = state.end;
    823 		} else {
    824 			state.start = entry->start;
    825 		}
    826 		state.realend = entry->end;
    827 		state.end = entry->end;
    828 		state.prot = entry->protection;
    829 		state.flags = 0;
    830 
    831 		/*
    832 		 * Dump the region unless one of the following is true:
    833 		 *
    834 		 * (1) the region has neither object nor amap behind it
    835 		 *     (ie. it has never been accessed).
    836 		 *
    837 		 * (2) the region has no amap and is read-only
    838 		 *     (eg. an executable text section).
    839 		 *
    840 		 * (3) the region's object is a device.
    841 		 *
    842 		 * (4) the region is unreadable by the process.
    843 		 */
    844 
    845 		KASSERT(!UVM_ET_ISSUBMAP(entry));
    846 		KASSERT(state.start < VM_MAXUSER_ADDRESS);
    847 		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
    848 		if (entry->object.uvm_obj == NULL &&
    849 		    entry->aref.ar_amap == NULL) {
    850 			state.realend = state.start;
    851 		} else if ((entry->protection & VM_PROT_WRITE) == 0 &&
    852 		    entry->aref.ar_amap == NULL) {
    853 			state.realend = state.start;
    854 		} else if (entry->object.uvm_obj != NULL &&
    855 		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
    856 			state.realend = state.start;
    857 		} else if ((entry->protection & VM_PROT_READ) == 0) {
    858 			state.realend = state.start;
    859 		} else {
    860 			if (state.start >= (vaddr_t)vm->vm_maxsaddr)
    861 				state.flags |= UVM_COREDUMP_STACK;
    862 
    863 			/*
     864 			 * If this is an anonymous entry, only dump instantiated
    865 			 * pages.
    866 			 */
    867 			if (entry->object.uvm_obj == NULL) {
    868 				vaddr_t end;
    869 
    870 				amap_lock(entry->aref.ar_amap);
    871 				for (end = state.start;
    872 				     end < state.end; end += PAGE_SIZE) {
    873 					struct vm_anon *anon;
    874 					anon = amap_lookup(&entry->aref,
    875 					    end - entry->start);
    876 					/*
    877 					 * If we have already encountered an
    878 					 * uninstantiated page, stop at the
     879 					 * first instantiated page.
    880 					 */
    881 					if (anon != NULL &&
    882 					    state.realend != state.end) {
    883 						state.end = end;
    884 						break;
    885 					}
    886 
    887 					/*
    888 					 * If this page is the first
    889 					 * uninstantiated page, mark this as
     890 					 * the real ending point.  Continue
     891 					 * counting uninstantiated pages.
    892 					 */
    893 					if (anon == NULL &&
    894 					    state.realend == state.end) {
    895 						state.realend = end;
    896 					}
    897 				}
    898 				amap_unlock(entry->aref.ar_amap);
    899 			}
    900 		}
    901 
    902 
    903 		vm_map_unlock_read(map);
    904 		error = (*func)(p, iocookie, &state);
    905 		if (error)
    906 			return (error);
    907 		vm_map_lock_read(map);
    908 	}
    909 	vm_map_unlock_read(map);
    910 
    911 	return (0);
    912 }
    913 #endif /* COREDUMP */
    914