/* $NetBSD: uvm_glue.c,v 1.139 2009/08/09 22:19:09 matt Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_glue.c   8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.139 2009/08/09 22:19:09 matt Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static int uarea_swapin(vaddr_t);
static void uvm_swapout(struct lwp *);

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
        bool rv;
        vaddr_t saddr, eaddr;
        vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

        saddr = trunc_page((vaddr_t)addr);
        eaddr = round_page((vaddr_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);

        return(rv);
}
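
/*
 * Illustrative sketch (not part of this file): how a /dev/kmem-style
 * read routine can use uvm_kernacc() to validate a kernel address
 * range before copying it out.  The function name and uio handling
 * are a hypothetical, simplified stand-in for the real mem.c driver.
 *
 *      int
 *      kmem_read_sketch(struct uio *uio)
 *      {
 *              void *kva = (void *)(vaddr_t)uio->uio_offset;
 *              size_t cnt = uio->uio_resid;
 *
 *              if (!uvm_kernacc(kva, cnt, B_READ))
 *                      return EFAULT;
 *              return uiomove(kva, cnt, uio);
 *      }
 */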

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so the debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect(), a change to allow writing would be
 * lazily applied, meaning we would still take a protection fault;
 * that is something we really don't want.  It would also fragment the
 * kernel map unnecessarily.  We cannot use pmap_protect() since it
 * also won't enforce a write-enable request.  Using pmap_enter() is
 * the only way we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
        vm_prot_t prot;
        paddr_t pa;
        vaddr_t sva, eva;

        prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
        eva = round_page((vaddr_t)addr + len);
        for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
                /*
                 * Extract the physical address for the page.
                 */
                if (pmap_extract(pmap_kernel(), sva, &pa) == false)
                        panic("%s: invalid page", __func__);
                pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
        }
        pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
        struct vm_map *map;
        vaddr_t start, end;
        int error;

        map = &vs->vm_map;
        start = trunc_page((vaddr_t)addr);
        end = round_page((vaddr_t)addr + len);
        error = uvm_fault_wire(map, start, end, access_type, 0);
        return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
        uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
                round_page((vaddr_t)addr + len));
}
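
/*
 * Illustrative sketch (not part of this file): the bracketing pattern
 * physio-style direct I/O uses around these two helpers.  The transfer
 * function is hypothetical; the point is that every successful
 * uvm_vslock() must be paired with a uvm_vsunlock() over the same
 * range once the device has finished touching the pages.
 *
 *      error = uvm_vslock(p->p_vmspace, userbuf, buflen, VM_PROT_READ);
 *      if (error)
 *              return error;
 *      error = device_transfer(userbuf, buflen);    (hypothetical)
 *      uvm_vsunlock(p->p_vmspace, userbuf, buflen);
 *      return error;
 */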

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

        if (shared == true) {
                p2->p_vmspace = NULL;
                uvmspace_share(p1, p2);
        } else {
                p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
        }

        cpu_proc_fork(p1, p2);
}
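
/*
 * Illustrative sketch (not part of this file): fork1() picks the
 * "shared" argument from the fork flavor, e.g. a vfork()-style caller
 * shares the parent's vmspace while a plain fork() copies it according
 * to the inherit values.  The flag test is a simplified, hypothetical
 * rendering of the real fork1() logic.
 *
 *      uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) != 0);
 */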


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *      [filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *      stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *      process, and thus addresses of automatic variables may be invalid
 *      after cpu_lwp_fork returns in the child process.  We do nothing here
 *      after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{

        /*
         * Wire down the U-area for the process, which contains the PCB
         * and the kernel stack.  Wired state is stored in l->l_flag's
         * LW_INMEM bit rather than in the vm_map_entry's wired count
         * to prevent kernel_map fragmentation.  If we reused a cached U-area,
         * LW_INMEM will already be set and we don't need to do anything.
         *
         * Note the kernel stack gets read/write accesses right off the bat.
         */

        if ((l2->l_flag & LW_INMEM) == 0) {
#ifdef VMSWAP_UAREA
                vaddr_t uarea = USER_TO_UAREA(l2->l_addr);
                int error;

                if ((error = uarea_swapin(uarea)) != 0)
                        panic("%s: uvm_fault_wire failed: %d", __func__, error);
#ifdef PMAP_UAREA
                /* Tell the pmap this is a u-area mapping */
                PMAP_UAREA(uarea);
#endif
#endif /* VMSWAP_UAREA */
                l2->l_flag |= LW_INMEM;
        }

        /* Fill the stack with a magic number. */
        kstack_setup_magic(l2);

        /*
         * cpu_lwp_fork() copies and updates the pcb, and makes the child
         * ready to run.  If this is a normal user fork, the child will exit
         * directly to user mode via child_return() on its first time
         * slice and will not return here.  If this is a kernel thread,
         * the specified entry point will be executed.
         */
        cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);

        /* Mark the emap as inactive for the new LWP. */
        l2->l_emap_gen = UVM_EMAP_INACTIVE;
}

static int
uarea_swapin(vaddr_t addr)
{

        return uvm_fault_wire(kernel_map, addr, addr + USPACE,
            VM_PROT_READ | VM_PROT_WRITE, 0);
}

#ifdef VMSWAP_UAREA
static void
uarea_swapout(vaddr_t addr)
{

        uvm_fault_unwire(kernel_map, addr, addr + USPACE);
}
#endif /* VMSWAP_UAREA */

#ifndef USPACE_ALIGN
#define USPACE_ALIGN    0
#endif

static pool_cache_t uvm_uarea_cache;

static int
uarea_ctor(void *arg, void *obj, int flags)
{
#if defined(PMAP_MAP_POOLPAGE) && !defined(VMSWAP_UAREA)
        if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0)
                return 0;
#endif
        KASSERT((flags & PR_WAITOK) != 0);
        return uarea_swapin((vaddr_t)obj);
}

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{
#if defined(PMAP_MAP_POOLPAGE) && !defined(VMSWAP_UAREA)
        if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
                struct vm_page *pg;
                vaddr_t va;

                pg = uvm_pagealloc(NULL, 0, NULL,
                   ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
                if (pg == NULL)
                        return NULL;
                va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
                if (va == 0)
                        uvm_pagefree(pg);
                return (void *)va;
        }
#endif
        return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
            USPACE_ALIGN, UVM_KMF_PAGEABLE |
            ((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
            (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{
#if defined(PMAP_MAP_POOLPAGE) && !defined(VMSWAP_UAREA)
        if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
                paddr_t pa;

                pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
                KASSERT(pa != 0);
                uvm_pagefree(PHYS_TO_VM_PAGE(pa));
                return;
        }
#endif
        uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
            UVM_KMF_PAGEABLE);
}

static struct pool_allocator uvm_uarea_allocator = {
        .pa_alloc = uarea_poolpage_alloc,
        .pa_free = uarea_poolpage_free,
        .pa_pagesz = USPACE,
};

void
uvm_uarea_init(void)
{
        int flags = PR_NOTOUCH;

        /*
         * Specify PR_NOALIGN unless the alignment provided by
         * the backend (USPACE_ALIGN) is sufficient to provide
         * pool page size (USPACE) alignment.
         */

        if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
            (USPACE_ALIGN % USPACE) != 0) {
                flags |= PR_NOALIGN;
        }

        uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
            "uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
}
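
/*
 * Worked example of the PR_NOALIGN decision above (illustrative; the
 * real values are machine-dependent).  On a port where USPACE is two
 * 4 KB pages and USPACE_ALIGN is 0, the backend guarantees only page
 * alignment, so USPACE-aligned u-areas cannot be assumed and
 * PR_NOALIGN must be set.  On a port that defines USPACE_ALIGN equal
 * to USPACE, both tests are false and the pool may rely on naturally
 * aligned pool pages.
 */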

/*
 * uvm_uarea_alloc: allocate a u-area
 */

bool
uvm_uarea_alloc(vaddr_t *uaddrp)
{

        *uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
        return true;
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
{

        pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}
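
/*
 * Illustrative sketch (not part of this file): how an LWP-creation
 * path pairs these helpers.  The u-area address is converted to a
 * "struct user" pointer with UAREA_TO_USER(); uvm_lwp_exit() below
 * performs the reverse conversion on the free side.
 *
 *      vaddr_t uaddr;
 *
 *      (void)uvm_uarea_alloc(&uaddr);
 *      l2->l_addr = UAREA_TO_USER(uaddr);
 *      ...
 *      uvm_uarea_free(USER_TO_UAREA(l2->l_addr), curcpu());
 */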

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
        struct lwp *l = curlwp; /* XXX */
        struct vmspace *ovm;

        KASSERT(p == l->l_proc);
        ovm = p->p_vmspace;

        /*
         * borrow proc0's address space.
         */
        KPREEMPT_DISABLE(l);
        pmap_deactivate(l);
        p->p_vmspace = proc0.p_vmspace;
        pmap_activate(l);
        KPREEMPT_ENABLE(l);

        uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
        vaddr_t va = USER_TO_UAREA(l->l_addr);

        l->l_flag &= ~LW_INMEM;
        uvm_uarea_free(va, l->l_cpu);
        l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

        /*
         * Set up the initial limits on process VM.  Set the maximum
         * resident set size to be all of (reasonably) available memory.
         * This causes any single, large process to start random page
         * replacement once it fills memory.
         */

        p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
        p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
        p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
        p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
        p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
        p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
        p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}
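
/*
 * Worked example for the RSS default above (illustrative; real numbers
 * depend on the machine).  ptoa() converts a page count to bytes, so
 * with 4 KB pages and uvmexp.free == 25000 when process 0 is set up,
 * RLIMIT_RSS starts at 25000 * 4096 = 102400000 bytes, about 98 MB.
 */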

#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;
#define SDB_FOLLOW      1
#define SDB_SWAPIN      2
#define SDB_SWAPOUT     4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 *
 * - must be called with the LWP's swap lock held.
 * - naturally, must not be called with l == curlwp
 */

void
uvm_swapin(struct lwp *l)
{
#ifdef VMSWAP_UAREA
        int error;
#endif

        KASSERT(mutex_owned(&l->l_swaplock));
        KASSERT(l != curlwp);

#ifdef VMSWAP_UAREA
        error = uarea_swapin(USER_TO_UAREA(l->l_addr));
        if (error) {
                panic("%s: rewiring stack failed: %d", __func__, error);
        }

        /*
         * Some architectures need to be notified when the user area has
         * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
         */
        cpu_swapin(l);
#endif
        lwp_lock(l);
        if (l->l_stat == LSRUN)
                sched_enqueue(l, false);
        l->l_flag |= LW_INMEM;
        l->l_swtime = 0;
        lwp_unlock(l);
        ++uvmexp.swapins;
}

/*
 * uvm_kick_scheduler: kick the scheduler into action if not running.
 *
 * - called when swapped out processes have been awoken.
 */

void
uvm_kick_scheduler(void)
{

        if (uvm.swap_running == false)
                return;

        mutex_enter(&uvm_scheduler_mutex);
        uvm.scheduler_kicked = true;
        cv_signal(&uvm.scheduler_cv);
        mutex_exit(&uvm_scheduler_mutex);
}
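
/*
 * Illustrative sketch (not part of this file): a wakeup path that has
 * just made a swapped-out LWP runnable pokes the swapper so it can
 * consider bringing the u-area back in.  setrunnable() is the natural
 * caller; the test shown is a simplified, hypothetical rendering.
 *
 *      if ((l->l_flag & LW_INMEM) == 0)
 *              uvm_kick_scheduler();
 */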

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *      priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler(void)
{
        struct lwp *l, *ll;
        int pri;
        int ppri;

        l = curlwp;
        lwp_lock(l);
        l->l_priority = PRI_VM;
        l->l_class = SCHED_FIFO;
        lwp_unlock(l);

        for (;;) {
#ifdef DEBUG
                mutex_enter(&uvm_scheduler_mutex);
                while (!enableswap)
                        cv_wait(&uvm.scheduler_cv, &uvm_scheduler_mutex);
                mutex_exit(&uvm_scheduler_mutex);
#endif
                ll = NULL;              /* process to choose */
                ppri = INT_MIN;         /* its priority */

                mutex_enter(proc_lock);
                LIST_FOREACH(l, &alllwp, l_list) {
                        /* is it a runnable swapped out process? */
                        if (l->l_stat == LSRUN && !(l->l_flag & LW_INMEM)) {
                                pri = l->l_swtime + l->l_slptime -
                                    (l->l_proc->p_nice - NZERO) * 8;
                                if (pri > ppri) {   /* higher priority? */
                                        ll = l;
                                        ppri = pri;
                                }
                        }
                }
#ifdef DEBUG
                if (swapdebug & SDB_FOLLOW)
                        printf("%s: running, procp %p pri %d\n", __func__, ll,
                            ppri);
#endif
                /*
                 * Nothing to do, back to sleep
                 */
                if ((l = ll) == NULL) {
                        mutex_exit(proc_lock);
                        mutex_enter(&uvm_scheduler_mutex);
                        if (uvm.scheduler_kicked == false)
                                cv_wait(&uvm.scheduler_cv,
                                    &uvm_scheduler_mutex);
                        uvm.scheduler_kicked = false;
                        mutex_exit(&uvm_scheduler_mutex);
                        continue;
                }

                /*
                 * we have found a swapped out process which we would like
                 * to bring back in.
                 *
                 * XXX: this part is really bogus cuz we could deadlock
                 * on memory despite our feeble check
                 */
                if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
                        if (swapdebug & SDB_SWAPIN)
                                printf("swapin: pid %d(%s)@%p, pri %d "
                                    "free %d\n", l->l_proc->p_pid,
                                    l->l_proc->p_comm, l->l_addr, ppri,
                                    uvmexp.free);
#endif
                        mutex_enter(&l->l_swaplock);
                        mutex_exit(proc_lock);
                        uvm_swapin(l);
                        mutex_exit(&l->l_swaplock);
                        continue;
                } else {
                        /*
                         * not enough memory, jab the pageout daemon and
                         * wait until the coast is clear
                         */
                        mutex_exit(proc_lock);
#ifdef DEBUG
                        if (swapdebug & SDB_FOLLOW)
                                printf("%s: no room for pid %d(%s),"
                                    " free %d\n", __func__, l->l_proc->p_pid,
                                    l->l_proc->p_comm, uvmexp.free);
#endif
                        uvm_wait("schedpwait");
#ifdef DEBUG
                        if (swapdebug & SDB_FOLLOW)
                                printf("%s: room again, free %d\n", __func__,
                                    uvmexp.free);
#endif
                }
        }
}
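
/*
 * Worked example of the swap-in priority computed above (hypothetical
 * values).  An LWP swapped out for 20 seconds (l_swtime == 20) that
 * slept for 5 (l_slptime == 5) and is niced to NZERO + 10 gets
 *
 *      pri = 20 + 5 - 10 * 8 = -55
 *
 * while an un-niced LWP with the same times gets pri = 25, so the
 * un-niced LWP is brought back in first.  Starting ppri at INT_MIN
 * guarantees that any candidate beats an empty choice.
 */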

/*
 * swappable: is LWP "l" swappable?
 */

static bool
swappable(struct lwp *l)
{

        if ((l->l_flag & (LW_INMEM|LW_SYSTEM|LW_WEXIT)) != LW_INMEM)
                return false;
        if ((l->l_pflag & LP_RUNNING) != 0)
                return false;
        if (l->l_holdcnt != 0)
                return false;
        if (l->l_class != SCHED_OTHER)
                return false;
        if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
                return false;
        if (l->l_proc->p_stat != SACTIVE && l->l_proc->p_stat != SSTOP)
                return false;
        return true;
}

/*
 * uvm_swapout_threads: find threads that can be swapped and unwire
 *      their u-areas.
 *
 * - called by the pagedaemon
 * - try to swap out at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads(void)
{
        struct lwp *l;
        struct lwp *outl, *outl2;
        int outpri, outpri2;
        int didswap = 0;
        extern int maxslp;
        bool gotit;

        /* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
        if (!enableswap)
                return;
#endif

        /*
         * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
         * outl2/outpri2: the longest resident thread (its swap time)
         */
        outl = outl2 = NULL;
        outpri = outpri2 = 0;

 restart:
        mutex_enter(proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                KASSERT(l->l_proc != NULL);
                if (!mutex_tryenter(&l->l_swaplock))
                        continue;
                if (!swappable(l)) {
                        mutex_exit(&l->l_swaplock);
                        continue;
                }
                switch (l->l_stat) {
                case LSONPROC:
                        break;

                case LSRUN:
                        if (l->l_swtime > outpri2) {
                                outl2 = l;
                                outpri2 = l->l_swtime;
                        }
                        break;

                case LSSLEEP:
                case LSSTOP:
                        if (l->l_slptime >= maxslp) {
                                mutex_exit(proc_lock);
                                uvm_swapout(l);
                                /*
                                 * Locking in the wrong direction -
                                 * try to prevent the LWP from exiting.
                                 */
                                gotit = mutex_tryenter(proc_lock);
                                mutex_exit(&l->l_swaplock);
                                didswap++;
                                if (!gotit)
                                        goto restart;
                                continue;
                        } else if (l->l_slptime > outpri) {
                                outl = l;
                                outpri = l->l_slptime;
                        }
                        break;
                }
                mutex_exit(&l->l_swaplock);
        }

        /*
         * If we didn't get rid of any real duds, toss out the next most
         * likely sleeping/stopped or running candidate.  We only do this
         * if we are real low on memory since we don't gain much by doing
         * it (USPACE bytes).
         */
        if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
                if ((l = outl) == NULL)
                        l = outl2;
#ifdef DEBUG
                if (swapdebug & SDB_SWAPOUT)
                        printf("%s: no duds, try procp %p\n", __func__, l);
#endif
                if (l) {
                        mutex_enter(&l->l_swaplock);
                        mutex_exit(proc_lock);
                        if (swappable(l))
                                uvm_swapout(l);
                        mutex_exit(&l->l_swaplock);
                        return;
                }
        }

        mutex_exit(proc_lock);
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - must be called with l->l_swaplock held.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(struct lwp *l)
{
        struct vm_map *map;

        KASSERT(mutex_owned(&l->l_swaplock));

#ifdef DEBUG
        if (swapdebug & SDB_SWAPOUT)
                printf("%s: lid %d.%d(%s)@%p, stat %x pri %d free %d\n",
                   __func__, l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
                   l->l_addr, l->l_stat, l->l_slptime, uvmexp.free);
#endif

        /*
         * Mark it as (potentially) swapped out.
         */
        lwp_lock(l);
        if (!swappable(l)) {
                KDASSERT(l->l_cpu != curcpu());
                lwp_unlock(l);
                return;
        }
        l->l_flag &= ~LW_INMEM;
        l->l_swtime = 0;
        if (l->l_stat == LSRUN)
                sched_dequeue(l);
        lwp_unlock(l);
        l->l_ru.ru_nswap++;
        ++uvmexp.swapouts;

#ifdef VMSWAP_UAREA
        /*
         * Do any machine-specific actions necessary before swapout.
         * This can include saving floating point state, etc.
         */
        cpu_swapout(l);

        /*
         * Unwire the to-be-swapped process's user struct and kernel stack.
         */
        uarea_swapout(USER_TO_UAREA(l->l_addr));
#endif
        map = &l->l_proc->p_vmspace->vm_map;
        if (vm_map_lock_try(map)) {
                pmap_collect(vm_map_pmap(map));
                vm_map_unlock(map);
        }
}

/*
 * uvm_lwp_hold: prevent lwp "l" from being swapped out, and bring
 * it back into memory if it is currently swapped.
 */

void
uvm_lwp_hold(struct lwp *l)
{

        if (l == curlwp) {
                atomic_inc_uint(&l->l_holdcnt);
        } else {
                mutex_enter(&l->l_swaplock);
                if (atomic_inc_uint_nv(&l->l_holdcnt) == 1 &&
                    (l->l_flag & LW_INMEM) == 0)
                        uvm_swapin(l);
                mutex_exit(&l->l_swaplock);
        }
}

/*
 * uvm_lwp_rele: release a hold on lwp "l".  When the holdcount
 * drops to zero, it is eligible to be swapped.
 */

void
uvm_lwp_rele(struct lwp *l)
{

        KASSERT(l->l_holdcnt != 0);

        atomic_dec_uint(&l->l_holdcnt);
}
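
/*
 * Illustrative sketch (not part of this file): hold/rele bracket any
 * stretch of code that touches another LWP's u-area (PCB, kernel
 * stack) and therefore needs it resident.  The accessor shown is
 * hypothetical.
 *
 *      uvm_lwp_hold(l);
 *      inspect_pcb(&l->l_addr->u_pcb);   // hypothetical; u-area now wired
 *      uvm_lwp_rele(l);
 */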