/*	$NetBSD: uvm_glue.c,v 1.125 2008/04/24 15:35:31 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.125 2008/04/24 15:35:31 ad Exp $");

#include "opt_coredump.h"
#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static void uvm_swapout(struct lwp *);
static int uarea_swapin(vaddr_t);

#define UVM_NUAREA_HIWAT	20
#define	UVM_NUAREA_LOWAT	16

#define	UAREA_NEXTFREE(uarea)	(*(vaddr_t *)(UAREA_TO_USER(uarea)))

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
	bool rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return(rv);
}
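
/*
 * Illustrative use, in the style of the /dev/kmem driver (a sketch
 * only; the real mem.c differs in detail, and `c' and `uio' are
 * assumed locals of a read/write routine):
 *
 *	if (!uvm_kernacc((void *)(vaddr_t)uio->uio_offset, c,
 *	    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
 *		return (EFAULT);
 */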

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
		round_page((vaddr_t)addr + len));
}
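
/*
 * Typical pairing, in the spirit of physio() and sys___sysctl()
 * (a sketch only; real callers differ in detail, and `buf' and
 * `len' are assumed locals):
 *
 *	error = uvm_vslock(p->p_vmspace, buf, len, VM_PROT_WRITE);
 *	if (error)
 *		return error;
 *	... transfer data; the pages cannot be paged out meanwhile ...
 *	uvm_vsunlock(p->p_vmspace, buf, len);
 */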

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}
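
/*
 * The choice of `shared' comes from the caller; fork1() in
 * kern_fork.c passes (roughly) the following, so that vfork()-style
 * children share the parent's address space via uvmspace_share()
 * while ordinary children get a copy via uvmspace_fork():
 *
 *	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);
 */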


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	int error;

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in l->l_flag's
	 * LW_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.  If we reused a cached U-area,
	 * LW_INMEM will already be set and we don't need to do anything.
	 *
	 * Note the kernel stack gets read/write accesses right off the bat.
	 */

	if ((l2->l_flag & LW_INMEM) == 0) {
		vaddr_t uarea = USER_TO_UAREA(l2->l_addr);

		if ((error = uarea_swapin(uarea)) != 0)
			panic("%s: uvm_fault_wire failed: %d", __func__, error);
#ifdef PMAP_UAREA
		/* Tell the pmap this is a u-area mapping */
		PMAP_UAREA(uarea);
#endif
		l2->l_flag |= LW_INMEM;
	}

#ifdef KSTACK_CHECK_MAGIC
	/*
	 * fill stack with magic number
	 */
	kstack_setup_magic(l2);
#endif

	/*
	 * cpu_lwp_fork() copies and updates the PCB, and makes the child
	 * ready to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}
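
/*
 * For a kernel thread, `func' becomes the thread's body and must not
 * return here; kthread_create(9) is the usual way to arrange this.
 * A minimal sketch (example_thread is hypothetical):
 *
 *	static void
 *	example_thread(void *arg)
 *	{
 *
 *		for (;;) {
 *			... do work, then sleep ...
 *		}
 *		... a thread that must terminate calls kthread_exit() ...
 *	}
 */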

/*
 * uvm_cpu_attach: initialize per-CPU data structures.
 */

void
uvm_cpu_attach(struct cpu_info *ci)
{

}

static int
uarea_swapin(vaddr_t addr)
{

	return uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
}

static void
uarea_swapout(vaddr_t addr)
{

	uvm_fault_unwire(kernel_map, addr, addr + USPACE);
}

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;

static int
uarea_ctor(void *arg, void *obj, int flags)
{

	KASSERT((flags & PR_WAITOK) != 0);
	return uarea_swapin((vaddr_t)obj);
}

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_PAGEABLE |
	    ((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{

	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_PAGEABLE);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * Specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
}
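
/*
 * A worked example of the predicate above, with assumed values:
 * if USPACE == 4 * PAGE_SIZE and USPACE_ALIGN == 0, the backend
 * returns only page-aligned va, so the first clause fires and
 * PR_NOALIGN is set; if instead USPACE_ALIGN == USPACE, then
 * USPACE_ALIGN % USPACE == 0 and neither clause fires, so objects
 * are naturally aligned and the pool may rely on that alignment.
 */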

/*
 * uvm_uarea_alloc: allocate a u-area
 */

bool
uvm_uarea_alloc(vaddr_t *uaddrp)
{

	*uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
	return true;
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
{

	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}
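
/*
 * Allocation and release pair up as in uvm_lwp_fork() above and
 * uvm_lwp_exit() below (a sketch; `l' is an assumed struct lwp
 * pointer):
 *
 *	vaddr_t uaddr;
 *
 *	if (!uvm_uarea_alloc(&uaddr))
 *		... fail ...
 *	l->l_addr = UAREA_TO_USER(uaddr);
 *	...
 *	uvm_uarea_free(USER_TO_UAREA(l->l_addr), l->l_cpu);
 */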

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp; /* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = USER_TO_UAREA(l->l_addr);

	l->l_flag &= ~LW_INMEM;
	uvm_uarea_free(va, l->l_cpu);
	l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 *
 * - must be called with the LWP's swap lock held.
 * - naturally, must not be called with l == curlwp
 */

void
uvm_swapin(struct lwp *l)
{
	int error;

	/* XXXSMP notyet KASSERT(mutex_owned(&l->l_swaplock)); */
	KASSERT(l != curlwp);

	error = uarea_swapin(USER_TO_UAREA(l->l_addr));
	if (error) {
		panic("%s: rewiring stack failed: %d", __func__, error);
	}

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(l);
	lwp_lock(l);
	if (l->l_stat == LSRUN)
		sched_enqueue(l, false);
	l->l_flag |= LW_INMEM;
	l->l_swtime = 0;
	lwp_unlock(l);
	++uvmexp.swapins;
}

/*
 * uvm_kick_scheduler: kick the scheduler into action if not running.
 *
 * - called when swapped-out processes have been awoken.
 */

void
uvm_kick_scheduler(void)
{

	if (uvm.swap_running == false)
		return;

	mutex_enter(&uvm_scheduler_mutex);
	uvm.scheduler_kicked = true;
	cv_signal(&uvm.scheduler_cv);
	mutex_exit(&uvm_scheduler_mutex);
}
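
/*
 * A typical kick, sketched from the wakeup path (setrunnable() in
 * kern_synch.c does roughly this when an awoken LWP is not in
 * memory):
 *
 *	if ((l->l_flag & LW_INMEM) != 0)
 *		sched_enqueue(l, false);
 *	else
 *		uvm_kick_scheduler();
 */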

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swapin every swapped-out, runnable process in order of
 *	priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler(void)
{
	struct lwp *l, *ll;
	int pri;
	int ppri;

	l = curlwp;
	lwp_lock(l);
	l->l_priority = PRI_VM;
	l->l_class = SCHED_FIFO;
	lwp_unlock(l);

	for (;;) {
#ifdef DEBUG
		mutex_enter(&uvm_scheduler_mutex);
		while (!enableswap)
			cv_wait(&uvm.scheduler_cv, &uvm_scheduler_mutex);
		mutex_exit(&uvm_scheduler_mutex);
#endif
		ll = NULL;		/* process to choose */
		ppri = INT_MIN;		/* its priority */

		mutex_enter(proc_lock);
		LIST_FOREACH(l, &alllwp, l_list) {
			/* is it a runnable swapped out process? */
			if (l->l_stat == LSRUN && !(l->l_flag & LW_INMEM)) {
				pri = l->l_swtime + l->l_slptime -
				    (l->l_proc->p_nice - NZERO) * 8;
				if (pri > ppri) {   /* higher priority? */
					ll = l;
					ppri = pri;
				}
			}
		}
#ifdef DEBUG
		if (swapdebug & SDB_FOLLOW)
			printf("%s: running, procp %p pri %d\n", __func__, ll,
			    ppri);
#endif
		/*
		 * Nothing to do, back to sleep
		 */
		if ((l = ll) == NULL) {
			mutex_exit(proc_lock);
			mutex_enter(&uvm_scheduler_mutex);
			if (uvm.scheduler_kicked == false)
				cv_wait(&uvm.scheduler_cv,
				    &uvm_scheduler_mutex);
			uvm.scheduler_kicked = false;
			mutex_exit(&uvm_scheduler_mutex);
			continue;
		}

		/*
		 * We have found a swapped-out process which we would like
		 * to bring back in.
		 *
		 * XXX: this part is really bogus cuz we could deadlock
		 * on memory despite our feeble check
		 */
		if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
			if (swapdebug & SDB_SWAPIN)
				printf("swapin: pid %d(%s)@%p, pri %d "
				    "free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, l->l_addr, ppri,
				    uvmexp.free);
#endif
			mutex_enter(&l->l_swaplock);
			mutex_exit(proc_lock);
			uvm_swapin(l);
			mutex_exit(&l->l_swaplock);
			continue;
		} else {
			/*
			 * not enough memory, jab the pageout daemon and
			 * wait til the coast is clear
			 */
			mutex_exit(proc_lock);
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("%s: no room for pid %d(%s),"
				    " free %d\n", __func__, l->l_proc->p_pid,
				    l->l_proc->p_comm, uvmexp.free);
#endif
			uvm_wait("schedpwait");
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("%s: room again, free %d\n", __func__,
				    uvmexp.free);
#endif
		}
	}
}
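
/*
 * The swapin priority above rewards time spent swapped out or asleep
 * and penalizes nice values.  With assumed values: an LWP with
 * l_swtime == 20, l_slptime == 10 and default nice (p_nice == NZERO)
 * scores 20 + 10 - 0 * 8 = 30; the same LWP at p_nice == NZERO + 4
 * scores 30 - 32 = -2 and so is brought back in later.
 */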

/*
 * swappable: is LWP "l" swappable?
 */

static bool
swappable(struct lwp *l)
{

	if ((l->l_flag & (LW_INMEM|LW_RUNNING|LW_SYSTEM|LW_WEXIT)) != LW_INMEM)
		return false;
	if (l->l_holdcnt != 0)
		return false;
	if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
		return false;
	return true;
}

/*
 * swapout_threads: find threads that can be swapped and unwire their
 *	u-areas.
 *
 * - called by the pagedaemon
 * - try to swap at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads(void)
{
	struct lwp *l;
	struct lwp *outl, *outl2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	bool gotit;

	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
	 * outl2/outpri2: the longest resident thread (its swap time)
	 */
	outl = outl2 = NULL;
	outpri = outpri2 = 0;

 restart:
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		KASSERT(l->l_proc != NULL);
		if (!mutex_tryenter(&l->l_swaplock))
			continue;
		if (!swappable(l)) {
			mutex_exit(&l->l_swaplock);
			continue;
		}
		switch (l->l_stat) {
		case LSONPROC:
			break;

		case LSRUN:
			if (l->l_swtime > outpri2) {
				outl2 = l;
				outpri2 = l->l_swtime;
			}
			break;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_slptime >= maxslp) {
				mutex_exit(proc_lock);
				uvm_swapout(l);
				/*
				 * Locking in the wrong direction -
				 * try to prevent the LWP from exiting.
				 */
				gotit = mutex_tryenter(proc_lock);
				mutex_exit(&l->l_swaplock);
				didswap++;
				if (!gotit)
					goto restart;
				continue;
			} else if (l->l_slptime > outpri) {
				outl = l;
				outpri = l->l_slptime;
			}
			break;
		}
		mutex_exit(&l->l_swaplock);
	}

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((l = outl) == NULL)
			l = outl2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("%s: no duds, try procp %p\n", __func__, l);
#endif
		if (l) {
			mutex_enter(&l->l_swaplock);
			mutex_exit(proc_lock);
			if (swappable(l))
				uvm_swapout(l);
			mutex_exit(&l->l_swaplock);
			return;
		}
	}

	mutex_exit(proc_lock);
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - must be called with l->l_swaplock held.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(struct lwp *l)
{
	KASSERT(mutex_owned(&l->l_swaplock));

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("%s: lid %d.%d(%s)@%p, stat %x slptime %d free %d\n",
		   __func__, l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
		   l->l_addr, l->l_stat, l->l_slptime, uvmexp.free);
#endif

	/*
	 * Mark it as (potentially) swapped out.
	 */
	lwp_lock(l);
	if (!swappable(l)) {
		KDASSERT(l->l_cpu != curcpu());
		lwp_unlock(l);
		return;
	}
	l->l_flag &= ~LW_INMEM;
	l->l_swtime = 0;
	if (l->l_stat == LSRUN)
		sched_dequeue(l);
	lwp_unlock(l);
	l->l_ru.ru_nswap++;
	++uvmexp.swapouts;

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(l);

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	uarea_swapout(USER_TO_UAREA(l->l_addr));
	pmap_collect(vm_map_pmap(&l->l_proc->p_vmspace->vm_map));
}

/*
 * uvm_lwp_hold: prevent lwp "l" from being swapped out, and bring
 * it back into memory if it is currently swapped.
 */

void
uvm_lwp_hold(struct lwp *l)
{

	if (l == curlwp) {
		atomic_inc_uint(&l->l_holdcnt);
	} else {
		mutex_enter(&l->l_swaplock);
		if (atomic_inc_uint_nv(&l->l_holdcnt) == 1 &&
		    (l->l_flag & LW_INMEM) == 0)
			uvm_swapin(l);
		mutex_exit(&l->l_swaplock);
	}
}

/*
 * uvm_lwp_rele: release a hold on lwp "l".  When the hold count
 * drops to zero, it is eligible to be swapped.
 */

void
uvm_lwp_rele(struct lwp *l)
{

	KASSERT(l->l_holdcnt != 0);

	atomic_dec_uint(&l->l_holdcnt);
}
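
/*
 * A typical hold/release bracket (sketch): code such as the ptrace
 * register accessors must keep the target LWP's U-area resident
 * while touching its PCB:
 *
 *	uvm_lwp_hold(t);
 *	... access t->l_addr->u_pcb ...
 *	uvm_lwp_rele(t);
 */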

#ifdef COREDUMP
/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 * a core file.
 */

int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	int error;

	entry = NULL;
	vm_map_lock_read(map);
	state.end = 0;
	for (;;) {
		if (entry == NULL)
			entry = map->header.next;
		else if (!uvm_map_lookup_entry(map, state.end, &entry))
			entry = entry->next;
		if (entry == &map->header)
			break;

		state.cookie = cookie;
		if (state.end > entry->start) {
			state.start = state.end;
		} else {
			state.start = entry->start;
		}
		state.realend = entry->end;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

		/*
		 * Dump the region unless one of the following is true:
		 *
		 * (1) the region has neither object nor amap behind it
		 *     (ie. it has never been accessed).
		 *
		 * (2) the region has no amap and is read-only
		 *     (eg. an executable text section).
		 *
		 * (3) the region's object is a device.
		 *
		 * (4) the region is unreadable by the process.
		 */

		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(state.start < VM_MAXUSER_ADDRESS);
		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
		if (entry->object.uvm_obj == NULL &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_WRITE) == 0 &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_READ) == 0) {
			state.realend = state.start;
		} else {
			if (state.start >= (vaddr_t)vm->vm_maxsaddr)
				state.flags |= UVM_COREDUMP_STACK;

			/*
			 * If this is an anonymous entry, only dump
			 * instantiated pages.
			 */
			if (entry->object.uvm_obj == NULL) {
				vaddr_t end;

				amap_lock(entry->aref.ar_amap);
				for (end = state.start;
				     end < state.end; end += PAGE_SIZE) {
					struct vm_anon *anon;
					anon = amap_lookup(&entry->aref,
					    end - entry->start);
					/*
					 * If we have already encountered an
					 * uninstantiated page, stop at the
					 * first instantiated page.
					 */
					if (anon != NULL &&
					    state.realend != state.end) {
						state.end = end;
						break;
					}

					/*
					 * If this page is the first
					 * uninstantiated page, mark this as
					 * the real ending point.  Continue
					 * counting uninstantiated pages.
					 */
					if (anon == NULL &&
					    state.realend == state.end) {
						state.realend = end;
					}
				}
				amap_unlock(entry->aref.ar_amap);
			}
		}

		vm_map_unlock_read(map);
		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
		vm_map_lock_read(map);
	}
	vm_map_unlock_read(map);

	return (0);
}
#endif /* COREDUMP */
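
/*
 * A minimal walk callback has this shape (a sketch only; the real
 * consumers are the ELF and a.out core dump routines, and
 * example_count_segs is hypothetical):
 *
 *	static int
 *	example_count_segs(struct proc *p, void *iocookie,
 *	    struct uvm_coredump_state *us)
 *	{
 *		int *np = us->cookie;
 *
 *		if (us->realend > us->start)
 *			(*np)++;
 *		return 0;
 *	}
 *
 * Returning nonzero from the callback aborts the walk and propagates
 * the error to the caller of uvm_coredump_walkmap().
 */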