/*	$NetBSD: uvm_glue.c,v 1.163.18.2 2020/04/08 14:09:04 martin Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.163.18.2 2020/04/08 14:09:04 martin Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"
#include "opt_kasan.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/asan.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pgflcache.h>

/*
 * uvm_kernacc: test if kernel can access a memory region.
 *
 * => Currently used only by /dev/kmem driver (dev/mm.c).
 */
bool
uvm_kernacc(void *addr, size_t len, vm_prot_t prot)
{
	vaddr_t saddr = trunc_page((vaddr_t)addr);
	vaddr_t eaddr = round_page(saddr + len);
	bool rv;

	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return rv;
}
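
/*
 * Usage sketch (illustrative only, not the actual dev/mm.c code; the
 * "kva" and "len" names are hypothetical): a /dev/kmem-style read
 * checks access before touching the region:
 *
 *	if (!uvm_kernacc(kva, len, VM_PROT_READ))
 *		return EFAULT;
 *	error = uiomove(kva, len, uio);
 */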
    111         1.1       mrg 
    112         1.1       mrg #ifdef KGDB
    113         1.1       mrg /*
    114         1.1       mrg  * Change protections on kernel pages from addr to addr+len
    115         1.1       mrg  * (presumably so debugger can plant a breakpoint).
    116         1.1       mrg  *
    117         1.1       mrg  * We force the protection change at the pmap level.  If we were
    118         1.1       mrg  * to use vm_map_protect a change to allow writing would be lazily-
    119         1.1       mrg  * applied meaning we would still take a protection fault, something
    120         1.1       mrg  * we really don't want to do.  It would also fragment the kernel
    121         1.1       mrg  * map unnecessarily.  We cannot use pmap_protect since it also won't
    122         1.1       mrg  * enforce a write-enable request.  Using pmap_enter is the only way
    123         1.1       mrg  * we can ensure the change takes place properly.
    124         1.1       mrg  */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
		round_page((vaddr_t)addr + len));
}
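
/*
 * Typical wire/unwire pairing (a hedged sketch in the physio style;
 * the "buf" and "len" names are hypothetical):
 *
 *	error = uvm_vslock(p->p_vmspace, buf, len, VM_PROT_READ);
 *	if (error)
 *		return error;
 *	... do the I/O directly on the now-wired user pages ...
 *	uvm_vsunlock(p->p_vmspace, buf, len);
 */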

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}

/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new PCB structure is allocated for the child process,
 *	and filled in by MD layer
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{

	/* Fill stack with magic number. */
	kstack_setup_magic(l2);

	/*
	 * cpu_lwp_fork() copies and updates the PCB, and makes the child
	 * ready to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}
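
/*
 * Illustration (not an actual call site): for a kernel thread the
 * caller passes its entry point as "func", so the new LWP begins
 * execution in func(arg) rather than returning to user mode via
 * child_return().
 */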

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;
#if defined(__HAVE_CPU_UAREA_ROUTINES)
static pool_cache_t uvm_uarea_system_cache;
#else
#define uvm_uarea_system_cache uvm_uarea_cache
#endif

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{

	KASSERT((flags & PR_WAITOK) != 0);

#if defined(PMAP_MAP_POOLPAGE)
	while (USPACE == PAGE_SIZE &&
	    (USPACE_ALIGN == 0 || USPACE_ALIGN == PAGE_SIZE)) {
		struct vm_page *pg;
		vaddr_t va;
#if defined(PMAP_ALLOC_POOLPAGE)
		pg = PMAP_ALLOC_POOLPAGE(0);
#else
		pg = uvm_pagealloc(NULL, 0, NULL, 0);
#endif
		if (pg == NULL) {
			uvm_wait("uarea");
			continue;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		KASSERT(va != 0);
		return (void *)va;
	}
#endif
#if defined(__HAVE_CPU_UAREA_ROUTINES)
	void *va = cpu_uarea_alloc(false);
	if (va)
		return (void *)va;
#endif
	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_WIRED | UVM_KMF_WAITVA);
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{
#if defined(PMAP_MAP_POOLPAGE)
	if (USPACE == PAGE_SIZE &&
	    (USPACE_ALIGN == 0 || USPACE_ALIGN == PAGE_SIZE)) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
		KASSERT(pa != 0);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif
#if defined(__HAVE_CPU_UAREA_ROUTINES)
	if (cpu_uarea_free(addr))
		return;
#endif
	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_WIRED);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

#if defined(__HAVE_CPU_UAREA_ROUTINES)
static void *
uarea_system_poolpage_alloc(struct pool *pp, int flags)
{
	void * const va = cpu_uarea_alloc(true);
	if (va != NULL)
		return va;

	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_WIRED |
	    ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_system_poolpage_free(struct pool *pp, void *addr)
{
	if (cpu_uarea_free(addr))
		return;

	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_WIRED);
}

static struct pool_allocator uvm_uarea_system_allocator = {
	.pa_alloc = uarea_system_poolpage_alloc,
	.pa_free = uarea_system_poolpage_free,
	.pa_pagesz = USPACE,
};
#endif /* __HAVE_CPU_UAREA_ROUTINES */

void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * Specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL);
#if defined(__HAVE_CPU_UAREA_ROUTINES)
	uvm_uarea_system_cache = pool_cache_init(USPACE, USPACE_ALIGN,
	    0, flags, "uareasys", &uvm_uarea_system_allocator,
	    IPL_NONE, NULL, NULL, NULL);
#endif
}
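
/*
 * Illustration of the PR_NOALIGN rule above: if, say, USPACE is
 * 4 * PAGE_SIZE while USPACE_ALIGN is 0, the backend guarantees only
 * page alignment, so the cache must not assume USPACE-aligned items.
 */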

/*
 * uvm_uarea_alloc: allocate a u-area
 */

vaddr_t
uvm_uarea_alloc(void)
{

	return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
}

vaddr_t
uvm_uarea_system_alloc(struct cpu_info *ci)
{
#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
	if (__predict_false(ci != NULL))
		return cpu_uarea_alloc_idlelwp(ci);
#endif

	return (vaddr_t)pool_cache_get(uvm_uarea_system_cache, PR_WAITOK);
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr)
{

	kasan_mark((void *)uaddr, USPACE, USPACE, 0);
	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

void
uvm_uarea_system_free(vaddr_t uaddr)
{

	kasan_mark((void *)uaddr, USPACE, USPACE, 0);
	pool_cache_put(uvm_uarea_system_cache, (void *)uaddr);
}

vaddr_t
uvm_lwp_getuarea(lwp_t *l)
{

	return (vaddr_t)l->l_addr - UAREA_PCB_OFFSET;
}

void
uvm_lwp_setuarea(lwp_t *l, vaddr_t addr)
{

	l->l_addr = (void *)(addr + UAREA_PCB_OFFSET);
}
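
/*
 * Layout note (derived from the two helpers above): the PCB sits
 * UAREA_PCB_OFFSET bytes into the u-area, so l->l_addr points at the
 * PCB while these conversions translate to and from the u-area base.
 */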

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp; /* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;
	KASSERT(ovm != NULL);

	if (__predict_false(ovm == proc0.p_vmspace))
		return;

	/*
	 * borrow proc0's address space.
	 */
	kpreempt_disable();
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);
	kpreempt_enable();

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = uvm_lwp_getuarea(l);
	bool system = (l->l_flag & LW_SYSTEM) != 0;

	if (system)
		uvm_uarea_system_free(va);
	else
		uvm_uarea_free(va);
#ifdef DIAGNOSTIC
	uvm_lwp_setuarea(l, (vaddr_t)NULL);
#endif
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN(VM_MAXUSER_ADDRESS,
	    ctob((rlim_t)uvm_availmem()));
}
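
/*
 * Note on the RSS default above: ctob() converts a count of pages to
 * bytes, so the initial limit is the smaller of the user address space
 * size and the memory UVM currently has available.
 */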

/*
 * uvm_scheduler: process zero main loop.
 */

extern struct loadavg averunnable;

void
uvm_scheduler(void)
{
	lwp_t *l = curlwp;

	lwp_lock(l);
	l->l_class = SCHED_FIFO;
	lwp_changepri(l, PRI_VM);
	lwp_unlock(l);

	/* Start the freelist cache. */
	uvm_pgflcache_start();

	for (;;) {
		/* Update legacy stats for post-mortem debugging. */
		uvm_update_uvmexp();

		/* See if the pagedaemon needs to generate some free pages. */
		uvm_kick_pdaemon();

		/* Calculate process statistics. */
		sched_pstats();
		(void)kpause("uvm", false, hz, NULL);
	}
}

/*
 * uvm_idle: called from the idle loop.
 */

void
uvm_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;

	KASSERT(kpreempt_disabled());

	if (!ci->ci_want_resched)
		uvmpdpol_idle(ucpu);
	if (!ci->ci_want_resched)
		uvm_pageidlezero();
}