/*	$NetBSD: uvm_glue.c,v 1.163.18.2 2020/04/08 14:09:04 martin Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.163.18.2 2020/04/08 14:09:04 martin Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"
#include "opt_kasan.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/asan.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pgflcache.h>

/*
 * uvm_kernacc: test if kernel can access a memory region.
 *
 * => Currently used only by /dev/kmem driver (dev/mm.c).
 */
bool
uvm_kernacc(void *addr, size_t len, vm_prot_t prot)
{
	vaddr_t saddr = trunc_page((vaddr_t)addr);
	vaddr_t eaddr = round_page(saddr + len);
	bool rv;

	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return rv;
}
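
/*
 * Example (editor's illustrative sketch, not part of this file): a
 * /dev/kmem-style read handler would typically check uvm_kernacc()
 * before copying out of kernel memory, roughly as dev/mm.c does.  The
 * handler name and its locals below are hypothetical.
 *
 *	static int
 *	kmem_read_sketch(struct uio *uio, void *kva, size_t len)
 *	{
 *
 *		if (!uvm_kernacc(kva, len, VM_PROT_READ))
 *			return EFAULT;
 *		return uiomove(kva, len, uio);
 *	}
 */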

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so the debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were to use
 * vm_map_protect(), a change to allow writing would be applied lazily,
 * meaning we would still take a protection fault, something we really
 * don't want to do.  It would also fragment the kernel map
 * unnecessarily.  We cannot use pmap_protect() since it also won't
 * enforce a write-enable request.  Using pmap_enter() is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif
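
/*
 * Example (editor's illustrative sketch, not part of this file): a
 * debugger stub would typically write-enable the target page, patch in
 * the breakpoint instruction, and then restore read-only protection.
 * The helper name and the bkpt buffer below are hypothetical.
 *
 *	void
 *	gdb_set_breakpoint_sketch(void *addr, const void *bkpt, size_t len)
 *	{
 *
 *		uvm_chgkprot(addr, len, B_WRITE);
 *		memcpy(addr, bkpt, len);
 *		uvm_chgkprot(addr, len, B_READ);
 *	}
 */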

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
		round_page((vaddr_t)addr + len));
}
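
/*
 * Example (editor's illustrative sketch, not part of this file): the
 * physio-style pattern is to wire the user buffer for the access the
 * device will perform (VM_PROT_WRITE when data is read into it), do the
 * transfer, then unwire regardless of the outcome.  do_device_transfer
 * is a hypothetical helper.
 *
 *	error = uvm_vslock(curproc->p_vmspace, uaddr, len, VM_PROT_WRITE);
 *	if (error)
 *		return error;
 *	error = do_device_transfer(uaddr, len);
 *	uvm_vsunlock(curproc->p_vmspace, uaddr, len);
 *	return error;
 */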

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}

/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new PCB structure is allocated for the child process,
 *	and filled in by MD layer
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{

	/* Fill stack with magic number. */
	kstack_setup_magic(l2);

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}
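
/*
 * Example (editor's illustrative sketch, not part of this file): the two
 * ways lwp creation typically reaches uvm_lwp_fork().  A user fork
 * supplies a stack and lets the child resume in user mode, while a
 * kernel thread supplies a kernel entry point.  mythread_main and arg
 * are hypothetical.
 *
 *	// user fork: the child returns to user mode via child_return()
 *	uvm_lwp_fork(l1, l2, stack, stacksize, child_return, l2);
 *
 *	// kernel thread: no user stack, start at mythread_main(arg)
 *	uvm_lwp_fork(l1, l2, NULL, 0, mythread_main, arg);
 */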

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;
#if defined(__HAVE_CPU_UAREA_ROUTINES)
static pool_cache_t uvm_uarea_system_cache;
#else
#define uvm_uarea_system_cache uvm_uarea_cache
#endif

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{

	KASSERT((flags & PR_WAITOK) != 0);

#if defined(PMAP_MAP_POOLPAGE)
	while (USPACE == PAGE_SIZE &&
	    (USPACE_ALIGN == 0 || USPACE_ALIGN == PAGE_SIZE)) {
		struct vm_page *pg;
		vaddr_t va;
#if defined(PMAP_ALLOC_POOLPAGE)
		pg = PMAP_ALLOC_POOLPAGE(0);
#else
		pg = uvm_pagealloc(NULL, 0, NULL, 0);
#endif
		if (pg == NULL) {
			uvm_wait("uarea");
			continue;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		KASSERT(va != 0);
		return (void *)va;
	}
#endif
#if defined(__HAVE_CPU_UAREA_ROUTINES)
	void *va = cpu_uarea_alloc(false);
	if (va)
		return (void *)va;
#endif
	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_WIRED | UVM_KMF_WAITVA);
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{
#if defined(PMAP_MAP_POOLPAGE)
	if (USPACE == PAGE_SIZE &&
	    (USPACE_ALIGN == 0 || USPACE_ALIGN == PAGE_SIZE)) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
		KASSERT(pa != 0);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif
#if defined(__HAVE_CPU_UAREA_ROUTINES)
	if (cpu_uarea_free(addr))
		return;
#endif
	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_WIRED);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

#if defined(__HAVE_CPU_UAREA_ROUTINES)
static void *
uarea_system_poolpage_alloc(struct pool *pp, int flags)
{
	void * const va = cpu_uarea_alloc(true);
	if (va != NULL)
		return va;

	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_WIRED |
	    ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_system_poolpage_free(struct pool *pp, void *addr)
{
	if (cpu_uarea_free(addr))
		return;

	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_WIRED);
}

static struct pool_allocator uvm_uarea_system_allocator = {
	.pa_alloc = uarea_system_poolpage_alloc,
	.pa_free = uarea_system_poolpage_free,
	.pa_pagesz = USPACE,
};
#endif /* __HAVE_CPU_UAREA_ROUTINES */

void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * Specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL);
#if defined(__HAVE_CPU_UAREA_ROUTINES)
	uvm_uarea_system_cache = pool_cache_init(USPACE, USPACE_ALIGN,
	    0, flags, "uareasys", &uvm_uarea_system_allocator,
	    IPL_NONE, NULL, NULL, NULL);
#endif
}

/*
 * uvm_uarea_alloc: allocate a u-area
 */

vaddr_t
uvm_uarea_alloc(void)
{

	return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
}

vaddr_t
uvm_uarea_system_alloc(struct cpu_info *ci)
{
#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
	if (__predict_false(ci != NULL))
		return cpu_uarea_alloc_idlelwp(ci);
#endif

	return (vaddr_t)pool_cache_get(uvm_uarea_system_cache, PR_WAITOK);
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr)
{

	kasan_mark((void *)uaddr, USPACE, USPACE, 0);
	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

void
uvm_uarea_system_free(vaddr_t uaddr)
{

	kasan_mark((void *)uaddr, USPACE, USPACE, 0);
	pool_cache_put(uvm_uarea_system_cache, (void *)uaddr);
}

vaddr_t
uvm_lwp_getuarea(lwp_t *l)
{

	return (vaddr_t)l->l_addr - UAREA_PCB_OFFSET;
}

void
uvm_lwp_setuarea(lwp_t *l, vaddr_t addr)
{

	l->l_addr = (void *)(addr + UAREA_PCB_OFFSET);
}
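
/*
 * Example (editor's illustrative sketch, not part of this file): the
 * typical lifetime of a u-area, and the relationship between the u-area
 * address and the PCB (l_addr points UAREA_PCB_OFFSET bytes into the
 * u-area, so the two accessors above are inverses of each other).
 *
 *	vaddr_t uaddr = uvm_uarea_alloc();
 *	uvm_lwp_setuarea(l, uaddr);
 *	KASSERT(uvm_lwp_getuarea(l) == uaddr);
 *	struct pcb *pcb = lwp_getpcb(l);	// same address as l->l_addr
 *	...
 *	uvm_lwp_exit(l);			// returns the u-area to the pool
 */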

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp; /* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;
	KASSERT(ovm != NULL);

	if (__predict_false(ovm == proc0.p_vmspace))
		return;

	/*
	 * borrow proc0's address space.
	 */
	kpreempt_disable();
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);
	kpreempt_enable();

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = uvm_lwp_getuarea(l);
	bool system = (l->l_flag & LW_SYSTEM) != 0;

	if (system)
		uvm_uarea_system_free(va);
	else
		uvm_uarea_free(va);
#ifdef DIAGNOSTIC
	uvm_lwp_setuarea(l, (vaddr_t)NULL);
#endif
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN(VM_MAXUSER_ADDRESS,
	    ctob((rlim_t)uvm_availmem()));
}

/*
 * uvm_scheduler: process zero main loop.
 */

extern struct loadavg averunnable;

void
uvm_scheduler(void)
{
	lwp_t *l = curlwp;

	lwp_lock(l);
	l->l_class = SCHED_FIFO;
	lwp_changepri(l, PRI_VM);
	lwp_unlock(l);

	/* Start the freelist cache. */
	uvm_pgflcache_start();

	for (;;) {
		/* Update legacy stats for post-mortem debugging. */
		uvm_update_uvmexp();

		/* See if the pagedaemon needs to generate some free pages. */
		uvm_kick_pdaemon();

		/* Calculate process statistics. */
		sched_pstats();
		(void)kpause("uvm", false, hz, NULL);
	}
}

/*
 * uvm_idle: called from the idle loop.
 */

void
uvm_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;

	KASSERT(kpreempt_disabled());

	if (!ci->ci_want_resched)
		uvmpdpol_idle(ucpu);
	if (!ci->ci_want_resched)
		uvm_pageidlezero();
}