/*	$NetBSD: uvm_mmap.c,v 1.134 2011/02/02 20:07:25 chuck Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.134 2011/02/02 20:07:25 chuck Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <miscfs/specfs/specdev.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

static int
range_test(vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	vaddr_t vm_max_address = VM_MAXUSER_ADDRESS;
	vaddr_t eaddr = addr + size;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;
	return 0;
}
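
/*
 * Worked example of the wrap check above (illustrative note, not
 * additional code): on a 32-bit platform, addr = 0xfffff000 with
 * size = 0x2000 wraps around to eaddr = 0x1000, so addr > eaddr and
 * the request is rejected with EOVERFLOW (mmap) or EINVAL (others).
 */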

/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
{
	/* {
		syscallarg(intptr_t) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */

/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec, so the status we return can't be made stale
	 * by page faults taken while we store the status bytes.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		     (entry->next == &map->header ||
		      entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* upper layer */
		uobj = entry->object.uvm_obj;	/* lower layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			mutex_enter(&uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the upper layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the lower layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			mutex_exit(&uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return (error);
}
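
/*
 * Userland sketch (illustrative only, not part of this file): probe
 * residency with mincore(2).  The low bit of each status byte mirrors
 * the subyte() stores above: 1 for a resident page, 0 otherwise.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, 2 * pgsz, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *	char vec[2];
 *
 *	p[0] = 1;			// touch only the first page
 *	if (mincore(p, 2 * pgsz, vec) == 0)
 *		printf("page 0: %d, page 1: %d\n", vec[0] & 1, vec[1] & 1);
 */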

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, the offset and address must have the same remainder
 *      modulo PAGE_SIZE
 *    - if the address isn't page aligned the mapping starts at
 *      trunc_page(addr) and the return value is adjusted up by the
 *      page offset.
 */

int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct vnode *vp;
	void *handle;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		error = range_test(addr, size, true);
		if (error)
			return error;
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			fd_putfile(fd);
			return (ENODEV);		/* only mmap vnodes! */
		}
		vp = fp->f_data;		/* convert to vnode */
		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			fd_putfile(fd);
			return (ENODEV);  /* only REG/CHR/BLK support mmap */
		}
		if (vp->v_type != VCHR && pos < 0) {
			fd_putfile(fd);
			return (EINVAL);
		}
		if (vp->v_type != VCHR && (pos + size) < pos) {
			fd_putfile(fd);
			return (EOVERFLOW);		/* no offset wrapping */
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			fd_putfile(fd);
			return (EACCES);
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * If the file is writable, add PROT_WRITE to
			 * maxprot only when the file is neither immutable
			 * nor append-only.  Otherwise, if PROT_WRITE was
			 * asked for, return EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, l->l_cred))) {
					fd_putfile(fd);
					return (error);
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					fd_putfile(fd);
					return (EPERM);
				}
			}
			else if (prot & PROT_WRITE) {
				fd_putfile(fd);
				return (EACCES);
			}
		} else {
			/* MAP_PRIVATE mappings can always be written to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

#if NVERIEXEC > 0
	if (handle != NULL) {
		/*
		 * Check if the file can be executed indirectly.
		 *
		 * XXX: This gives false warnings about "Incorrect access type"
		 * XXX: if the mapping is not executable. Harmless, but will be
		 * XXX: fixed as part of other changes.
		 */
		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
		    NULL)) {
			/*
			 * Don't allow executable mappings if we can't
			 * indirectly execute the file.
			 */
			if (prot & VM_PROT_EXECUTE) {
				if (fp != NULL)
					fd_putfile(fd);
				return (EPERM);
			}

			/*
			 * Strip the executable bit from 'maxprot' to make sure
			 * it can't be made executable later.
			 */
			maxprot &= ~VM_PROT_EXECUTE;
		}
	}
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PAX_ASLR
	pax_aslr(l, &addr, orig_addr, flags);
#endif /* PAX_ASLR */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	if (fp != NULL)
		fd_putfile(fd);

	return (error);
}
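
/*
 * Userland sketch (illustrative only): the pageoff handling above means
 * a file offset that is not page aligned is legal for a non-fixed
 * mapping; the map starts at trunc_page(pos) and the returned address
 * is adjusted up so it points at exactly the byte named by pos.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/some/file", O_RDONLY);	// hypothetical path
 *	off_t pos = 100;			// deliberately unaligned
 *	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, pos);
 *
 *	// On success p[0] is byte 100 of the file, because *retval
 *	// above is addr + pageoff.
 */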

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}
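
/*
 * Userland sketch (illustrative only, assuming p is a MAP_SHARED file
 * mapping of pgsz bytes): MS_SYNC becomes PGO_CLEANIT|PGO_SYNCIO above,
 * so the call doesn't return until the flush has completed.
 *
 *	p[0] = 'x';			// dirty the mapping
 *	if (msync(p, pgsz, MS_SYNC) == -1)
 *		err(1, "msync");
 */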

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if (size == 0)
		return (0);

	error = range_test(addr, size, false);
	if (error)
		return error;

	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}
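
/*
 * Userland sketch (illustrative only, assuming pgsz is the page size):
 * the rounding above means an unmap of any byte range removes every
 * page the range touches, so a one-byte munmap() still frees a page:
 *
 *	char *p = mmap(NULL, 2 * pgsz, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *	munmap(p + pgsz, 1);		// unmaps the entire second page
 */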

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
				false);
	return error;
}
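
/*
 * Userland sketch (illustrative only): note that prot is masked with
 * VM_PROT_ALL above, so unknown protection bits are silently dropped
 * rather than rejected.
 *
 *	mprotect(p, pgsz, PROT_READ);			// writes now fault
 *	mprotect(p, pgsz, PROT_READ|PROT_WRITE);	// writable again
 */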

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
   register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
				inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
   register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map,
		    addr, addr + size);
		break;

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}
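
/*
 * Userland sketch (illustrative only): wire a buffer, subject to the
 * global wiredmax and per-process RLIMIT_MEMLOCK checks above, both of
 * which surface as EAGAIN.
 *
 *	if (mlock(buf, buflen) == -1)
 *		err(1, "mlock");
 *	// ... buf stays resident until unlocked ...
 *	munlock(buf, buflen);
 */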

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, void *handle, voff_t foff, vsize_t locklimit)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;
	bool needwritemap;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * it is at least a page-sized quantity.  If the request was for
	 * a fixed mapping, make sure the supplied address adheres to
	 * the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= vm_map_max(map))
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}
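
	/*
	 * Illustrative note: MAP_ALIGNED(n) encodes log2 of the desired
	 * alignment in the flags, so e.g. MAP_ALIGNED(21) requests a
	 * 2MB-aligned mapping and is decoded above as align = 1L << 21.
	 */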

	/*
	 * check resource limits
	 */

	if (!VM_MAP_IS_KERNEL(map) &&
	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
	    curproc->p_rlimit[RLIMIT_AS].rlim_cur))
		return ENOMEM;

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(handle == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(handle != NULL);
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, prot, curlwp->l_cred);
			if (error) {
				return error;
			}
			vref(vp);
			uobj = &vp->v_uobj;

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC) {
				vn_markexec(vp);
			}
		} else {
			int i = maxprot;

			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
			 * XXX have a better way of handling this, right now
			 */
			do {
				uobj = udv_attach((void *) &vp->v_rdev,
				    (flags & MAP_SHARED) ? i :
				    (i & ~VM_PROT_WRITE), foff, size);
				i--;
			} while ((uobj == NULL) && (i > 0));
			if (uobj == NULL)
				return EINVAL;
			advice = UVM_ADV_RANDOM;
		}
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}

		/*
		 * Set vnode flags to indicate the new kinds of mapping.
		 * We take the vnode lock in exclusive mode here to serialize
		 * with direct I/O.
		 *
		 * Safe to check for these flag values without a lock, as
		 * long as a reference to the vnode is held.
		 */
		needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
			(flags & MAP_SHARED) != 0 &&
			(maxprot & VM_PROT_WRITE) != 0;
		if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vp->v_vflag |= VV_MAPPED;
			if (needwritemap) {
				mutex_enter(&vp->v_interlock);
				vp->v_iflag |= VI_WRMAP;
				mutex_exit(&vp->v_interlock);
			}
			VOP_UNLOCK(vp);
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
			advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
					 false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return VM_DEFAULT_ADDRESS(base, sz);
}