/*	$NetBSD: uvm_mmap.c,v 1.102 2006/11/01 10:18:27 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Charles D. Cranor,
 *	Washington University, University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.102 2006/11/01 10:18:27 yamt Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <miscfs/specfs/specdev.h>

#include <sys/sa.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(struct lwp *l, void *v, register_t *retval)
{
#if 0
	struct sys_sbrk_args /* {
		syscallarg(intptr_t) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(struct lwp *l, void *v, register_t *retval)
{
#if 0
	struct sys_sstk_args /* {
		syscallarg(int) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */

/* ARGSUSED */
int
sys_mincore(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mincore_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec, so that storing the status byte for a page
	 * cannot itself fault and thereby outdate the status we are
	 * about to report.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		     (entry->next == &map->header ||
		      entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* top layer */
		uobj = entry->object.uvm_obj;	/* bottom layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			simple_lock(&uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the top layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the bottom layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			simple_unlock(&uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return (error);
}
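
/*
 * Example (illustrative userland sketch, not part of this file): asking
 * which pages of a mapping are resident.  One status byte per page is
 * written to "vec"; a nonzero byte means "in core".  The variable names
 * and sizes below are hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
 *	size_t npgs = (len + pgsz - 1) / pgsz;
 *	char *vec = malloc(npgs);
 *	if (mincore(addr, len, vec) == -1)
 *		err(1, "mincore");	// e.g. ENOMEM on an unmapped hole
 */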

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      modulo PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */

int
sys_mmap(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mmap_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t vm_min_address = VM_MIN_ADDRESS, defaddr;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	void *handle;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */
	if ((ssize_t) size < 0)
		return (EINVAL);			/* don't allow wrap */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */

	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		if (VM_MAXUSER_ADDRESS > 0 &&
		    (addr + size) > VM_MAXUSER_ADDRESS)
			return (EFBIG);
		if (vm_min_address > 0 && addr < vm_min_address)
			return (EINVAL);
		if (addr > addr + size)
			return (EOVERFLOW);		/* no wrapping! */

	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {

		if ((fp = fd_getfile(fdp, fd)) == NULL)
			return (EBADF);

		simple_unlock(&fp->f_slock);

		if (fp->f_type != DTYPE_VNODE)
			return (ENODEV);		/* only mmap vnodes! */
		vp = (struct vnode *)fp->f_data;	/* convert to vnode */

		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK)
			return (ENODEV);  /* only REG/CHR/BLK support mmap */

		if (vp->v_type != VCHR && pos < 0)
			return (EINVAL);

		if (vp->v_type != VCHR && (pos + size) < pos)
			return (EOVERFLOW);		/* no offset wrapping */

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			goto is_anon;
		}

#if NVERIEXEC > 0
		/*
		 * If we are mapping the file as executable, we expect to
		 * have the VERIEXEC_INDIRECT flag set for the entry if it
		 * exists.
		 */
		if (prot & VM_PROT_EXECUTE) {
			if (veriexec_verify(l, vp, "[mmap]",
			    VERIEXEC_INDIRECT, NULL) != 0)
				return (EPERM);
		}
#endif /* NVERIEXEC > 0 */

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ)
			return (EACCES);

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE
			 * to maxprot if the file is not immutable or
			 * append-only.  otherwise, if PROT_WRITE was
			 * requested, return EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, l->l_cred, l)))
					return (error);
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE)
					return (EPERM);
			}
			else if (prot & PROT_WRITE)
				return (EACCES);
		} else {
			/* MAP_PRIVATE mappings can always be written to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	/*
	 * XXX (in)sanity check.  We don't do proper datasize checking
	 * XXX for anonymous (or private writable) mmap().  However, we
	 * XXX know that if we're trying to allocate more than the amount
	 * XXX remaining under our current data size limit, _that_ should
	 * XXX be disallowed.
	 */
	if ((flags & MAP_ANON) != 0 ||
	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
		if (size >
		    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
		     ctob(p->p_vmspace->vm_dsize))) {
			return (ENOMEM);
		}
	}

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	return (error);
}
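
/*
 * Example (illustrative userland sketch, not part of this file): a
 * private file mapping at a non-page-aligned offset.  Per the semantics
 * above, the kernel maps from the page containing "pos" and returns a
 * pointer bumped up by the page offset, so "p" points exactly at byte
 * "pos" of the file.  The path below is hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/example", O_RDONLY);
 *	off_t pos = 100;	// deliberately unaligned
 *	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, pos);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 *	// the mapping itself starts at p - (pos & (pagesize - 1))
 */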

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(struct lwp *l, void *v, register_t *retval)
{
	struct sys___msync13_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == TRUE) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}
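
/*
 * Example (illustrative userland sketch, not part of this file):
 * synchronously flushing one page of a shared file mapping back to its
 * backing vnode.  MS_SYNC maps to PGO_CLEANIT|PGO_SYNCIO above; adding
 * MS_INVALIDATE would also free the cached pages (PGO_FREE).  "p" and
 * "pgsz" are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	if (msync(p, pgsz, MS_SYNC) == -1)
 *		err(1, "msync");
 */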

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, void *v, register_t *retval)
{
	struct sys_munmap_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((ssize_t)size < 0)
		return (EINVAL);
	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (vm_min_address > 0 && addr < vm_min_address)
		return (EINVAL);
	if (addr > addr + size)
		return (EINVAL);
	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mprotect_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
				FALSE);
	return error;
}
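
/*
 * Example (illustrative userland sketch, not part of this file): the
 * classic W^X dance for a JIT-style buffer -- fill a writable anonymous
 * mapping, then flip it to read/execute.  Note that mprotect() can only
 * raise protections up to the mapping's maxprot, which PAX_MPROTECT (in
 * sys_mmap above) may have clamped.  "pgsz" is hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	char *buf = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	// ... emit code into buf ...
 *	if (mprotect(buf, pgsz, PROT_READ | PROT_EXEC) == -1)
 *		err(1, "mprotect");
 */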

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_minherit_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((ssize_t)size < 0)
		return (EINVAL);
	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
				inherit);
	return error;
}
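
/*
 * Example (illustrative userland sketch, not part of this file): keeping
 * a sensitive region out of child processes by marking it
 * MAP_INHERIT_NONE before fork(); the child simply won't have those
 * pages mapped.  "secret" and "secretlen" are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	if (minherit(secret, secretlen, MAP_INHERIT_NONE) == -1)
 *		err(1, "minherit");
 */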

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, void *v, register_t *retval)
{
	struct sys_madvise_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((ssize_t)size <= 0)
		return (EINVAL);

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		/*
		 * XXX IMPLEMENT ME.
		 * Should invent a "weak" mode for uvm_fault()
		 * which would only do the PGO_LOCKED pgo_get().
		 */

		return (0);

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}
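
/*
 * Example (illustrative userland sketch, not part of this file): telling
 * the VM system that a scratch buffer's contents are disposable.
 * MADV_FREE maps to uvm_map_clean(..., PGO_FREE) above, so the pages and
 * any swap backing them can be reclaimed without being written out.
 * "scratch" and "scratchlen" are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	if (madvise(scratch, scratchlen, MADV_FREE) == -1)
 *		err(1, "madvise");
 */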

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}
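
/*
 * Example (illustrative userland sketch, not part of this file): wiring
 * a buffer of key material so it can never be paged out to swap.  The
 * request is charged against both RLIMIT_MEMLOCK and the global wired
 * page limit, so EAGAIN is a normal failure worth checking.  "key" and
 * "keylen" are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	if (mlock(key, keylen) == -1)
 *		err(1, "mlock");	// EAGAIN: over a wiring limit
 *	// ... use key ...
 *	munlock(key, keylen);
 */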

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, void *v, register_t *retval)
{
	struct sys_munlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mlockall_args /* {
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}
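
/*
 * Example (illustrative userland sketch, not part of this file): a
 * latency-sensitive process wiring everything it has mapped and
 * everything it will map.  MCL_FUTURE sets VM_MAP_WIREFUTURE on the map,
 * which makes uvm_mmap() below wire each new mapping as it is created.
 *
 *	#include <sys/mman.h>
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 */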

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, void *handle, voff_t foff,
    vsize_t locklimit)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;
	boolean_t needwritemap;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied address
	 * adheres to the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= vm_map_max(map))
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}
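
	/*
	 * Example (illustrative, not part of the code path): userland
	 * encodes the alignment exponent in the flags word, e.g.
	 *
	 *	mmap(NULL, len, prot, MAP_ANON | MAP_ALIGNED(16), -1, 0)
	 *
	 * asks for a 1 << 16 (64KB) aligned mapping.  The shift above
	 * recovers the exponent 16, and the checks reject exponents that
	 * cannot be represented in a vaddr_t or are smaller than a page.
	 */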

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(handle == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(handle != NULL);
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, 0, curlwp->l_cred, curlwp);
			if (error) {
				return error;
			}

			uobj = uvn_attach((void *)vp, (flags & MAP_SHARED) ?
			   maxprot : (maxprot & ~VM_PROT_WRITE));

			/* XXX for now, attach doesn't gain a ref */
			VREF(vp);

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC)
				vn_markexec(vp);
		} else {
			int i = maxprot;

			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
			 * XXX have a better way of handling this, right now
			 */
			do {
				uobj = udv_attach((void *) &vp->v_rdev,
				    (flags & MAP_SHARED) ? i :
				    (i & ~VM_PROT_WRITE), foff, size);
				i--;
			} while ((uobj == NULL) && (i > 0));
			advice = UVM_ADV_RANDOM;
		}
		if (uobj == NULL)
			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}

		/*
		 * Set vnode flags to indicate the new kinds of mapping.
		 * We take the vnode lock in exclusive mode here to serialize
		 * with direct I/O.
		 */

		needwritemap = (vp->v_flag & VWRITEMAP) == 0 &&
			(flags & MAP_SHARED) != 0 &&
			(maxprot & VM_PROT_WRITE) != 0;
		if ((vp->v_flag & VMAPPED) == 0 || needwritemap) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			simple_lock(&vp->v_interlock);
			vp->v_flag |= VMAPPED;
			if (needwritemap) {
				vp->v_flag |= VWRITEMAP;
			}
			simple_unlock(&vp->v_interlock);
			VOP_UNLOCK(vp, 0);
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
			advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	vm_map_lock(map);
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
					 FALSE, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	vm_map_unlock(map);
	return 0;
}
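
/*
 * Example (illustrative kernel-side sketch, not from the tree): how a
 * driver-like caller might use uvm_mmap() directly to establish a
 * shared mapping of a vnode it already holds, mirroring what sys_mmap
 * does above.  "p", "vp" and "len" are hypothetical; real callers are
 * sys_mmap and framebuffer drivers.
 *
 *	vaddr_t va = 0;
 *	error = uvm_mmap(&p->p_vmspace->vm_map, &va, round_page(len),
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    MAP_SHARED, vp, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 */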

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return VM_DEFAULT_ADDRESS(base, sz);
}