uvm_mmap.c revision 1.147
      1  1.147  christos /*	$NetBSD: uvm_mmap.c,v 1.147 2014/01/25 17:21:49 christos Exp $	*/
      2    1.1       mrg 
      3    1.1       mrg /*
      4    1.1       mrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5   1.51       chs  * Copyright (c) 1991, 1993 The Regents of the University of California.
      6    1.1       mrg  * Copyright (c) 1988 University of Utah.
      7   1.51       chs  *
      8    1.1       mrg  * All rights reserved.
      9    1.1       mrg  *
     10    1.1       mrg  * This code is derived from software contributed to Berkeley by
     11    1.1       mrg  * the Systems Programming Group of the University of Utah Computer
     12    1.1       mrg  * Science Department.
     13    1.1       mrg  *
     14    1.1       mrg  * Redistribution and use in source and binary forms, with or without
     15    1.1       mrg  * modification, are permitted provided that the following conditions
     16    1.1       mrg  * are met:
     17    1.1       mrg  * 1. Redistributions of source code must retain the above copyright
     18    1.1       mrg  *    notice, this list of conditions and the following disclaimer.
     19    1.1       mrg  * 2. Redistributions in binary form must reproduce the above copyright
     20    1.1       mrg  *    notice, this list of conditions and the following disclaimer in the
     21    1.1       mrg  *    documentation and/or other materials provided with the distribution.
     22  1.134     chuck  * 3. Neither the name of the University nor the names of its contributors
     23    1.1       mrg  *    may be used to endorse or promote products derived from this software
     24    1.1       mrg  *    without specific prior written permission.
     25    1.1       mrg  *
     26    1.1       mrg  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     27    1.1       mrg  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     28    1.1       mrg  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     29    1.1       mrg  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     30    1.1       mrg  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     31    1.1       mrg  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     32    1.1       mrg  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     33    1.1       mrg  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     34    1.1       mrg  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     35    1.1       mrg  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     36    1.1       mrg  * SUCH DAMAGE.
     37    1.1       mrg  *
     38    1.1       mrg  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
     39    1.1       mrg  *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
     40    1.3       mrg  * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
     41    1.1       mrg  */
     42    1.1       mrg 
     43    1.1       mrg /*
      44    1.1       mrg  * uvm_mmap.c: system call interface into VM system, plus the kernel
      45    1.1       mrg  * uvm_mmap() function.
     46    1.1       mrg  */
     47   1.60     lukem 
     48   1.60     lukem #include <sys/cdefs.h>
     49  1.147  christos __KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.147 2014/01/25 17:21:49 christos Exp $");
     50   1.80  jdolecek 
     51   1.80  jdolecek #include "opt_compat_netbsd.h"
     52   1.97      elad #include "opt_pax.h"
     53   1.99      elad #include "veriexec.h"
     54   1.60     lukem 
     55    1.1       mrg #include <sys/param.h>
     56    1.1       mrg #include <sys/systm.h>
     57    1.1       mrg #include <sys/file.h>
     58    1.1       mrg #include <sys/filedesc.h>
     59    1.1       mrg #include <sys/resourcevar.h>
     60    1.1       mrg #include <sys/mman.h>
     61    1.1       mrg #include <sys/mount.h>
     62    1.1       mrg #include <sys/vnode.h>
     63    1.1       mrg #include <sys/conf.h>
     64    1.9       mrg #include <sys/stat.h>
     65   1.99      elad 
     66   1.99      elad #if NVERIEXEC > 0
     67   1.99      elad #include <sys/verified_exec.h>
     68   1.99      elad #endif /* NVERIEXEC > 0 */
     69   1.97      elad 
     70  1.137      matt #if defined(PAX_ASLR) || defined(PAX_MPROTECT)
     71   1.97      elad #include <sys/pax.h>
     72  1.137      matt #endif /* PAX_ASLR || PAX_MPROTECT */
     73    1.1       mrg 
     74    1.1       mrg #include <miscfs/specfs/specdev.h>
     75    1.1       mrg 
     76    1.1       mrg #include <sys/syscallargs.h>
     77    1.1       mrg 
     78    1.1       mrg #include <uvm/uvm.h>
     79    1.1       mrg #include <uvm/uvm_device.h>
     80    1.1       mrg 
     81   1.80  jdolecek #ifndef COMPAT_ZERODEV
     82   1.81       dsl #define COMPAT_ZERODEV(dev)	(0)
     83   1.80  jdolecek #endif
     84    1.1       mrg 
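                          /*
                           * range_test: check that a user virtual address range lies within
                           * [VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS] and does not wrap, then give
                           * the machine-dependent hook, if any, a chance to veto it.  mmap(2)
                           * reports failures as EFBIG/EOVERFLOW; other callers get EINVAL.
                           */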
     85  1.115      yamt static int
     86  1.115      yamt range_test(vaddr_t addr, vsize_t size, bool ismmap)
     87  1.115      yamt {
     88  1.115      yamt 	vaddr_t vm_min_address = VM_MIN_ADDRESS;
     89  1.115      yamt 	vaddr_t vm_max_address = VM_MAXUSER_ADDRESS;
     90  1.115      yamt 	vaddr_t eaddr = addr + size;
     91  1.145    martin 	int res = 0;
     92  1.115      yamt 
     93  1.115      yamt 	if (addr < vm_min_address)
     94  1.115      yamt 		return EINVAL;
     95  1.115      yamt 	if (eaddr > vm_max_address)
     96  1.115      yamt 		return ismmap ? EFBIG : EINVAL;
     97  1.115      yamt 	if (addr > eaddr) /* no wrapping! */
     98  1.115      yamt 		return ismmap ? EOVERFLOW : EINVAL;
     99  1.145    martin 
    100  1.145    martin #ifdef MD_MMAP_RANGE_TEST
    101  1.145    martin 	res = MD_MMAP_RANGE_TEST(addr, eaddr);
    102  1.145    martin #endif
    103  1.145    martin 
    104  1.145    martin 	return res;
    105  1.115      yamt }
    106  1.110  christos 
    107    1.1       mrg /*
    108    1.1       mrg  * unimplemented VM system calls:
    109    1.1       mrg  */
    110    1.1       mrg 
    111    1.1       mrg /*
    112    1.1       mrg  * sys_sbrk: sbrk system call.
    113    1.1       mrg  */
    114    1.1       mrg 
    115    1.1       mrg /* ARGSUSED */
    116    1.6       mrg int
    117  1.119       dsl sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
    118    1.1       mrg {
    119  1.119       dsl 	/* {
    120   1.33    kleink 		syscallarg(intptr_t) incr;
    121  1.119       dsl 	} */
    122    1.6       mrg 
    123   1.17    kleink 	return (ENOSYS);
    124    1.1       mrg }
    125    1.1       mrg 
    126    1.1       mrg /*
    127    1.1       mrg  * sys_sstk: sstk system call.
    128    1.1       mrg  */
    129    1.1       mrg 
    130    1.1       mrg /* ARGSUSED */
    131    1.6       mrg int
    132  1.119       dsl sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
    133    1.1       mrg {
    134  1.119       dsl 	/* {
    135   1.20       mrg 		syscallarg(int) incr;
    136  1.119       dsl 	} */
    137    1.6       mrg 
    138   1.17    kleink 	return (ENOSYS);
    139    1.1       mrg }
    140    1.1       mrg 
    141    1.1       mrg /*
    142    1.1       mrg  * sys_mincore: determine if pages are in core or not.
    143    1.1       mrg  */
    144    1.1       mrg 
    145    1.1       mrg /* ARGSUSED */
    146    1.6       mrg int
    147  1.129      yamt sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    148  1.129      yamt     register_t *retval)
    149    1.1       mrg {
    150  1.119       dsl 	/* {
    151   1.22   thorpej 		syscallarg(void *) addr;
    152   1.20       mrg 		syscallarg(size_t) len;
    153   1.20       mrg 		syscallarg(char *) vec;
    154  1.119       dsl 	} */
    155   1.67   thorpej 	struct proc *p = l->l_proc;
    156   1.56       chs 	struct vm_page *pg;
    157   1.22   thorpej 	char *vec, pgi;
    158   1.22   thorpej 	struct uvm_object *uobj;
    159   1.22   thorpej 	struct vm_amap *amap;
    160   1.22   thorpej 	struct vm_anon *anon;
    161   1.53       chs 	struct vm_map_entry *entry;
    162   1.22   thorpej 	vaddr_t start, end, lim;
    163   1.53       chs 	struct vm_map *map;
    164   1.22   thorpej 	vsize_t len;
    165   1.22   thorpej 	int error = 0, npgs;
    166   1.22   thorpej 
    167   1.22   thorpej 	map = &p->p_vmspace->vm_map;
    168   1.22   thorpej 
    169   1.22   thorpej 	start = (vaddr_t)SCARG(uap, addr);
    170   1.22   thorpej 	len = SCARG(uap, len);
    171   1.22   thorpej 	vec = SCARG(uap, vec);
    172   1.22   thorpej 
    173   1.22   thorpej 	if (start & PAGE_MASK)
    174   1.22   thorpej 		return (EINVAL);
    175   1.22   thorpej 	len = round_page(len);
    176   1.22   thorpej 	end = start + len;
    177   1.22   thorpej 	if (end <= start)
    178   1.22   thorpej 		return (EINVAL);
    179   1.22   thorpej 
    180   1.22   thorpej 	/*
    181   1.22   thorpej 	 * Lock down vec, so our returned status isn't outdated by
    182   1.22   thorpej 	 * storing the status byte for a page.
    183   1.22   thorpej 	 */
    184   1.50       chs 
    185   1.62       chs 	npgs = len >> PAGE_SHIFT;
    186  1.100       chs 	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
    187   1.62       chs 	if (error) {
    188   1.62       chs 		return error;
    189   1.62       chs 	}
    190   1.22   thorpej 	vm_map_lock_read(map);
    191   1.22   thorpej 
    192  1.107   thorpej 	if (uvm_map_lookup_entry(map, start, &entry) == false) {
    193   1.22   thorpej 		error = ENOMEM;
    194   1.22   thorpej 		goto out;
    195   1.22   thorpej 	}
    196   1.22   thorpej 
    197   1.22   thorpej 	for (/* nothing */;
    198   1.22   thorpej 	     entry != &map->header && entry->start < end;
    199   1.22   thorpej 	     entry = entry->next) {
    200   1.49       chs 		KASSERT(!UVM_ET_ISSUBMAP(entry));
    201   1.49       chs 		KASSERT(start >= entry->start);
    202   1.49       chs 
    203   1.22   thorpej 		/* Make sure there are no holes. */
    204   1.22   thorpej 		if (entry->end < end &&
    205   1.22   thorpej 		     (entry->next == &map->header ||
    206   1.22   thorpej 		      entry->next->start > entry->end)) {
    207   1.22   thorpej 			error = ENOMEM;
    208   1.22   thorpej 			goto out;
    209   1.22   thorpej 		}
    210    1.6       mrg 
    211   1.22   thorpej 		lim = end < entry->end ? end : entry->end;
    212   1.22   thorpej 
    213   1.22   thorpej 		/*
    214   1.31   thorpej 		 * Special case for objects with no "real" pages.  Those
    215   1.31   thorpej 		 * are always considered resident (mapped devices).
    216   1.22   thorpej 		 */
    217   1.50       chs 
    218   1.22   thorpej 		if (UVM_ET_ISOBJ(entry)) {
    219   1.49       chs 			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
    220   1.79      yamt 			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
    221   1.22   thorpej 				for (/* nothing */; start < lim;
    222   1.22   thorpej 				     start += PAGE_SIZE, vec++)
    223   1.22   thorpej 					subyte(vec, 1);
    224   1.22   thorpej 				continue;
    225   1.22   thorpej 			}
    226   1.22   thorpej 		}
    227   1.22   thorpej 
    228  1.132  uebayasi 		amap = entry->aref.ar_amap;	/* upper layer */
    229  1.132  uebayasi 		uobj = entry->object.uvm_obj;	/* lower layer */
    230   1.22   thorpej 
    231   1.22   thorpej 		if (amap != NULL)
    232   1.22   thorpej 			amap_lock(amap);
    233   1.22   thorpej 		if (uobj != NULL)
    234  1.136     rmind 			mutex_enter(uobj->vmobjlock);
    235   1.22   thorpej 
    236   1.22   thorpej 		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
    237   1.22   thorpej 			pgi = 0;
    238   1.22   thorpej 			if (amap != NULL) {
    239  1.132  uebayasi 				/* Check the upper layer first. */
    240   1.22   thorpej 				anon = amap_lookup(&entry->aref,
    241   1.22   thorpej 				    start - entry->start);
    242   1.22   thorpej 				/* Don't need to lock anon here. */
    243   1.91      yamt 				if (anon != NULL && anon->an_page != NULL) {
    244   1.50       chs 
    245   1.22   thorpej 					/*
    246   1.22   thorpej 					 * Anon has the page for this entry
    247   1.22   thorpej 					 * offset.
    248   1.22   thorpej 					 */
    249   1.50       chs 
    250   1.22   thorpej 					pgi = 1;
    251   1.22   thorpej 				}
    252   1.22   thorpej 			}
    253   1.22   thorpej 			if (uobj != NULL && pgi == 0) {
    254  1.132  uebayasi 				/* Check the lower layer. */
    255   1.56       chs 				pg = uvm_pagelookup(uobj,
    256   1.22   thorpej 				    entry->offset + (start - entry->start));
    257   1.56       chs 				if (pg != NULL) {
    258   1.50       chs 
    259   1.22   thorpej 					/*
    260   1.22   thorpej 					 * Object has the page for this entry
    261   1.22   thorpej 					 * offset.
    262   1.22   thorpej 					 */
    263   1.50       chs 
    264   1.22   thorpej 					pgi = 1;
    265   1.22   thorpej 				}
    266   1.22   thorpej 			}
    267   1.22   thorpej 			(void) subyte(vec, pgi);
    268   1.22   thorpej 		}
    269   1.22   thorpej 		if (uobj != NULL)
    270  1.136     rmind 			mutex_exit(uobj->vmobjlock);
    271   1.22   thorpej 		if (amap != NULL)
    272   1.22   thorpej 			amap_unlock(amap);
    273   1.22   thorpej 	}
    274   1.22   thorpej 
    275   1.22   thorpej  out:
    276   1.22   thorpej 	vm_map_unlock_read(map);
    277  1.100       chs 	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
    278   1.22   thorpej 	return (error);
    279    1.1       mrg }
    280    1.1       mrg 
    281    1.1       mrg /*
    282    1.1       mrg  * sys_mmap: mmap system call.
    283    1.1       mrg  *
    284   1.64    atatat  * => file offset and address may not be page aligned
      285    1.1       mrg  *    - if MAP_FIXED, offset and address must have the same remainder mod PAGE_SIZE
    286    1.1       mrg  *    - if address isn't page aligned the mapping starts at trunc_page(addr)
    287    1.1       mrg  *      and the return value is adjusted up by the page offset.
    288    1.1       mrg  */
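                          /*
                           * Example of the non-fixed, unaligned case (illustrative values
                           * only): with 4 KiB pages, mmap(NULL, 100, PROT_READ, MAP_PRIVATE,
                           * fd, 0x1234) maps from file offset 0x1000 and returns the
                           * kernel-chosen page address plus 0x234, so the returned pointer
                           * addresses file offset 0x1234 as requested.
                           */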
    289    1.1       mrg 
    290    1.6       mrg int
    291  1.119       dsl sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
    292    1.6       mrg {
    293  1.119       dsl 	/* {
    294  1.108  christos 		syscallarg(void *) addr;
    295    1.6       mrg 		syscallarg(size_t) len;
    296    1.6       mrg 		syscallarg(int) prot;
    297    1.6       mrg 		syscallarg(int) flags;
    298    1.6       mrg 		syscallarg(int) fd;
    299    1.6       mrg 		syscallarg(long) pad;
    300    1.6       mrg 		syscallarg(off_t) pos;
    301  1.119       dsl 	} */
    302   1.67   thorpej 	struct proc *p = l->l_proc;
    303   1.12       eeh 	vaddr_t addr;
    304    1.9       mrg 	struct vattr va;
    305    1.6       mrg 	off_t pos;
    306   1.12       eeh 	vsize_t size, pageoff;
    307    1.6       mrg 	vm_prot_t prot, maxprot;
    308    1.6       mrg 	int flags, fd;
    309  1.110  christos 	vaddr_t defaddr;
    310  1.116        ad 	struct file *fp = NULL;
    311    1.6       mrg 	struct vnode *vp;
    312   1.50       chs 	void *handle;
    313    1.6       mrg 	int error;
    314  1.120  christos #ifdef PAX_ASLR
    315  1.120  christos 	vaddr_t orig_addr;
    316  1.120  christos #endif /* PAX_ASLR */
    317    1.6       mrg 
    318    1.6       mrg 	/*
    319    1.6       mrg 	 * first, extract syscall args from the uap.
    320    1.6       mrg 	 */
    321    1.6       mrg 
    322   1.50       chs 	addr = (vaddr_t)SCARG(uap, addr);
    323   1.50       chs 	size = (vsize_t)SCARG(uap, len);
    324    1.6       mrg 	prot = SCARG(uap, prot) & VM_PROT_ALL;
    325    1.6       mrg 	flags = SCARG(uap, flags);
    326    1.6       mrg 	fd = SCARG(uap, fd);
    327    1.6       mrg 	pos = SCARG(uap, pos);
    328    1.6       mrg 
    329  1.120  christos #ifdef PAX_ASLR
    330  1.120  christos 	orig_addr = addr;
    331  1.120  christos #endif /* PAX_ASLR */
    332  1.120  christos 
    333    1.6       mrg 	/*
    334   1.24   thorpej 	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
    335   1.24   thorpej 	 * validate the flags.
    336   1.24   thorpej 	 */
    337  1.147  christos 	if (flags & MAP_COPY) {
    338   1.24   thorpej 		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
    339  1.147  christos #if defined(COMPAT_10) && defined(__i386__)
     340  1.147  christos 		/*
     341  1.147  christos 		 * Ancient i386 kernels did not enforce PROT_EXEC (all
     342  1.147  christos 		 * mappings were executable), so ld.so did not bother to
     343  1.147  christos 		 * request it.  We take care of this for amd64 in compat32.
     344  1.147  christos 		 */
     345  1.147  christos 		prot |= PROT_EXEC;
    346  1.147  christos #endif
    347  1.147  christos 	}
    348   1.24   thorpej 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
    349   1.24   thorpej 		return (EINVAL);
    350   1.24   thorpej 
    351   1.24   thorpej 	/*
    352    1.6       mrg 	 * align file position and save offset.  adjust size.
    353    1.6       mrg 	 */
    354    1.6       mrg 
    355    1.6       mrg 	pageoff = (pos & PAGE_MASK);
    356    1.6       mrg 	pos  -= pageoff;
    357    1.6       mrg 	size += pageoff;			/* add offset */
    358   1.50       chs 	size = (vsize_t)round_page(size);	/* round up */
    359    1.6       mrg 
    360    1.6       mrg 	/*
    361   1.51       chs 	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
    362    1.6       mrg 	 */
    363    1.6       mrg 	if (flags & MAP_FIXED) {
    364    1.6       mrg 
    365    1.6       mrg 		/* ensure address and file offset are aligned properly */
    366    1.6       mrg 		addr -= pageoff;
    367    1.6       mrg 		if (addr & PAGE_MASK)
    368    1.6       mrg 			return (EINVAL);
    369    1.6       mrg 
    370  1.115      yamt 		error = range_test(addr, size, true);
    371  1.115      yamt 		if (error)
    372  1.115      yamt 			return error;
    373   1.75  christos 	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
    374    1.6       mrg 
    375    1.6       mrg 		/*
    376   1.68    atatat 		 * not fixed: make sure we skip over the largest
    377   1.68    atatat 		 * possible heap for non-topdown mapping arrangements.
    378   1.68    atatat 		 * we will refine our guess later (e.g. to account for
    379   1.68    atatat 		 * VAC, etc)
    380    1.6       mrg 		 */
    381   1.46       chs 
    382   1.89      fvdl 		defaddr = p->p_emul->e_vm_default_addr(p,
    383   1.89      fvdl 		    (vaddr_t)p->p_vmspace->vm_daddr, size);
    384   1.89      fvdl 
    385   1.68    atatat 		if (addr == 0 ||
    386   1.68    atatat 		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
    387   1.89      fvdl 			addr = MAX(addr, defaddr);
    388   1.68    atatat 		else
    389   1.89      fvdl 			addr = MIN(addr, defaddr);
    390    1.6       mrg 	}
    391    1.6       mrg 
    392    1.6       mrg 	/*
    393    1.6       mrg 	 * check for file mappings (i.e. not anonymous) and verify file.
    394    1.6       mrg 	 */
    395    1.6       mrg 
    396    1.6       mrg 	if ((flags & MAP_ANON) == 0) {
    397  1.122        ad 		if ((fp = fd_getfile(fd)) == NULL)
    398   1.54   thorpej 			return (EBADF);
    399  1.116        ad 		if (fp->f_type != DTYPE_VNODE) {
    400  1.122        ad 			fd_putfile(fd);
    401    1.7    kleink 			return (ENODEV);		/* only mmap vnodes! */
    402  1.116        ad 		}
    403  1.122        ad 		vp = fp->f_data;		/* convert to vnode */
    404   1.11   thorpej 		if (vp->v_type != VREG && vp->v_type != VCHR &&
    405  1.116        ad 		    vp->v_type != VBLK) {
    406  1.122        ad 			fd_putfile(fd);
    407   1.11   thorpej 			return (ENODEV);  /* only REG/CHR/BLK support mmap */
    408  1.116        ad 		}
    409  1.116        ad 		if (vp->v_type != VCHR && pos < 0) {
    410  1.122        ad 			fd_putfile(fd);
    411   1.61       chs 			return (EINVAL);
    412  1.116        ad 		}
    413  1.138      yamt 		if (vp->v_type != VCHR && (off_t)(pos + size) < pos) {
    414  1.122        ad 			fd_putfile(fd);
    415   1.39    kleink 			return (EOVERFLOW);		/* no offset wrapping */
    416  1.116        ad 		}
    417    1.6       mrg 
    418    1.6       mrg 		/* special case: catch SunOS style /dev/zero */
    419   1.80  jdolecek 		if (vp->v_type == VCHR
    420   1.80  jdolecek 		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
    421    1.6       mrg 			flags |= MAP_ANON;
    422  1.122        ad 			fd_putfile(fd);
    423  1.116        ad 			fp = NULL;
    424    1.6       mrg 			goto is_anon;
    425    1.6       mrg 		}
    426    1.6       mrg 
    427    1.6       mrg 		/*
    428    1.6       mrg 		 * Old programs may not select a specific sharing type, so
    429    1.6       mrg 		 * default to an appropriate one.
    430    1.6       mrg 		 *
    431    1.6       mrg 		 * XXX: how does MAP_ANON fit in the picture?
    432    1.6       mrg 		 */
    433   1.24   thorpej 		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
    434    1.8        tv #if defined(DEBUG)
    435    1.6       mrg 			printf("WARNING: defaulted mmap() share type to "
    436   1.71  gmcgarry 			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
    437    1.6       mrg 			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
    438    1.6       mrg 			    p->p_comm);
    439    1.1       mrg #endif
    440    1.6       mrg 			if (vp->v_type == VCHR)
    441    1.6       mrg 				flags |= MAP_SHARED;	/* for a device */
    442    1.6       mrg 			else
    443    1.6       mrg 				flags |= MAP_PRIVATE;	/* for a file */
    444    1.6       mrg 		}
    445    1.6       mrg 
    446   1.51       chs 		/*
    447    1.6       mrg 		 * MAP_PRIVATE device mappings don't make sense (and aren't
    448    1.6       mrg 		 * supported anyway).  However, some programs rely on this,
    449    1.6       mrg 		 * so just change it to MAP_SHARED.
    450    1.6       mrg 		 */
    451    1.6       mrg 		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
    452    1.6       mrg 			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
    453    1.6       mrg 		}
    454    1.1       mrg 
    455    1.6       mrg 		/*
    456    1.6       mrg 		 * now check protection
    457    1.6       mrg 		 */
    458    1.6       mrg 
    459   1.48   thorpej 		maxprot = VM_PROT_EXECUTE;
    460    1.6       mrg 
    461    1.6       mrg 		/* check read access */
    462    1.6       mrg 		if (fp->f_flag & FREAD)
    463    1.6       mrg 			maxprot |= VM_PROT_READ;
    464  1.116        ad 		else if (prot & PROT_READ) {
    465  1.122        ad 			fd_putfile(fd);
    466    1.6       mrg 			return (EACCES);
    467  1.116        ad 		}
    468    1.6       mrg 
    469    1.9       mrg 		/* check write access, shared case first */
    470    1.6       mrg 		if (flags & MAP_SHARED) {
    471    1.9       mrg 			/*
    472    1.9       mrg 			 * if the file is writable, only add PROT_WRITE to
    473    1.9       mrg 			 * maxprot if the file is not immutable, append-only.
    474    1.9       mrg 			 * otherwise, if we have asked for PROT_WRITE, return
    475    1.9       mrg 			 * EPERM.
    476    1.9       mrg 			 */
    477    1.9       mrg 			if (fp->f_flag & FWRITE) {
    478  1.139   hannken 				vn_lock(vp, LK_SHARED | LK_RETRY);
    479  1.139   hannken 				error = VOP_GETATTR(vp, &va, l->l_cred);
    480  1.139   hannken 				VOP_UNLOCK(vp);
    481  1.139   hannken 				if (error) {
    482  1.122        ad 					fd_putfile(fd);
    483    1.9       mrg 					return (error);
    484  1.116        ad 				}
    485   1.84   hannken 				if ((va.va_flags &
    486   1.84   hannken 				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
    487    1.9       mrg 					maxprot |= VM_PROT_WRITE;
    488  1.116        ad 				else if (prot & PROT_WRITE) {
    489  1.122        ad 					fd_putfile(fd);
    490    1.9       mrg 					return (EPERM);
    491  1.116        ad 				}
    492    1.9       mrg 			}
    493  1.116        ad 			else if (prot & PROT_WRITE) {
    494  1.122        ad 				fd_putfile(fd);
    495    1.6       mrg 				return (EACCES);
    496  1.116        ad 			}
    497    1.6       mrg 		} else {
     498    1.6       mrg 		/* MAP_PRIVATE mappings can always write to their own copy */
    499    1.6       mrg 			maxprot |= VM_PROT_WRITE;
    500    1.6       mrg 		}
    501   1.50       chs 		handle = vp;
    502    1.1       mrg 
    503    1.6       mrg 	} else {		/* MAP_ANON case */
    504   1.24   thorpej 		/*
    505   1.24   thorpej 		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
    506   1.24   thorpej 		 */
    507    1.6       mrg 		if (fd != -1)
    508    1.6       mrg 			return (EINVAL);
    509    1.1       mrg 
    510   1.24   thorpej  is_anon:		/* label for SunOS style /dev/zero */
    511    1.6       mrg 		handle = NULL;
    512    1.6       mrg 		maxprot = VM_PROT_ALL;
    513    1.6       mrg 		pos = 0;
    514   1.28       cgd 	}
    515   1.28       cgd 
    516  1.112      elad #if NVERIEXEC > 0
    517  1.112      elad 	if (handle != NULL) {
    518  1.112      elad 		/*
    519  1.112      elad 		 * Check if the file can be executed indirectly.
    520  1.112      elad 		 *
    521  1.112      elad 		 * XXX: This gives false warnings about "Incorrect access type"
    522  1.112      elad 		 * XXX: if the mapping is not executable. Harmless, but will be
    523  1.112      elad 		 * XXX: fixed as part of other changes.
    524  1.112      elad 		 */
    525  1.112      elad 		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
    526  1.112      elad 		    NULL)) {
    527  1.112      elad 			/*
    528  1.112      elad 			 * Don't allow executable mappings if we can't
    529  1.112      elad 			 * indirectly execute the file.
    530  1.112      elad 			 */
    531  1.116        ad 			if (prot & VM_PROT_EXECUTE) {
    532  1.116        ad 			     	if (fp != NULL)
    533  1.122        ad 					fd_putfile(fd);
    534  1.112      elad 				return (EPERM);
    535  1.116        ad 			}
    536  1.112      elad 
    537  1.112      elad 			/*
    538  1.112      elad 			 * Strip the executable bit from 'maxprot' to make sure
    539  1.112      elad 			 * it can't be made executable later.
    540  1.112      elad 			 */
    541  1.112      elad 			maxprot &= ~VM_PROT_EXECUTE;
    542  1.112      elad 		}
    543  1.112      elad 	}
    544  1.112      elad #endif /* NVERIEXEC > 0 */
    545  1.112      elad 
    546   1.97      elad #ifdef PAX_MPROTECT
    547   1.97      elad 	pax_mprotect(l, &prot, &maxprot);
    548   1.97      elad #endif /* PAX_MPROTECT */
    549   1.97      elad 
    550  1.120  christos #ifdef PAX_ASLR
    551  1.120  christos 	pax_aslr(l, &addr, orig_addr, flags);
    552  1.120  christos #endif /* PAX_ASLR */
    553  1.120  christos 
    554    1.6       mrg 	/*
    555    1.6       mrg 	 * now let kernel internal function uvm_mmap do the work.
    556    1.6       mrg 	 */
    557    1.6       mrg 
    558    1.6       mrg 	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
    559   1.25   thorpej 	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
    560    1.6       mrg 
    561    1.6       mrg 	if (error == 0)
    562    1.6       mrg 		/* remember to add offset */
    563    1.6       mrg 		*retval = (register_t)(addr + pageoff);
    564    1.1       mrg 
    565  1.116        ad      	if (fp != NULL)
    566  1.122        ad 		fd_putfile(fd);
    567  1.116        ad 
    568    1.6       mrg 	return (error);
    569    1.1       mrg }
    570    1.1       mrg 
    571    1.1       mrg /*
    572    1.1       mrg  * sys___msync13: the msync system call (a front-end for flush)
    573    1.1       mrg  */
    574    1.1       mrg 
    575    1.6       mrg int
    576  1.129      yamt sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    577  1.129      yamt     register_t *retval)
    578    1.6       mrg {
    579  1.119       dsl 	/* {
    580  1.108  christos 		syscallarg(void *) addr;
    581    1.6       mrg 		syscallarg(size_t) len;
    582    1.6       mrg 		syscallarg(int) flags;
    583  1.119       dsl 	} */
    584   1.67   thorpej 	struct proc *p = l->l_proc;
    585   1.12       eeh 	vaddr_t addr;
    586   1.12       eeh 	vsize_t size, pageoff;
    587   1.53       chs 	struct vm_map *map;
    588   1.50       chs 	int error, rv, flags, uvmflags;
    589    1.6       mrg 
    590    1.6       mrg 	/*
    591    1.6       mrg 	 * extract syscall args from the uap
    592    1.6       mrg 	 */
    593    1.6       mrg 
    594   1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    595   1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    596    1.6       mrg 	flags = SCARG(uap, flags);
    597    1.6       mrg 
    598    1.6       mrg 	/* sanity check flags */
    599    1.6       mrg 	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
    600   1.77       chs 	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
    601   1.77       chs 	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
    602   1.77       chs 		return (EINVAL);
    603    1.6       mrg 	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
    604   1.77       chs 		flags |= MS_SYNC;
    605    1.1       mrg 
    606    1.6       mrg 	/*
    607   1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    608    1.6       mrg 	 */
    609    1.6       mrg 
    610    1.6       mrg 	pageoff = (addr & PAGE_MASK);
    611    1.6       mrg 	addr -= pageoff;
    612    1.6       mrg 	size += pageoff;
    613   1.50       chs 	size = (vsize_t)round_page(size);
    614    1.6       mrg 
    615  1.115      yamt 	error = range_test(addr, size, false);
    616  1.115      yamt 	if (error)
    617  1.115      yamt 		return error;
    618    1.6       mrg 
    619    1.6       mrg 	/*
    620    1.6       mrg 	 * get map
    621    1.6       mrg 	 */
    622    1.6       mrg 
    623    1.6       mrg 	map = &p->p_vmspace->vm_map;
    624    1.6       mrg 
    625    1.6       mrg 	/*
    626    1.6       mrg 	 * XXXCDC: do we really need this semantic?
    627    1.6       mrg 	 *
    628    1.6       mrg 	 * XXX Gak!  If size is zero we are supposed to sync "all modified
     629    1.6       mrg 	 * pages within the region containing addr".  Unfortunately, we
    630    1.6       mrg 	 * don't really keep track of individual mmaps so we approximate
    631    1.6       mrg 	 * by flushing the range of the map entry containing addr.
    632    1.6       mrg 	 * This can be incorrect if the region splits or is coalesced
    633    1.6       mrg 	 * with a neighbor.
    634    1.6       mrg 	 */
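                          	/*
                          	 * Concretely (illustrative): msync(p, 0, MS_SYNC) flushes the
                          	 * whole map entry containing p, which may cover more or less
                          	 * than the region originally returned by mmap() if entries
                          	 * have since been split or coalesced.
                          	 */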
    635   1.50       chs 
    636    1.6       mrg 	if (size == 0) {
    637   1.53       chs 		struct vm_map_entry *entry;
    638   1.51       chs 
    639    1.6       mrg 		vm_map_lock_read(map);
    640    1.6       mrg 		rv = uvm_map_lookup_entry(map, addr, &entry);
    641  1.107   thorpej 		if (rv == true) {
    642    1.6       mrg 			addr = entry->start;
    643    1.6       mrg 			size = entry->end - entry->start;
    644    1.6       mrg 		}
    645    1.6       mrg 		vm_map_unlock_read(map);
    646  1.107   thorpej 		if (rv == false)
    647    1.6       mrg 			return (EINVAL);
    648    1.6       mrg 	}
    649    1.6       mrg 
    650    1.6       mrg 	/*
    651    1.6       mrg 	 * translate MS_ flags into PGO_ flags
    652    1.6       mrg 	 */
    653   1.50       chs 
    654   1.34   thorpej 	uvmflags = PGO_CLEANIT;
    655   1.34   thorpej 	if (flags & MS_INVALIDATE)
    656   1.34   thorpej 		uvmflags |= PGO_FREE;
    657    1.6       mrg 	if (flags & MS_SYNC)
    658    1.6       mrg 		uvmflags |= PGO_SYNCIO;
    659    1.6       mrg 
    660   1.50       chs 	error = uvm_map_clean(map, addr, addr+size, uvmflags);
    661   1.50       chs 	return error;
    662    1.1       mrg }
    663    1.1       mrg 
    664    1.1       mrg /*
     665    1.1       mrg  * sys_munmap: unmap a user's memory
    666    1.1       mrg  */
    667    1.1       mrg 
    668    1.6       mrg int
    669  1.119       dsl sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
    670    1.6       mrg {
    671  1.119       dsl 	/* {
    672  1.108  christos 		syscallarg(void *) addr;
    673    1.6       mrg 		syscallarg(size_t) len;
    674  1.119       dsl 	} */
    675   1.67   thorpej 	struct proc *p = l->l_proc;
    676   1.12       eeh 	vaddr_t addr;
    677   1.12       eeh 	vsize_t size, pageoff;
    678   1.53       chs 	struct vm_map *map;
    679    1.6       mrg 	struct vm_map_entry *dead_entries;
    680  1.115      yamt 	int error;
    681    1.6       mrg 
    682    1.6       mrg 	/*
    683   1.50       chs 	 * get syscall args.
    684    1.6       mrg 	 */
    685    1.6       mrg 
    686   1.50       chs 	addr = (vaddr_t)SCARG(uap, addr);
    687   1.50       chs 	size = (vsize_t)SCARG(uap, len);
    688   1.51       chs 
    689    1.6       mrg 	/*
    690   1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    691    1.6       mrg 	 */
    692    1.6       mrg 
    693    1.6       mrg 	pageoff = (addr & PAGE_MASK);
    694    1.6       mrg 	addr -= pageoff;
    695    1.6       mrg 	size += pageoff;
    696   1.50       chs 	size = (vsize_t)round_page(size);
    697    1.6       mrg 
    698    1.6       mrg 	if (size == 0)
    699    1.6       mrg 		return (0);
    700    1.6       mrg 
    701  1.115      yamt 	error = range_test(addr, size, false);
    702  1.115      yamt 	if (error)
    703  1.115      yamt 		return error;
    704  1.110  christos 
    705    1.6       mrg 	map = &p->p_vmspace->vm_map;
    706    1.6       mrg 
    707    1.6       mrg 	/*
    708   1.51       chs 	 * interesting system call semantic: make sure entire range is
    709    1.6       mrg 	 * allocated before allowing an unmap.
    710    1.6       mrg 	 */
    711    1.6       mrg 
    712   1.50       chs 	vm_map_lock(map);
    713   1.66   mycroft #if 0
    714    1.6       mrg 	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
    715    1.6       mrg 		vm_map_unlock(map);
    716    1.6       mrg 		return (EINVAL);
    717    1.6       mrg 	}
    718   1.66   mycroft #endif
    719  1.144      para 	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
    720   1.50       chs 	vm_map_unlock(map);
    721    1.6       mrg 	if (dead_entries != NULL)
    722    1.6       mrg 		uvm_unmap_detach(dead_entries, 0);
    723    1.6       mrg 	return (0);
    724    1.1       mrg }
    725    1.1       mrg 
    726    1.1       mrg /*
    727    1.1       mrg  * sys_mprotect: the mprotect system call
    728    1.1       mrg  */
    729    1.1       mrg 
    730    1.6       mrg int
    731  1.129      yamt sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    732  1.129      yamt     register_t *retval)
    733    1.6       mrg {
    734  1.119       dsl 	/* {
    735  1.108  christos 		syscallarg(void *) addr;
    736   1.76       chs 		syscallarg(size_t) len;
    737    1.6       mrg 		syscallarg(int) prot;
    738  1.119       dsl 	} */
    739   1.67   thorpej 	struct proc *p = l->l_proc;
    740   1.12       eeh 	vaddr_t addr;
    741   1.12       eeh 	vsize_t size, pageoff;
    742    1.6       mrg 	vm_prot_t prot;
    743   1.50       chs 	int error;
    744    1.6       mrg 
    745    1.6       mrg 	/*
    746    1.6       mrg 	 * extract syscall args from uap
    747    1.6       mrg 	 */
    748    1.6       mrg 
    749   1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    750   1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    751    1.6       mrg 	prot = SCARG(uap, prot) & VM_PROT_ALL;
    752    1.6       mrg 
    753    1.6       mrg 	/*
    754   1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    755    1.6       mrg 	 */
    756   1.50       chs 
    757    1.6       mrg 	pageoff = (addr & PAGE_MASK);
    758    1.6       mrg 	addr -= pageoff;
    759    1.6       mrg 	size += pageoff;
    760   1.76       chs 	size = round_page(size);
    761   1.50       chs 
    762  1.115      yamt 	error = range_test(addr, size, false);
    763  1.115      yamt 	if (error)
    764  1.115      yamt 		return error;
    765  1.110  christos 
    766   1.50       chs 	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
    767  1.107   thorpej 				false);
    768   1.50       chs 	return error;
    769    1.1       mrg }
    770    1.1       mrg 
    771    1.1       mrg /*
    772    1.1       mrg  * sys_minherit: the minherit system call
    773    1.1       mrg  */
    774    1.1       mrg 
    775    1.6       mrg int
    776  1.129      yamt sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
    777  1.129      yamt    register_t *retval)
    778    1.6       mrg {
    779  1.119       dsl 	/* {
    780  1.108  christos 		syscallarg(void *) addr;
    781    1.6       mrg 		syscallarg(int) len;
    782    1.6       mrg 		syscallarg(int) inherit;
    783  1.119       dsl 	} */
    784   1.67   thorpej 	struct proc *p = l->l_proc;
    785   1.12       eeh 	vaddr_t addr;
    786   1.12       eeh 	vsize_t size, pageoff;
    787   1.40  augustss 	vm_inherit_t inherit;
    788   1.50       chs 	int error;
    789   1.51       chs 
    790   1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    791   1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    792    1.6       mrg 	inherit = SCARG(uap, inherit);
    793   1.50       chs 
    794    1.6       mrg 	/*
    795   1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    796    1.6       mrg 	 */
    797    1.6       mrg 
    798    1.6       mrg 	pageoff = (addr & PAGE_MASK);
    799    1.6       mrg 	addr -= pageoff;
    800    1.6       mrg 	size += pageoff;
    801   1.50       chs 	size = (vsize_t)round_page(size);
    802    1.6       mrg 
    803  1.115      yamt 	error = range_test(addr, size, false);
    804  1.115      yamt 	if (error)
    805  1.115      yamt 		return error;
    806  1.110  christos 
    807   1.50       chs 	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
    808   1.50       chs 				inherit);
    809   1.50       chs 	return error;
    810   1.21       mrg }
    811   1.21       mrg 
    812   1.21       mrg /*
    813   1.21       mrg  * sys_madvise: give advice about memory usage.
    814   1.21       mrg  */
    815   1.21       mrg 
    816   1.21       mrg /* ARGSUSED */
    817   1.21       mrg int
    818  1.129      yamt sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
    819  1.129      yamt    register_t *retval)
    820   1.21       mrg {
    821  1.119       dsl 	/* {
    822  1.108  christos 		syscallarg(void *) addr;
    823   1.21       mrg 		syscallarg(size_t) len;
    824   1.21       mrg 		syscallarg(int) behav;
    825  1.119       dsl 	} */
    826   1.67   thorpej 	struct proc *p = l->l_proc;
    827   1.21       mrg 	vaddr_t addr;
    828   1.21       mrg 	vsize_t size, pageoff;
    829   1.50       chs 	int advice, error;
    830   1.51       chs 
    831   1.21       mrg 	addr = (vaddr_t)SCARG(uap, addr);
    832   1.21       mrg 	size = (vsize_t)SCARG(uap, len);
    833   1.21       mrg 	advice = SCARG(uap, behav);
    834   1.21       mrg 
    835   1.21       mrg 	/*
    836   1.21       mrg 	 * align the address to a page boundary, and adjust the size accordingly
    837   1.21       mrg 	 */
    838   1.50       chs 
    839   1.21       mrg 	pageoff = (addr & PAGE_MASK);
    840   1.21       mrg 	addr -= pageoff;
    841   1.21       mrg 	size += pageoff;
    842   1.50       chs 	size = (vsize_t)round_page(size);
    843   1.21       mrg 
    844  1.115      yamt 	error = range_test(addr, size, false);
    845  1.115      yamt 	if (error)
    846  1.115      yamt 		return error;
    847   1.29   thorpej 
    848   1.29   thorpej 	switch (advice) {
    849   1.29   thorpej 	case MADV_NORMAL:
    850   1.29   thorpej 	case MADV_RANDOM:
    851   1.29   thorpej 	case MADV_SEQUENTIAL:
    852   1.50       chs 		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
    853   1.29   thorpej 		    advice);
    854   1.29   thorpej 		break;
    855   1.29   thorpej 
    856   1.29   thorpej 	case MADV_WILLNEED:
    857   1.50       chs 
    858   1.29   thorpej 		/*
    859   1.29   thorpej 		 * Activate all these pages, pre-faulting them in if
    860   1.29   thorpej 		 * necessary.
    861   1.29   thorpej 		 */
    862  1.130      yamt 		error = uvm_map_willneed(&p->p_vmspace->vm_map,
    863  1.130      yamt 		    addr, addr + size);
    864  1.130      yamt 		break;
    865   1.29   thorpej 
    866   1.29   thorpej 	case MADV_DONTNEED:
    867   1.50       chs 
    868   1.29   thorpej 		/*
    869   1.29   thorpej 		 * Deactivate all these pages.  We don't need them
    870   1.29   thorpej 		 * any more.  We don't, however, toss the data in
    871   1.29   thorpej 		 * the pages.
    872   1.29   thorpej 		 */
    873   1.50       chs 
    874   1.50       chs 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
    875   1.29   thorpej 		    PGO_DEACTIVATE);
    876   1.29   thorpej 		break;
    877   1.29   thorpej 
    878   1.29   thorpej 	case MADV_FREE:
    879   1.50       chs 
    880   1.29   thorpej 		/*
    881   1.29   thorpej 		 * These pages contain no valid data, and may be
    882   1.45     soren 		 * garbage-collected.  Toss all resources, including
    883   1.30   thorpej 		 * any swap space in use.
    884   1.29   thorpej 		 */
    885   1.50       chs 
    886   1.50       chs 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
    887   1.29   thorpej 		    PGO_FREE);
    888   1.29   thorpej 		break;
    889   1.29   thorpej 
    890   1.29   thorpej 	case MADV_SPACEAVAIL:
    891   1.50       chs 
    892   1.29   thorpej 		/*
    893   1.29   thorpej 		 * XXXMRG What is this?  I think it's:
    894   1.29   thorpej 		 *
    895   1.29   thorpej 		 *	Ensure that we have allocated backing-store
    896   1.29   thorpej 		 *	for these pages.
    897   1.29   thorpej 		 *
    898   1.29   thorpej 		 * This is going to require changes to the page daemon,
    899   1.29   thorpej 		 * as it will free swap space allocated to pages in core.
    900   1.29   thorpej 		 * There's also what to do for device/file/anonymous memory.
    901   1.29   thorpej 		 */
    902   1.50       chs 
    903   1.29   thorpej 		return (EINVAL);
    904   1.29   thorpej 
    905   1.29   thorpej 	default:
    906   1.21       mrg 		return (EINVAL);
    907   1.29   thorpej 	}
    908   1.29   thorpej 
    909   1.50       chs 	return error;
    910    1.1       mrg }
    911    1.1       mrg 
    912    1.1       mrg /*
    913    1.1       mrg  * sys_mlock: memory lock
    914    1.1       mrg  */
    915    1.1       mrg 
    916    1.6       mrg int
    917  1.119       dsl sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
    918    1.6       mrg {
    919  1.119       dsl 	/* {
    920   1.10    kleink 		syscallarg(const void *) addr;
    921    1.6       mrg 		syscallarg(size_t) len;
    922  1.119       dsl 	} */
    923   1.67   thorpej 	struct proc *p = l->l_proc;
    924   1.12       eeh 	vaddr_t addr;
    925   1.12       eeh 	vsize_t size, pageoff;
    926    1.6       mrg 	int error;
    927    1.6       mrg 
    928    1.6       mrg 	/*
    929    1.6       mrg 	 * extract syscall args from uap
    930    1.6       mrg 	 */
    931   1.50       chs 
    932   1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    933   1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    934    1.6       mrg 
    935    1.6       mrg 	/*
    936    1.6       mrg 	 * align the address to a page boundary and adjust the size accordingly
    937    1.6       mrg 	 */
    938   1.50       chs 
    939    1.6       mrg 	pageoff = (addr & PAGE_MASK);
    940    1.6       mrg 	addr -= pageoff;
    941    1.6       mrg 	size += pageoff;
    942   1.50       chs 	size = (vsize_t)round_page(size);
    943   1.51       chs 
    944  1.115      yamt 	error = range_test(addr, size, false);
    945  1.115      yamt 	if (error)
    946  1.115      yamt 		return error;
    947    1.1       mrg 
    948    1.6       mrg 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
    949    1.6       mrg 		return (EAGAIN);
    950    1.1       mrg 
    951    1.6       mrg 	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
    952    1.6       mrg 			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
    953    1.6       mrg 		return (EAGAIN);
    954    1.1       mrg 
    955  1.107   thorpej 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
    956   1.35   thorpej 	    0);
    957   1.85    briggs 	if (error == EFAULT)
    958   1.85    briggs 		error = ENOMEM;
    959   1.50       chs 	return error;
    960    1.1       mrg }
    961    1.1       mrg 
    962    1.1       mrg /*
    963    1.1       mrg  * sys_munlock: unlock wired pages
    964    1.1       mrg  */
    965    1.1       mrg 
    966    1.6       mrg int
    967  1.129      yamt sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    968  1.129      yamt     register_t *retval)
    969    1.6       mrg {
    970  1.119       dsl 	/* {
    971   1.10    kleink 		syscallarg(const void *) addr;
    972    1.6       mrg 		syscallarg(size_t) len;
    973  1.119       dsl 	} */
    974   1.67   thorpej 	struct proc *p = l->l_proc;
    975   1.12       eeh 	vaddr_t addr;
    976   1.12       eeh 	vsize_t size, pageoff;
    977    1.6       mrg 	int error;
    978    1.6       mrg 
    979    1.6       mrg 	/*
    980    1.6       mrg 	 * extract syscall args from uap
    981    1.6       mrg 	 */
    982    1.6       mrg 
    983   1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    984   1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    985    1.6       mrg 
    986    1.6       mrg 	/*
    987    1.6       mrg 	 * align the address to a page boundary, and adjust the size accordingly
    988    1.6       mrg 	 */
    989   1.50       chs 
    990    1.6       mrg 	pageoff = (addr & PAGE_MASK);
    991    1.6       mrg 	addr -= pageoff;
    992    1.6       mrg 	size += pageoff;
    993   1.50       chs 	size = (vsize_t)round_page(size);
    994    1.6       mrg 
    995  1.115      yamt 	error = range_test(addr, size, false);
    996  1.115      yamt 	if (error)
    997  1.115      yamt 		return error;
    998    1.1       mrg 
    999  1.107   thorpej 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
   1000   1.35   thorpej 	    0);
   1001   1.85    briggs 	if (error == EFAULT)
   1002   1.85    briggs 		error = ENOMEM;
   1003   1.50       chs 	return error;
   1004   1.22   thorpej }
   1005   1.22   thorpej 
   1006   1.22   thorpej /*
   1007   1.22   thorpej  * sys_mlockall: lock all pages mapped into an address space.
   1008   1.22   thorpej  */
   1009   1.22   thorpej 
   1010   1.22   thorpej int
   1011  1.129      yamt sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
   1012  1.129      yamt     register_t *retval)
   1013   1.22   thorpej {
   1014  1.119       dsl 	/* {
   1015   1.22   thorpej 		syscallarg(int) flags;
   1016  1.119       dsl 	} */
   1017   1.67   thorpej 	struct proc *p = l->l_proc;
   1018   1.22   thorpej 	int error, flags;
   1019   1.22   thorpej 
   1020   1.22   thorpej 	flags = SCARG(uap, flags);
   1021   1.22   thorpej 
   1022   1.22   thorpej 	if (flags == 0 ||
   1023   1.22   thorpej 	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
   1024   1.22   thorpej 		return (EINVAL);
   1025   1.22   thorpej 
   1026   1.25   thorpej 	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
   1027   1.25   thorpej 	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
   1028   1.22   thorpej 	return (error);
   1029   1.22   thorpej }
   1030   1.22   thorpej 
   1031   1.22   thorpej /*
   1032   1.22   thorpej  * sys_munlockall: unlock all pages mapped into an address space.
   1033   1.22   thorpej  */
   1034   1.22   thorpej 
   1035   1.22   thorpej int
   1036  1.119       dsl sys_munlockall(struct lwp *l, const void *v, register_t *retval)
   1037   1.22   thorpej {
   1038   1.67   thorpej 	struct proc *p = l->l_proc;
   1039   1.22   thorpej 
   1040   1.22   thorpej 	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
   1041   1.22   thorpej 	return (0);
   1042    1.1       mrg }
   1043    1.1       mrg 
   1044    1.1       mrg /*
   1045    1.1       mrg  * uvm_mmap: internal version of mmap
   1046    1.1       mrg  *
   1047   1.56       chs  * - used by sys_mmap and various framebuffers
   1048   1.56       chs  * - handle is a vnode pointer or NULL for MAP_ANON
   1049    1.1       mrg  * - caller must page-align the file offset
   1050    1.1       mrg  */
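                          /*
                           * A minimal caller sketch (illustrative; mirrors the sys_mmap()
                           * call above, with hypothetical locals p, vp and size):
                           *
                           *	vaddr_t va = 0;
                           *	error = uvm_mmap(&p->p_vmspace->vm_map, &va, size,
                           *	    VM_PROT_READ, VM_PROT_READ | VM_PROT_WRITE, MAP_SHARED,
                           *	    vp, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
                           *
                           * For regular files uvm_mmap() takes its own vnode reference; on
                           * success va holds the chosen, page-aligned address.
                           */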
   1051    1.1       mrg 
   1052    1.6       mrg int
   1053  1.129      yamt uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
   1054  1.129      yamt     vm_prot_t maxprot, int flags, void *handle, voff_t foff, vsize_t locklimit)
   1055    1.6       mrg {
   1056    1.6       mrg 	struct uvm_object *uobj;
   1057    1.6       mrg 	struct vnode *vp;
   1058   1.70      matt 	vaddr_t align = 0;
   1059   1.50       chs 	int error;
   1060    1.6       mrg 	int advice = UVM_ADV_NORMAL;
   1061    1.6       mrg 	uvm_flag_t uvmflag = 0;
   1062  1.106   thorpej 	bool needwritemap;
   1063    1.6       mrg 
   1064    1.6       mrg 	/*
   1065    1.6       mrg 	 * check params
   1066    1.6       mrg 	 */
   1067    1.6       mrg 
   1068    1.6       mrg 	if (size == 0)
   1069    1.6       mrg 		return(0);
   1070    1.6       mrg 	if (foff & PAGE_MASK)
   1071    1.6       mrg 		return(EINVAL);
   1072    1.6       mrg 	if ((prot & maxprot) != prot)
   1073    1.6       mrg 		return(EINVAL);
   1074    1.6       mrg 
   1075    1.6       mrg 	/*
   1076    1.6       mrg 	 * for non-fixed mappings, round off the suggested address.
   1077    1.6       mrg 	 * for fixed mappings, check alignment and zap old mappings.
   1078    1.6       mrg 	 */
   1079    1.6       mrg 
   1080    1.6       mrg 	if ((flags & MAP_FIXED) == 0) {
   1081   1.56       chs 		*addr = round_page(*addr);
   1082    1.6       mrg 	} else {
   1083    1.6       mrg 		if (*addr & PAGE_MASK)
   1084    1.6       mrg 			return(EINVAL);
   1085    1.6       mrg 		uvmflag |= UVM_FLAG_FIXED;
   1086   1.56       chs 		(void) uvm_unmap(map, *addr, *addr + size);
   1087    1.6       mrg 	}
   1088    1.6       mrg 
   1089    1.6       mrg 	/*
    1090   1.70      matt 	 * Try to see if any requested alignment can even be attempted.
    1091   1.70      matt 	 * Make sure we can express the alignment (asking for a >= 4GB
    1092   1.70      matt 	 * alignment on an ILP32 architecture makes no sense) and that
    1093   1.70      matt 	 * the alignment is at least a page sized quantity.  If the
    1094   1.70      matt 	 * request was for a fixed mapping, make sure the supplied address
    1095   1.70      matt 	 * adheres to the requested alignment.
   1096   1.70      matt 	 */
   1097   1.70      matt 	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
   1098   1.70      matt 	if (align) {
   1099   1.70      matt 		if (align >= sizeof(vaddr_t) * NBBY)
   1100   1.70      matt 			return(EINVAL);
   1101   1.70      matt 		align = 1L << align;
   1102   1.70      matt 		if (align < PAGE_SIZE)
   1103   1.70      matt 			return(EINVAL);
   1104   1.88       chs 		if (align >= vm_map_max(map))
   1105   1.70      matt 			return(ENOMEM);
   1106   1.70      matt 		if (flags & MAP_FIXED) {
   1107   1.70      matt 			if ((*addr & (align-1)) != 0)
   1108   1.70      matt 				return(EINVAL);
   1109   1.70      matt 			align = 0;
   1110   1.70      matt 		}
   1111   1.70      matt 	}
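                          	/*
                          	 * Worked example (illustrative): MAP_ALIGNED(16) stores 16 in
                          	 * the alignment field decoded above, so align becomes
                          	 * 1L << 16, i.e. a 64KB alignment request.  A MAP_FIXED
                          	 * request must then already be 64KB-aligned; a non-fixed one
                          	 * passes align down to uvm_map() below.
                          	 */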
   1112   1.70      matt 
   1113   1.70      matt 	/*
   1114  1.128       mrg 	 * check resource limits
   1115  1.128       mrg 	 */
   1116  1.128       mrg 
   1117  1.128       mrg 	if (!VM_MAP_IS_KERNEL(map) &&
   1118  1.128       mrg 	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
   1119  1.128       mrg 	    curproc->p_rlimit[RLIMIT_AS].rlim_cur))
   1120  1.128       mrg 		return ENOMEM;
   1121  1.128       mrg 
   1122  1.128       mrg 	/*
   1123    1.6       mrg 	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
   1124    1.6       mrg 	 * to underlying vm object.
   1125    1.6       mrg 	 */
   1126    1.6       mrg 
   1127    1.6       mrg 	if (flags & MAP_ANON) {
   1128   1.95  christos 		KASSERT(handle == NULL);
   1129   1.36   thorpej 		foff = UVM_UNKNOWN_OFFSET;
   1130    1.6       mrg 		uobj = NULL;
   1131    1.6       mrg 		if ((flags & MAP_SHARED) == 0)
   1132    1.6       mrg 			/* XXX: defer amap create */
   1133    1.6       mrg 			uvmflag |= UVM_FLAG_COPYONW;
   1134    1.6       mrg 		else
   1135    1.6       mrg 			/* shared: create amap now */
   1136    1.6       mrg 			uvmflag |= UVM_FLAG_OVERLAY;
   1137    1.6       mrg 
   1138    1.6       mrg 	} else {
   1139   1.95  christos 		KASSERT(handle != NULL);
   1140   1.50       chs 		vp = (struct vnode *)handle;
   1141   1.59   thorpej 
   1142   1.59   thorpej 		/*
   1143   1.59   thorpej 		 * Don't allow mmap for EXEC if the file system
   1144   1.59   thorpej 		 * is mounted NOEXEC.
   1145   1.59   thorpej 		 */
   1146   1.59   thorpej 		if ((prot & PROT_EXEC) != 0 &&
   1147   1.59   thorpej 		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
   1148   1.59   thorpej 			return (EACCES);
   1149   1.59   thorpej 
   1150    1.6       mrg 		if (vp->v_type != VCHR) {
   1151  1.118     pooka 			error = VOP_MMAP(vp, prot, curlwp->l_cred);
   1152   1.55       chs 			if (error) {
   1153   1.55       chs 				return error;
   1154   1.55       chs 			}
   1155  1.113     pooka 			vref(vp);
   1156  1.113     pooka 			uobj = &vp->v_uobj;
   1157   1.57   thorpej 
   1158   1.57   thorpej 			/*
   1159   1.57   thorpej 			 * If the vnode is being mapped with PROT_EXEC,
   1160   1.57   thorpej 			 * then mark it as text.
   1161   1.57   thorpej 			 */
   1162  1.117        ad 			if (prot & PROT_EXEC) {
   1163   1.58   thorpej 				vn_markexec(vp);
   1164  1.117        ad 			}
   1165    1.6       mrg 		} else {
   1166   1.83   darrenr 			int i = maxprot;
   1167   1.83   darrenr 
   1168   1.48   thorpej 			/*
   1169   1.48   thorpej 			 * XXX Some devices don't like to be mapped with
   1170   1.83   darrenr 			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
   1171   1.83   darrenr 			 * XXX have a better way of handling this, right now
   1172   1.48   thorpej 			 */
   1173   1.83   darrenr 			do {
   1174   1.83   darrenr 				uobj = udv_attach((void *) &vp->v_rdev,
   1175   1.83   darrenr 				    (flags & MAP_SHARED) ? i :
   1176   1.83   darrenr 				    (i & ~VM_PROT_WRITE), foff, size);
   1177   1.83   darrenr 				i--;
   1178   1.83   darrenr 			} while ((uobj == NULL) && (i > 0));
   1179  1.131      yamt 			if (uobj == NULL)
   1180  1.131      yamt 				return EINVAL;
   1181    1.6       mrg 			advice = UVM_ADV_RANDOM;
   1182    1.6       mrg 		}
   1183   1.92      yamt 		if ((flags & MAP_SHARED) == 0) {
   1184    1.6       mrg 			uvmflag |= UVM_FLAG_COPYONW;
   1185  1.100       chs 		}
   1186  1.100       chs 
   1187  1.100       chs 		/*
   1188  1.100       chs 		 * Set vnode flags to indicate the new kinds of mapping.
   1189  1.100       chs 		 * We take the vnode lock in exclusive mode here to serialize
   1190  1.100       chs 		 * with direct I/O.
   1191  1.124        ad 		 *
   1192  1.124        ad 		 * Safe to check for these flag values without a lock, as
   1193  1.124        ad 		 * long as a reference to the vnode is held.
   1194  1.100       chs 		 */
   1195  1.117        ad 		needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
   1196  1.100       chs 			(flags & MAP_SHARED) != 0 &&
   1197  1.100       chs 			(maxprot & VM_PROT_WRITE) != 0;
   1198  1.124        ad 		if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
   1199  1.124        ad 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1200  1.117        ad 			vp->v_vflag |= VV_MAPPED;
   1201  1.100       chs 			if (needwritemap) {
   1202  1.136     rmind 				mutex_enter(vp->v_interlock);
   1203  1.117        ad 				vp->v_iflag |= VI_WRMAP;
   1204  1.136     rmind 				mutex_exit(vp->v_interlock);
   1205  1.100       chs 			}
   1206  1.133   hannken 			VOP_UNLOCK(vp);
   1207  1.124        ad 		}
   1208    1.6       mrg 	}
   1209    1.6       mrg 
   1210   1.51       chs 	uvmflag = UVM_MAPFLAG(prot, maxprot,
   1211    1.1       mrg 			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
   1212    1.1       mrg 			advice, uvmflag);
   1213   1.70      matt 	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
   1214   1.50       chs 	if (error) {
   1215   1.50       chs 		if (uobj)
   1216   1.50       chs 			uobj->pgops->pgo_detach(uobj);
   1217   1.50       chs 		return error;
   1218   1.50       chs 	}
   1219    1.1       mrg 
   1220    1.6       mrg 	/*
   1221   1.50       chs 	 * POSIX 1003.1b -- if our address space was configured
   1222   1.50       chs 	 * to lock all future mappings, wire the one we just made.
   1223   1.78   thorpej 	 *
   1224   1.78   thorpej 	 * Also handle the MAP_WIRED flag here.
   1225    1.6       mrg 	 */
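                          	/*
                          	 * E.g. (illustrative): once mlockall(MCL_FUTURE) has marked
                          	 * the map VM_MAP_WIREFUTURE, every mapping made here is wired
                          	 * immediately, subject to the same wiredmax and
                          	 * RLIMIT_MEMLOCK style checks as mlock(2).
                          	 */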
   1226    1.6       mrg 
   1227   1.50       chs 	if (prot == VM_PROT_NONE) {
   1228    1.6       mrg 
   1229   1.25   thorpej 		/*
   1230   1.50       chs 		 * No more work to do in this case.
   1231   1.25   thorpej 		 */
   1232   1.25   thorpej 
   1233   1.50       chs 		return (0);
   1234   1.50       chs 	}
   1235   1.78   thorpej 	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
   1236  1.126        ad 		vm_map_lock(map);
   1237   1.87       chs 		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
   1238   1.87       chs 		    (locklimit != 0 &&
   1239   1.87       chs 		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
   1240   1.87       chs 		     locklimit)) {
   1241   1.50       chs 			vm_map_unlock(map);
   1242   1.50       chs 			uvm_unmap(map, *addr, *addr + size);
   1243   1.50       chs 			return ENOMEM;
   1244   1.25   thorpej 		}
   1245   1.25   thorpej 
   1246   1.50       chs 		/*
   1247   1.50       chs 		 * uvm_map_pageable() always returns the map unlocked.
   1248   1.50       chs 		 */
   1249   1.25   thorpej 
   1250   1.50       chs 		error = uvm_map_pageable(map, *addr, *addr + size,
   1251  1.107   thorpej 					 false, UVM_LK_ENTER);
   1252   1.50       chs 		if (error) {
   1253   1.50       chs 			uvm_unmap(map, *addr, *addr + size);
   1254   1.50       chs 			return error;
   1255   1.50       chs 		}
   1256   1.25   thorpej 		return (0);
   1257   1.25   thorpej 	}
   1258   1.50       chs 	return 0;
   1259    1.1       mrg }
   1260   1.89      fvdl 
   1261   1.89      fvdl vaddr_t
   1262  1.102      yamt uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
   1263   1.89      fvdl {
   1264  1.102      yamt 
   1265  1.146  christos 	if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN)
   1266  1.146  christos 		return VM_DEFAULT_ADDRESS_TOPDOWN(base, sz);
   1267  1.146  christos 	else
   1268  1.146  christos 		return VM_DEFAULT_ADDRESS_BOTTOMUP(base, sz);
   1269   1.89      fvdl }
   1270