/*	$NetBSD: uvm_mmap.c,v 1.117 2007/10/10 20:42:41 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Charles D. Cranor,
 *	Washington University, University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.117 2007/10/10 20:42:41 ad Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <miscfs/specfs/specdev.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

static int
range_test(vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	vaddr_t vm_max_address = VM_MAXUSER_ADDRESS;
	vaddr_t eaddr = addr + size;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;
	return 0;
}
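
/*
 * An illustrative note on the checks above: eaddr = addr + size can
 * wrap past zero for a huge size, in which case eaddr < addr and the
 * "no wrapping" test fires (EOVERFLOW for mmap, EINVAL for the other
 * callers); a non-wrapping end address above VM_MAXUSER_ADDRESS is
 * caught one test earlier (EFBIG for mmap, EINVAL otherwise).
 */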

/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(struct lwp *l, void *v, register_t *retval)
{
#if 0
	struct sys_sbrk_args /* {
		syscallarg(intptr_t) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(struct lwp *l, void *v, register_t *retval)
{
#if 0
	struct sys_sstk_args /* {
		syscallarg(int) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */
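
/*
 * For illustration, a typical userland use (a sketch only; "buf" is a
 * page-aligned mapping of "len" bytes and "NPAGES" is a hypothetical
 * count of pages in that range):
 *
 *	char vec[NPAGES];
 *
 *	if (mincore(buf, len, vec) == 0 && (vec[0] & 1))
 *		printf("first page is resident\n");
 *
 * Each status byte is written with subyte(): 1 for a resident page,
 * 0 otherwise.
 */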

/* ARGSUSED */
int
sys_mincore(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mincore_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * storing the status byte for a page.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		     (entry->next == &map->header ||
		      entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* top layer */
		uobj = entry->object.uvm_obj;	/* bottom layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			simple_lock(&uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the top layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the bottom layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			simple_unlock(&uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return (error);
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */
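
/*
 * For illustration (a sketch assuming 4KB pages): mmap(NULL, 100,
 * PROT_READ, MAP_PRIVATE, fd, 0x1200) has pageoff = 0x200, so the
 * kernel maps a page-aligned region backed by the file from offset
 * 0x1000 and the returned pointer is addr + 0x200, i.e. it points at
 * file offset 0x1200.  With MAP_FIXED, the supplied addr must itself
 * be congruent to 0x200 mod PAGE_SIZE or the call fails with EINVAL.
 */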

int
sys_mmap(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mmap_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t defaddr;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = NULL;
	struct vnode *vp;
	void *handle;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		error = range_test(addr, size, true);
		if (error)
			return error;
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {

		if ((fp = fd_getfile(fdp, fd)) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			mutex_exit(&fp->f_lock);
			return (ENODEV);		/* only mmap vnodes! */
		}
		vp = (struct vnode *)fp->f_data;	/* convert to vnode */

		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			mutex_exit(&fp->f_lock);
			return (ENODEV);  /* only REG/CHR/BLK support mmap */
		}
		if (vp->v_type != VCHR && pos < 0) {
			mutex_exit(&fp->f_lock);
			return (EINVAL);
		}
		if (vp->v_type != VCHR && (pos + size) < pos) {
			mutex_exit(&fp->f_lock);
			return (EOVERFLOW);		/* no offset wrapping */
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			mutex_exit(&fp->f_lock);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			mutex_exit(&fp->f_lock);
			return (EACCES);
		}
		FILE_USE(fp);

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable, append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, l->l_cred, l))) {
					FILE_UNUSE(fp, l);
					return (error);
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					FILE_UNUSE(fp, l);
					return (EPERM);
				}
			} else if (prot & PROT_WRITE) {
				FILE_UNUSE(fp, l);
				return (EACCES);
			}
		} else {
			/* MAP_PRIVATE mappings can always be written to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	/*
	 * XXX (in)sanity check.  We don't do proper datasize checking
	 * XXX for anonymous (or private writable) mmap().  However, we
	 * XXX do know that if we're trying to allocate more than the
	 * XXX amount remaining under our current data size limit, _that_
	 * XXX should be disallowed.
	 */
	if ((flags & MAP_ANON) != 0 ||
	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
		if (size >
		    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
		     ctob(p->p_vmspace->vm_dsize))) {
			if (fp != NULL)
				FILE_UNUSE(fp, l);
			return (ENOMEM);
		}
	}

#if NVERIEXEC > 0
	if (handle != NULL) {
		/*
		 * Check if the file can be executed indirectly.
		 *
		 * XXX: This gives false warnings about "Incorrect access type"
		 * XXX: if the mapping is not executable. Harmless, but will be
		 * XXX: fixed as part of other changes.
		 */
		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
		    NULL)) {
			/*
			 * Don't allow executable mappings if we can't
			 * indirectly execute the file.
			 */
			if (prot & VM_PROT_EXECUTE) {
				if (fp != NULL)
					FILE_UNUSE(fp, l);
				return (EPERM);
			}

			/*
			 * Strip the executable bit from 'maxprot' to make sure
			 * it can't be made executable later.
			 */
			maxprot &= ~VM_PROT_EXECUTE;
		}
	}
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	if (fp != NULL)
		FILE_UNUSE(fp, l);

	return (error);
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */
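
/*
 * For illustration, a typical userland call (a sketch): after storing
 * through a MAP_SHARED file mapping "p" of "len" bytes,
 *
 *	msync(p, len, MS_SYNC);
 *
 * waits for the dirty pages to reach the backing object, MS_ASYNC
 * just starts the write-back, and MS_INVALIDATE additionally frees
 * the cached pages (see the PGO_ flag translation below).
 */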

int
sys___msync13(struct lwp *l, void *v, register_t *retval)
{
	struct sys___msync13_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, void *v, register_t *retval)
{
	struct sys_munmap_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if (size == 0)
		return (0);

	error = range_test(addr, size, false);
	if (error)
		return error;

	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mprotect_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
				false);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_minherit_args /* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
				inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, void *v, register_t *retval)
{
	struct sys_madvise_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		/*
		 * XXX IMPLEMENT ME.
		 * Should invent a "weak" mode for uvm_fault()
		 * which would only do the PGO_LOCKED pgo_get().
		 */

		return (0);

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, void *v, register_t *retval)
{
	struct sys_munlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mlockall_args /* {
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */
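
/*
 * For illustration, a driver might establish a shared mapping of a
 * vnode roughly as follows (a sketch only; "vp" and "len" are
 * hypothetical and error handling is elided):
 *
 *	vaddr_t va = 0;
 *
 *	error = uvm_mmap(&curproc->p_vmspace->vm_map, &va, len,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    MAP_SHARED, vp, 0,
 *	    curproc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * On success the chosen user address is returned in "va"; with
 * MAP_FIXED it must be supplied page-aligned instead.
 */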

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, void *handle, voff_t foff,
    vsize_t locklimit)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;
	bool needwritemap;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that the
	 * alignment is at least a page-sized quantity.  If the request
	 * was for a fixed mapping, make sure the supplied address adheres
	 * to the requested alignment.
	 */
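	/*
	 * For illustration: a caller passing MAP_ALIGNED(16) in "flags"
	 * yields align = 16 here, which becomes 1L << 16, a request for
	 * 64KB alignment; shift amounts below PAGE_SHIFT (an alignment
	 * smaller than a page) or >= the bit width of vaddr_t are
	 * rejected below.
	 */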
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= vm_map_max(map))
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(handle == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(handle != NULL);
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, prot, curlwp->l_cred, curlwp);
			if (error) {
				return error;
			}
			vref(vp);
			uobj = &vp->v_uobj;

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC) {
				simple_lock(&uobj->vmobjlock);
				vn_markexec(vp);
				simple_unlock(&uobj->vmobjlock);
			}
		} else {
			int i = maxprot;

			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
			 * XXX have a better way of handling this, right now
			 */
			do {
				uobj = udv_attach((void *) &vp->v_rdev,
				    (flags & MAP_SHARED) ? i :
				    (i & ~VM_PROT_WRITE), foff, size);
				i--;
			} while ((uobj == NULL) && (i > 0));
			advice = UVM_ADV_RANDOM;
		}
		if (uobj == NULL)
			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}

		/*
		 * Set vnode flags to indicate the new kinds of mapping.
		 * We take the vnode lock in exclusive mode here to serialize
		 * with direct I/O.
		 */

		simple_lock(&vp->v_interlock);
		needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
			(flags & MAP_SHARED) != 0 &&
			(maxprot & VM_PROT_WRITE) != 0;
		if ((vp->v_iflag & VI_MAPPED) == 0 || needwritemap) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK);
			simple_lock(&vp->v_interlock);
			vp->v_iflag |= VI_MAPPED;
			vp->v_vflag |= VV_MAPPED;
			if (needwritemap) {
				vp->v_iflag |= VI_WRMAP;
			}
			simple_unlock(&vp->v_interlock);
			VOP_UNLOCK(vp, 0);
		} else
			simple_unlock(&vp->v_interlock);
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
			advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */
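
	/*
	 * (Illustration: a process that has called mlockall(MCL_FUTURE)
	 * has VM_MAP_WIREFUTURE set on its map, so the mapping entered
	 * above is wired immediately below.)
	 */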

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	vm_map_lock(map);
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
					 false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	vm_map_unlock(map);
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return VM_DEFAULT_ADDRESS(base, sz);
}