/*	$NetBSD: uvm_mmap.c,v 1.70 2003/03/06 00:41:52 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *	Washington University, University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.70 2003/03/06 00:41:52 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <miscfs/specfs/specdev.h>

#include <sys/sa.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>


/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
#if 0
	struct sys_sbrk_args /* {
		syscallarg(intptr_t) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
#if 0
	struct sys_sstk_args /* {
		syscallarg(int) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */
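
/*
 * Illustrative only, not part of the original source: a minimal userland
 * sketch of how this handler is typically exercised, assuming the
 * standard mincore(2) prototype (int mincore(void *, size_t, char *)).
 * Note the handler below requires "addr" to be page aligned, rounds
 * "len" up to whole pages, and stores one status byte per page in "vec".
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int
 *	count_resident(void *addr, size_t len)	(addr must be page aligned)
 *	{
 *		long pgsz = sysconf(_SC_PAGESIZE);
 *		size_t i, npgs = (len + pgsz - 1) / pgsz;
 *		char *vec;
 *		int n = 0;
 *
 *		if ((vec = malloc(npgs)) == NULL)
 *			return (-1);
 *		if (mincore(addr, len, vec) == -1) {
 *			free(vec);
 *			return (-1);	(e.g. ENOMEM for a hole in the range)
 *		}
 *		for (i = 0; i < npgs; i++)
 *			if (vec[i] & 1)
 *				n++;
 *		free(vec);
 *		return (n);
 *	}
 */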

/* ARGSUSED */
int
sys_mincore(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mincore_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * storing the status byte for a page.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		     (entry->next == &map->header ||
		      entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (!UVM_OBJ_IS_VNODE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* top layer */
		uobj = entry->object.uvm_obj;	/* bottom layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			simple_lock(&uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the top layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->u.an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the bottom layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			simple_unlock(&uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p, SCARG(uap, vec), npgs);
	return (error);
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */
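
/*
 * Worked example of the rules above (illustrative, assuming a 4KB
 * PAGE_SIZE): for a !MAP_FIXED call with pos == 0x12345, pageoff is
 * 0x345, so the file position is truncated to 0x12000, the size is
 * grown by 0x345 and rounded up to a whole page, and on success the
 * value returned to the caller is the chosen page-aligned address plus
 * 0x345, so the returned pointer lines up with file offset 0x12345.
 */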

int
sys_mmap(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mmap_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	void *handle;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */
	if ((ssize_t) size < 0)
		return (EINVAL);			/* don't allow wrap */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */

	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		if (VM_MAXUSER_ADDRESS > 0 &&
		    (addr + size) > VM_MAXUSER_ADDRESS)
			return (EFBIG);
		if (vm_min_address > 0 && addr < vm_min_address)
			return (EINVAL);
		if (addr > addr + size)
			return (EOVERFLOW);		/* no wrapping! */

	} else {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr,
			    VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size));
		else
			addr = MIN(addr,
			    VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size));
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {

		if ((fp = fd_getfile(fdp, fd)) == NULL)
			return (EBADF);

		simple_unlock(&fp->f_slock);

		if (fp->f_type != DTYPE_VNODE)
			return (ENODEV);		/* only mmap vnodes! */
		vp = (struct vnode *)fp->f_data;	/* convert to vnode */

		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK)
			return (ENODEV);  /* only REG/CHR/BLK support mmap */

		if (vp->v_type != VCHR && pos < 0)
			return (EINVAL);

		if (vp->v_type != VCHR && (pos + size) < pos)
			return (EOVERFLOW);		/* no offset wrapping */

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR && vp->v_rdev == zerodev) {
			flags |= MAP_ANON;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d comm %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ)
			return (EACCES);

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable, append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, p->p_ucred, p)))
					return (error);
				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE)
					return (EPERM);
			}
			else if (prot & PROT_WRITE)
				return (EACCES);
		} else {
			/* MAP_PRIVATE mappings can always be written to */
    462   1.6       mrg 			maxprot |= VM_PROT_WRITE;
    463   1.6       mrg 		}
    464  1.50       chs 		handle = vp;
    465   1.1       mrg 
    466   1.6       mrg 	} else {		/* MAP_ANON case */
    467  1.24   thorpej 		/*
    468  1.24   thorpej 		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
    469  1.24   thorpej 		 */
    470   1.6       mrg 		if (fd != -1)
    471   1.6       mrg 			return (EINVAL);
    472   1.1       mrg 
    473  1.24   thorpej  is_anon:		/* label for SunOS style /dev/zero */
    474   1.6       mrg 		handle = NULL;
    475   1.6       mrg 		maxprot = VM_PROT_ALL;
    476   1.6       mrg 		pos = 0;
    477  1.28       cgd 	}
    478  1.28       cgd 
    479  1.28       cgd 	/*
    480  1.28       cgd 	 * XXX (in)sanity check.  We don't do proper datasize checking
    481  1.28       cgd 	 * XXX for anonymous (or private writable) mmap().  However,
    482  1.28       cgd 	 * XXX know that if we're trying to allocate more than the amount
    483  1.28       cgd 	 * XXX remaining under our current data size limit, _that_ should
    484  1.28       cgd 	 * XXX be disallowed.
    485  1.28       cgd 	 */
    486  1.28       cgd 	if ((flags & MAP_ANON) != 0 ||
    487  1.28       cgd 	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
    488  1.28       cgd 		if (size >
    489  1.50       chs 		    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
    490  1.50       chs 		     ctob(p->p_vmspace->vm_dsize))) {
    491  1.28       cgd 			return (ENOMEM);
    492  1.28       cgd 		}
    493   1.6       mrg 	}
    494   1.6       mrg 
    495   1.6       mrg 	/*
    496   1.6       mrg 	 * now let kernel internal function uvm_mmap do the work.
    497   1.6       mrg 	 */
    498   1.6       mrg 
    499   1.6       mrg 	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
    500  1.25   thorpej 	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
    501   1.6       mrg 
    502   1.6       mrg 	if (error == 0)
    503   1.6       mrg 		/* remember to add offset */
    504   1.6       mrg 		*retval = (register_t)(addr + pageoff);
    505   1.1       mrg 
    506   1.6       mrg 	return (error);
    507   1.1       mrg }
    508   1.1       mrg 
    509   1.1       mrg /*
    510   1.1       mrg  * sys___msync13: the msync system call (a front-end for flush)
    511   1.1       mrg  */
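
/*
 * Illustrative only, not part of the original source: a minimal userland
 * sketch of driving this handler.  MS_SYNC translates to
 * PGO_CLEANIT|PGO_SYNCIO below (and, per the XXXCDC note there, MS_ASYNC
 * is currently forced synchronous as well); adding MS_INVALIDATE also
 * sets PGO_FREE.
 *
 *	#include <sys/mman.h>
 *
 *	int
 *	flush_range(void *addr, size_t len)
 *	{
 *		return (msync(addr, len, MS_SYNC));
 *	}
 */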

int
sys___msync13(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys___msync13_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == TRUE) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;
	else
		uvmflags |= PGO_SYNCIO;	 /* XXXCDC: force sync for now! */

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_munmap_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((int)size < 0)
		return (EINVAL);
	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (vm_min_address > 0 && addr < vm_min_address)
		return (EINVAL);
	if (addr > addr + size)
		return (EINVAL);
	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mprotect_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(int) len;
		syscallarg(int) prot;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((int)size < 0)
		return (EINVAL);
	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
				FALSE);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_minherit_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((int)size < 0)
		return (EINVAL);
	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
				inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_madvise_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((ssize_t)size <= 0)
		return (EINVAL);

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		/*
		 * XXX IMPLEMENT ME.
		 * Should invent a "weak" mode for uvm_fault()
		 * which would only do the PGO_LOCKED pgo_get().
		 */

		return (0);

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */
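
/*
 * Illustrative only, not part of the original source: a minimal userland
 * sketch.  The handler below rounds the range out to page boundaries and
 * fails with EAGAIN when the request would push the system past
 * uvmexp.wiredmax or the process past RLIMIT_MEMLOCK (where
 * pmap_wired_count is available).
 *
 *	#include <sys/mman.h>
 *
 *	int
 *	pin_buffer(const void *buf, size_t len)
 *	{
 *		return (mlock(buf, len));
 *	}
 */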

int
sys_mlock(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);
#else
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
	    0);
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_munlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
	    0);
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mlockall_args /* {
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

#ifndef pmap_wired_count
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */
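
/*
 * Illustrative sketch, not part of the original source: how a kernel
 * caller (say, a hypothetical framebuffer driver) might invoke
 * uvm_mmap() under the rules above, with "handle" a vnode pointer and
 * "foff" page aligned.  The vnode "vp", map "map", length "len", proc
 * "p" and the chosen protections are assumptions made up for the
 * example, not taken from this file.
 *
 *	vaddr_t va = 0;		(hint only; rounded up since !MAP_FIXED)
 *	int error;
 *
 *	error = uvm_mmap(map, &va, round_page(len), VM_PROT_READ,
 *	    VM_PROT_READ | VM_PROT_WRITE, MAP_SHARED, (void *)vp,
 *	    0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 */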

int
uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
	struct vm_map *map;
	vaddr_t *addr;
	vsize_t size;
	vm_prot_t prot, maxprot;
	int flags;
	void *handle;
	voff_t foff;
	vsize_t locklimit;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}
	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied address
	 * adheres to the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= map->max_offset)
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}
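
	/*
	 * Worked example of the decode above (illustrative): a caller
	 * passing MAP_ALIGNED(16) stores 16 in the MAP_ALIGNMENT_MASK
	 * bits, so align becomes 1L << 16, i.e. a request for 64KB
	 * alignment.  align == 0 means no particular alignment.
	 */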

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, 0, curproc->p_ucred, curproc);
			if (error) {
				return error;
			}

			uobj = uvn_attach((void *)vp, (flags & MAP_SHARED) ?
			   maxprot : (maxprot & ~VM_PROT_WRITE));

			/* XXX for now, attach doesn't gain a ref */
			VREF(vp);

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC)
				vn_markexec(vp);
		} else {
			uobj = udv_attach((void *) &vp->v_rdev,
			    (flags & MAP_SHARED) ? maxprot :
			    (maxprot & ~VM_PROT_WRITE), foff, size);
			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC, but we don't really have a
			 * XXX better way of handling this, right now
			 */
			if (uobj == NULL && (prot & PROT_EXEC) == 0) {
				maxprot &= ~VM_PROT_EXECUTE;
				uobj = udv_attach((void *)&vp->v_rdev,
				    (flags & MAP_SHARED) ? maxprot :
				    (maxprot & ~VM_PROT_WRITE), foff, size);
			}
			advice = UVM_ADV_RANDOM;
		}
		if (uobj == NULL)
			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
		if ((flags & MAP_SHARED) == 0)
			uvmflag |= UVM_FLAG_COPYONW;
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
			advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	vm_map_lock(map);
	if (map->flags & VM_MAP_WIREFUTURE) {
		if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax
#ifdef pmap_wired_count
		    || (locklimit != 0 && (size +
		    ptoa(pmap_wired_count(vm_map_pmap(map)))) >
			locklimit)
#endif
		) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
					 FALSE, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	vm_map_unlock(map);
	return 0;
}