uvm_mmap.c revision 1.91.2.7
      1  1.91.2.7      yamt /*	$NetBSD: uvm_mmap.c,v 1.91.2.7 2008/01/21 09:48:22 yamt Exp $	*/
      2       1.1       mrg 
      3       1.1       mrg /*
      4       1.1       mrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5      1.51       chs  * Copyright (c) 1991, 1993 The Regents of the University of California.
      6       1.1       mrg  * Copyright (c) 1988 University of Utah.
      7      1.51       chs  *
      8       1.1       mrg  * All rights reserved.
      9       1.1       mrg  *
     10       1.1       mrg  * This code is derived from software contributed to Berkeley by
     11       1.1       mrg  * the Systems Programming Group of the University of Utah Computer
     12       1.1       mrg  * Science Department.
     13       1.1       mrg  *
     14       1.1       mrg  * Redistribution and use in source and binary forms, with or without
     15       1.1       mrg  * modification, are permitted provided that the following conditions
     16       1.1       mrg  * are met:
     17       1.1       mrg  * 1. Redistributions of source code must retain the above copyright
     18       1.1       mrg  *    notice, this list of conditions and the following disclaimer.
     19       1.1       mrg  * 2. Redistributions in binary form must reproduce the above copyright
     20       1.1       mrg  *    notice, this list of conditions and the following disclaimer in the
     21       1.1       mrg  *    documentation and/or other materials provided with the distribution.
     22       1.1       mrg  * 3. All advertising materials mentioning features or use of this software
     23       1.1       mrg  *    must display the following acknowledgement:
     24       1.1       mrg  *      This product includes software developed by Charles D. Cranor,
     25      1.51       chs  *	Washington University, University of California, Berkeley and
     26       1.1       mrg  *	its contributors.
     27       1.1       mrg  * 4. Neither the name of the University nor the names of its contributors
     28       1.1       mrg  *    may be used to endorse or promote products derived from this software
     29       1.1       mrg  *    without specific prior written permission.
     30       1.1       mrg  *
     31       1.1       mrg  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     32       1.1       mrg  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     33       1.1       mrg  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     34       1.1       mrg  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     35       1.1       mrg  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     36       1.1       mrg  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     37       1.1       mrg  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     38       1.1       mrg  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     39       1.1       mrg  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     40       1.1       mrg  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     41       1.1       mrg  * SUCH DAMAGE.
     42       1.1       mrg  *
     43       1.1       mrg  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
     44       1.1       mrg  *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
     45       1.3       mrg  * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
     46       1.1       mrg  */
     47       1.1       mrg 
     48       1.1       mrg /*
     49       1.1       mrg  * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
     50       1.1       mrg  * function.
     51       1.1       mrg  */
     52      1.60     lukem 
     53      1.60     lukem #include <sys/cdefs.h>
     54  1.91.2.7      yamt __KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.91.2.7 2008/01/21 09:48:22 yamt Exp $");
     55      1.80  jdolecek 
     56      1.80  jdolecek #include "opt_compat_netbsd.h"
     57  1.91.2.1      yamt #include "opt_pax.h"
     58  1.91.2.2      yamt #include "veriexec.h"
     59      1.60     lukem 
     60       1.1       mrg #include <sys/param.h>
     61       1.1       mrg #include <sys/systm.h>
     62       1.1       mrg #include <sys/file.h>
     63       1.1       mrg #include <sys/filedesc.h>
     64       1.1       mrg #include <sys/resourcevar.h>
     65       1.1       mrg #include <sys/mman.h>
     66       1.1       mrg #include <sys/mount.h>
     67       1.1       mrg #include <sys/proc.h>
     68       1.1       mrg #include <sys/malloc.h>
     69       1.1       mrg #include <sys/vnode.h>
     70       1.1       mrg #include <sys/conf.h>
     71       1.9       mrg #include <sys/stat.h>
     72  1.91.2.2      yamt 
     73  1.91.2.2      yamt #if NVERIEXEC > 0
     74  1.91.2.2      yamt #include <sys/verified_exec.h>
     75  1.91.2.2      yamt #endif /* NVERIEXEC > 0 */
     76  1.91.2.1      yamt 
     77  1.91.2.1      yamt #ifdef PAX_MPROTECT
     78  1.91.2.1      yamt #include <sys/pax.h>
     79  1.91.2.1      yamt #endif /* PAX_MPROTECT */
     80       1.1       mrg 
     81       1.1       mrg #include <miscfs/specfs/specdev.h>
     82       1.1       mrg 
     83       1.1       mrg #include <sys/syscallargs.h>
     84       1.1       mrg 
     85       1.1       mrg #include <uvm/uvm.h>
     86       1.1       mrg #include <uvm/uvm_device.h>
     87       1.1       mrg 
     88      1.80  jdolecek #ifndef COMPAT_ZERODEV
     89      1.81       dsl #define COMPAT_ZERODEV(dev)	(0)
     90      1.80  jdolecek #endif
     91       1.1       mrg 
     92  1.91.2.5      yamt static int
     93  1.91.2.5      yamt range_test(vaddr_t addr, vsize_t size, bool ismmap)
     94  1.91.2.5      yamt {
     95  1.91.2.5      yamt 	vaddr_t vm_min_address = VM_MIN_ADDRESS;
     96  1.91.2.5      yamt 	vaddr_t vm_max_address = VM_MAXUSER_ADDRESS;
     97  1.91.2.5      yamt 	vaddr_t eaddr = addr + size;
     98  1.91.2.5      yamt 
     99  1.91.2.5      yamt 	if (addr < vm_min_address)
    100  1.91.2.5      yamt 		return EINVAL;
    101  1.91.2.5      yamt 	if (eaddr > vm_max_address)
    102  1.91.2.5      yamt 		return ismmap ? EFBIG : EINVAL;
    103  1.91.2.5      yamt 	if (addr > eaddr) /* no wrapping! */
    104  1.91.2.5      yamt 		return ismmap ? EOVERFLOW : EINVAL;
    105  1.91.2.5      yamt 	return 0;
    106  1.91.2.5      yamt }
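/*
 * Editorial sketch (added; not part of the original source): how the
 * checks above surface to callers, writing "max" for VM_MAXUSER_ADDRESS.
 *
 *	addr below VM_MIN_ADDRESS       -> EINVAL
 *	addr + size > max               -> EFBIG (mmap) / EINVAL (others)
 *	addr + size wraps past the top  -> EOVERFLOW (mmap) / EINVAL (others)
 *	anything else                   -> 0
 */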
    107  1.91.2.4      yamt 
    108       1.1       mrg /*
    109       1.1       mrg  * unimplemented VM system calls:
    110       1.1       mrg  */
    111       1.1       mrg 
    112       1.1       mrg /*
    113       1.1       mrg  * sys_sbrk: sbrk system call.
    114       1.1       mrg  */
    115       1.1       mrg 
    116       1.1       mrg /* ARGSUSED */
    117       1.6       mrg int
    118  1.91.2.7      yamt sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
    119       1.1       mrg {
    120  1.91.2.7      yamt 	/* {
    121      1.33    kleink 		syscallarg(intptr_t) incr;
    122  1.91.2.7      yamt 	} */
    123       1.6       mrg 
    124      1.17    kleink 	return (ENOSYS);
    125       1.1       mrg }
    126       1.1       mrg 
    127       1.1       mrg /*
    128       1.1       mrg  * sys_sstk: sstk system call.
    129       1.1       mrg  */
    130       1.1       mrg 
    131       1.1       mrg /* ARGSUSED */
    132       1.6       mrg int
    133  1.91.2.7      yamt sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
    134       1.1       mrg {
    135  1.91.2.7      yamt 	/* {
    136      1.20       mrg 		syscallarg(int) incr;
    137  1.91.2.7      yamt 	} */
    138       1.6       mrg 
    139      1.17    kleink 	return (ENOSYS);
    140       1.1       mrg }
    141       1.1       mrg 
    142       1.1       mrg /*
    143       1.1       mrg  * sys_mincore: determine if pages are in core or not.
    144       1.1       mrg  */
    145       1.1       mrg 
    146       1.1       mrg /* ARGSUSED */
    147       1.6       mrg int
    148  1.91.2.7      yamt sys_mincore(struct lwp *l, const struct sys_mincore_args *uap, register_t *retval)
    149       1.1       mrg {
    150  1.91.2.7      yamt 	/* {
    151      1.22   thorpej 		syscallarg(void *) addr;
    152      1.20       mrg 		syscallarg(size_t) len;
    153      1.20       mrg 		syscallarg(char *) vec;
    154  1.91.2.7      yamt 	} */
    155      1.67   thorpej 	struct proc *p = l->l_proc;
    156      1.56       chs 	struct vm_page *pg;
    157      1.22   thorpej 	char *vec, pgi;
    158      1.22   thorpej 	struct uvm_object *uobj;
    159      1.22   thorpej 	struct vm_amap *amap;
    160      1.22   thorpej 	struct vm_anon *anon;
    161      1.53       chs 	struct vm_map_entry *entry;
    162      1.22   thorpej 	vaddr_t start, end, lim;
    163      1.53       chs 	struct vm_map *map;
    164      1.22   thorpej 	vsize_t len;
    165      1.22   thorpej 	int error = 0, npgs;
    166      1.22   thorpej 
    167      1.22   thorpej 	map = &p->p_vmspace->vm_map;
    168      1.22   thorpej 
    169      1.22   thorpej 	start = (vaddr_t)SCARG(uap, addr);
    170      1.22   thorpej 	len = SCARG(uap, len);
    171      1.22   thorpej 	vec = SCARG(uap, vec);
    172      1.22   thorpej 
    173      1.22   thorpej 	if (start & PAGE_MASK)
    174      1.22   thorpej 		return (EINVAL);
    175      1.22   thorpej 	len = round_page(len);
    176      1.22   thorpej 	end = start + len;
    177      1.22   thorpej 	if (end <= start)
    178      1.22   thorpej 		return (EINVAL);
    179      1.22   thorpej 
    180      1.22   thorpej 	/*
    181      1.22   thorpej 	 * Lock down vec, so that storing the status byte for a page
    182      1.22   thorpej 	 * cannot fault and make the status we return outdated.
    183      1.22   thorpej 	 */
    184      1.50       chs 
    185      1.62       chs 	npgs = len >> PAGE_SHIFT;
    186  1.91.2.2      yamt 	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
    187      1.62       chs 	if (error) {
    188      1.62       chs 		return error;
    189      1.62       chs 	}
    190      1.22   thorpej 	vm_map_lock_read(map);
    191      1.22   thorpej 
    192  1.91.2.3      yamt 	if (uvm_map_lookup_entry(map, start, &entry) == false) {
    193      1.22   thorpej 		error = ENOMEM;
    194      1.22   thorpej 		goto out;
    195      1.22   thorpej 	}
    196      1.22   thorpej 
    197      1.22   thorpej 	for (/* nothing */;
    198      1.22   thorpej 	     entry != &map->header && entry->start < end;
    199      1.22   thorpej 	     entry = entry->next) {
    200      1.49       chs 		KASSERT(!UVM_ET_ISSUBMAP(entry));
    201      1.49       chs 		KASSERT(start >= entry->start);
    202      1.49       chs 
    203      1.22   thorpej 		/* Make sure there are no holes. */
    204      1.22   thorpej 		if (entry->end < end &&
    205      1.22   thorpej 		     (entry->next == &map->header ||
    206      1.22   thorpej 		      entry->next->start > entry->end)) {
    207      1.22   thorpej 			error = ENOMEM;
    208      1.22   thorpej 			goto out;
    209      1.22   thorpej 		}
    210       1.6       mrg 
    211      1.22   thorpej 		lim = end < entry->end ? end : entry->end;
    212      1.22   thorpej 
    213      1.22   thorpej 		/*
    214      1.31   thorpej 		 * Special case for objects with no "real" pages.  Those
    215      1.31   thorpej 		 * are always considered resident (mapped devices).
    216      1.22   thorpej 		 */
    217      1.50       chs 
    218      1.22   thorpej 		if (UVM_ET_ISOBJ(entry)) {
    219      1.49       chs 			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
    220      1.79      yamt 			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
    221      1.22   thorpej 				for (/* nothing */; start < lim;
    222      1.22   thorpej 				     start += PAGE_SIZE, vec++)
    223      1.22   thorpej 					subyte(vec, 1);
    224      1.22   thorpej 				continue;
    225      1.22   thorpej 			}
    226      1.22   thorpej 		}
    227      1.22   thorpej 
    228      1.32   thorpej 		amap = entry->aref.ar_amap;	/* top layer */
    229      1.32   thorpej 		uobj = entry->object.uvm_obj;	/* bottom layer */
    230      1.22   thorpej 
    231      1.22   thorpej 		if (amap != NULL)
    232      1.22   thorpej 			amap_lock(amap);
    233      1.22   thorpej 		if (uobj != NULL)
    234  1.91.2.7      yamt 			mutex_enter(&uobj->vmobjlock);
    235      1.22   thorpej 
    236      1.22   thorpej 		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
    237      1.22   thorpej 			pgi = 0;
    238      1.22   thorpej 			if (amap != NULL) {
    239      1.22   thorpej 				/* Check the top layer first. */
    240      1.22   thorpej 				anon = amap_lookup(&entry->aref,
    241      1.22   thorpej 				    start - entry->start);
    242      1.22   thorpej 				/* Don't need to lock anon here. */
    243      1.91      yamt 				if (anon != NULL && anon->an_page != NULL) {
    244      1.50       chs 
    245      1.22   thorpej 					/*
    246      1.22   thorpej 					 * Anon has the page for this entry
    247      1.22   thorpej 					 * offset.
    248      1.22   thorpej 					 */
    249      1.50       chs 
    250      1.22   thorpej 					pgi = 1;
    251      1.22   thorpej 				}
    252      1.22   thorpej 			}
    253      1.22   thorpej 			if (uobj != NULL && pgi == 0) {
    254      1.22   thorpej 				/* Check the bottom layer. */
    255      1.56       chs 				pg = uvm_pagelookup(uobj,
    256      1.22   thorpej 				    entry->offset + (start - entry->start));
    257      1.56       chs 				if (pg != NULL) {
    258      1.50       chs 
    259      1.22   thorpej 					/*
    260      1.22   thorpej 					 * Object has the page for this entry
    261      1.22   thorpej 					 * offset.
    262      1.22   thorpej 					 */
    263      1.50       chs 
    264      1.22   thorpej 					pgi = 1;
    265      1.22   thorpej 				}
    266      1.22   thorpej 			}
    267      1.22   thorpej 			(void) subyte(vec, pgi);
    268      1.22   thorpej 		}
    269      1.22   thorpej 		if (uobj != NULL)
    270  1.91.2.7      yamt 			mutex_exit(&uobj->vmobjlock);
    271      1.22   thorpej 		if (amap != NULL)
    272      1.22   thorpej 			amap_unlock(amap);
    273      1.22   thorpej 	}
    274      1.22   thorpej 
    275      1.22   thorpej  out:
    276      1.22   thorpej 	vm_map_unlock_read(map);
    277  1.91.2.2      yamt 	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
    278      1.22   thorpej 	return (error);
    279       1.1       mrg }
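/*
 * Editorial sketch (added): the userland counterpart of the residency
 * scan above.  Variable names are assumptions for the example.
 *
 *	size_t pgsz = (size_t)getpagesize();
 *	size_t npg = (len + pgsz - 1) / pgsz;
 *	char *vec = malloc(npg);
 *
 *	if (vec != NULL && mincore(addr, len, vec) == 0) {
 *		// vec[i] != 0: the i-th page of the range is resident
 *	}
 */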
    280       1.1       mrg 
    281       1.1       mrg /*
    282       1.1       mrg  * sys_mmap: mmap system call.
    283       1.1       mrg  *
    284      1.64    atatat  * => file offset and address may not be page aligned
    285       1.1       mrg  *    - if MAP_FIXED, offset and address must have the same remainder mod PAGE_SIZE
    286       1.1       mrg  *    - if address isn't page aligned the mapping starts at trunc_page(addr)
    287       1.1       mrg  *      and the return value is adjusted up by the page offset.
    288       1.1       mrg  */
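/*
 * Editorial example (added): with 4 KB pages (an assumption for the
 * arithmetic), a MAP_FIXED request for file offset 0x1234 at address
 * base + 0x234 (base page-aligned) is accepted: both share the
 * remainder 0x234 mod PAGE_SIZE, the page offset is stripped from
 * both, and the returned address is base + 0x234.  If the remainders
 * differed, the alignment check below would fail with EINVAL.
 */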
    289       1.1       mrg 
    290       1.6       mrg int
    291  1.91.2.7      yamt sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
    292       1.6       mrg {
    293  1.91.2.7      yamt 	/* {
    294  1.91.2.4      yamt 		syscallarg(void *) addr;
    295       1.6       mrg 		syscallarg(size_t) len;
    296       1.6       mrg 		syscallarg(int) prot;
    297       1.6       mrg 		syscallarg(int) flags;
    298       1.6       mrg 		syscallarg(int) fd;
    299       1.6       mrg 		syscallarg(long) pad;
    300       1.6       mrg 		syscallarg(off_t) pos;
    301  1.91.2.7      yamt 	} */
    302      1.67   thorpej 	struct proc *p = l->l_proc;
    303      1.12       eeh 	vaddr_t addr;
    304       1.9       mrg 	struct vattr va;
    305       1.6       mrg 	off_t pos;
    306      1.12       eeh 	vsize_t size, pageoff;
    307       1.6       mrg 	vm_prot_t prot, maxprot;
    308       1.6       mrg 	int flags, fd;
    309  1.91.2.4      yamt 	vaddr_t defaddr;
    310      1.40  augustss 	struct filedesc *fdp = p->p_fd;
    311  1.91.2.5      yamt 	struct file *fp = NULL;
    312       1.6       mrg 	struct vnode *vp;
    313      1.50       chs 	void *handle;
    314       1.6       mrg 	int error;
    315  1.91.2.7      yamt #ifdef PAX_ASLR
    316  1.91.2.7      yamt 	vaddr_t orig_addr;
    317  1.91.2.7      yamt #endif /* PAX_ASLR */
    318       1.6       mrg 
    319       1.6       mrg 	/*
    320       1.6       mrg 	 * first, extract syscall args from the uap.
    321       1.6       mrg 	 */
    322       1.6       mrg 
    323      1.50       chs 	addr = (vaddr_t)SCARG(uap, addr);
    324      1.50       chs 	size = (vsize_t)SCARG(uap, len);
    325       1.6       mrg 	prot = SCARG(uap, prot) & VM_PROT_ALL;
    326       1.6       mrg 	flags = SCARG(uap, flags);
    327       1.6       mrg 	fd = SCARG(uap, fd);
    328       1.6       mrg 	pos = SCARG(uap, pos);
    329       1.6       mrg 
    330  1.91.2.7      yamt #ifdef PAX_ASLR
    331  1.91.2.7      yamt 	orig_addr = addr;
    332  1.91.2.7      yamt #endif /* PAX_ASLR */
    333  1.91.2.7      yamt 
    334       1.6       mrg 	/*
    335      1.24   thorpej 	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
    336      1.24   thorpej 	 * validate the flags.
    337      1.24   thorpej 	 */
    338      1.24   thorpej 	if (flags & MAP_COPY)
    339      1.24   thorpej 		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
    340      1.24   thorpej 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
    341      1.24   thorpej 		return (EINVAL);
    342      1.24   thorpej 
    343      1.24   thorpej 	/*
    344       1.6       mrg 	 * align file position and save offset.  adjust size.
    345       1.6       mrg 	 */
    346       1.6       mrg 
    347       1.6       mrg 	pageoff = (pos & PAGE_MASK);
    348       1.6       mrg 	pos  -= pageoff;
    349       1.6       mrg 	size += pageoff;			/* add offset */
    350      1.50       chs 	size = (vsize_t)round_page(size);	/* round up */
    351       1.6       mrg 
    352       1.6       mrg 	/*
    353      1.51       chs 	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
    354       1.6       mrg 	 */
    355       1.6       mrg 	if (flags & MAP_FIXED) {
    356       1.6       mrg 
    357       1.6       mrg 		/* ensure address and file offset are aligned properly */
    358       1.6       mrg 		addr -= pageoff;
    359       1.6       mrg 		if (addr & PAGE_MASK)
    360       1.6       mrg 			return (EINVAL);
    361       1.6       mrg 
    362  1.91.2.5      yamt 		error = range_test(addr, size, true);
    363  1.91.2.5      yamt 		if (error)
    364  1.91.2.5      yamt 			return error;
    365      1.75  christos 	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
    366       1.6       mrg 
    367       1.6       mrg 		/*
    368      1.68    atatat 		 * not fixed: make sure we skip over the largest
    369      1.68    atatat 		 * possible heap for non-topdown mapping arrangements.
    370      1.68    atatat 		 * we will refine our guess later (e.g. to account for
    371      1.68    atatat 		 * VAC, etc)
    372       1.6       mrg 		 */
    373      1.46       chs 
    374      1.89      fvdl 		defaddr = p->p_emul->e_vm_default_addr(p,
    375      1.89      fvdl 		    (vaddr_t)p->p_vmspace->vm_daddr, size);
    376      1.89      fvdl 
    377      1.68    atatat 		if (addr == 0 ||
    378      1.68    atatat 		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
    379      1.89      fvdl 			addr = MAX(addr, defaddr);
    380      1.68    atatat 		else
    381      1.89      fvdl 			addr = MIN(addr, defaddr);
    382       1.6       mrg 	}
    383       1.6       mrg 
    384       1.6       mrg 	/*
    385       1.6       mrg 	 * check for file mappings (i.e. not anonymous) and verify file.
    386       1.6       mrg 	 */
    387       1.6       mrg 
    388       1.6       mrg 	if ((flags & MAP_ANON) == 0) {
    389       1.6       mrg 
    390      1.54   thorpej 		if ((fp = fd_getfile(fdp, fd)) == NULL)
    391      1.54   thorpej 			return (EBADF);
    392  1.91.2.5      yamt 		if (fp->f_type != DTYPE_VNODE) {
    393  1.91.2.5      yamt 			mutex_exit(&fp->f_lock);
    394       1.7    kleink 			return (ENODEV);		/* only mmap vnodes! */
    395  1.91.2.5      yamt 		}
    396       1.6       mrg 		vp = (struct vnode *)fp->f_data;	/* convert to vnode */
    397       1.6       mrg 
    398      1.11   thorpej 		if (vp->v_type != VREG && vp->v_type != VCHR &&
    399  1.91.2.5      yamt 		    vp->v_type != VBLK) {
    400  1.91.2.5      yamt 			mutex_exit(&fp->f_lock);
    401      1.11   thorpej 			return (ENODEV);  /* only REG/CHR/BLK support mmap */
    402  1.91.2.5      yamt 		}
    403  1.91.2.5      yamt 		if (vp->v_type != VCHR && pos < 0) {
    404  1.91.2.5      yamt 			mutex_exit(&fp->f_lock);
    405      1.61       chs 			return (EINVAL);
    406  1.91.2.5      yamt 		}
    407  1.91.2.5      yamt 		if (vp->v_type != VCHR && (pos + size) < pos) {
    408  1.91.2.5      yamt 			mutex_exit(&fp->f_lock);
    409      1.39    kleink 			return (EOVERFLOW);		/* no offset wrapping */
    410  1.91.2.5      yamt 		}
    411       1.6       mrg 
    412       1.6       mrg 		/* special case: catch SunOS style /dev/zero */
    413      1.80  jdolecek 		if (vp->v_type == VCHR
    414      1.80  jdolecek 		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
    415       1.6       mrg 			flags |= MAP_ANON;
    416  1.91.2.5      yamt 			mutex_exit(&fp->f_lock);
    417  1.91.2.5      yamt 			fp = NULL;
    418       1.6       mrg 			goto is_anon;
    419       1.6       mrg 		}
    420       1.6       mrg 
    421       1.6       mrg 		/*
    422       1.6       mrg 		 * Old programs may not select a specific sharing type, so
    423       1.6       mrg 		 * default to an appropriate one.
    424       1.6       mrg 		 *
    425       1.6       mrg 		 * XXX: how does MAP_ANON fit in the picture?
    426       1.6       mrg 		 */
    427      1.24   thorpej 		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
    428       1.8        tv #if defined(DEBUG)
    429       1.6       mrg 			printf("WARNING: defaulted mmap() share type to "
    430      1.71  gmcgarry 			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
    431       1.6       mrg 			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
    432       1.6       mrg 			    p->p_comm);
    433       1.1       mrg #endif
    434       1.6       mrg 			if (vp->v_type == VCHR)
    435       1.6       mrg 				flags |= MAP_SHARED;	/* for a device */
    436       1.6       mrg 			else
    437       1.6       mrg 				flags |= MAP_PRIVATE;	/* for a file */
    438       1.6       mrg 		}
    439       1.6       mrg 
    440      1.51       chs 		/*
    441       1.6       mrg 		 * MAP_PRIVATE device mappings don't make sense (and aren't
    442       1.6       mrg 		 * supported anyway).  However, some programs rely on this,
    443       1.6       mrg 		 * so just change it to MAP_SHARED.
    444       1.6       mrg 		 */
    445       1.6       mrg 		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
    446       1.6       mrg 			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
    447       1.6       mrg 		}
    448       1.1       mrg 
    449       1.6       mrg 		/*
    450       1.6       mrg 		 * now check protection
    451       1.6       mrg 		 */
    452       1.6       mrg 
    453      1.48   thorpej 		maxprot = VM_PROT_EXECUTE;
    454       1.6       mrg 
    455       1.6       mrg 		/* check read access */
    456       1.6       mrg 		if (fp->f_flag & FREAD)
    457       1.6       mrg 			maxprot |= VM_PROT_READ;
    458  1.91.2.5      yamt 		else if (prot & PROT_READ) {
    459  1.91.2.5      yamt 			mutex_exit(&fp->f_lock);
    460       1.6       mrg 			return (EACCES);
    461  1.91.2.5      yamt 		}
    462  1.91.2.5      yamt 		FILE_USE(fp);
    463       1.6       mrg 
    464       1.9       mrg 		/* check write access, shared case first */
    465       1.6       mrg 		if (flags & MAP_SHARED) {
    466       1.9       mrg 			/*
    467       1.9       mrg 			 * if the file is writable, only add PROT_WRITE to
    468       1.9       mrg 			 * maxprot if the file is not immutable, append-only.
    469       1.9       mrg 			 * otherwise, if we have asked for PROT_WRITE, return
    470       1.9       mrg 			 * EPERM.
    471       1.9       mrg 			 */
    472       1.9       mrg 			if (fp->f_flag & FWRITE) {
    473       1.9       mrg 				if ((error =
    474  1.91.2.6      yamt 				    VOP_GETATTR(vp, &va, l->l_cred))) {
    475  1.91.2.5      yamt 				    	FILE_UNUSE(fp, l);
    476       1.9       mrg 					return (error);
    477  1.91.2.5      yamt 				}
    478      1.84   hannken 				if ((va.va_flags &
    479      1.84   hannken 				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
    480       1.9       mrg 					maxprot |= VM_PROT_WRITE;
    481  1.91.2.5      yamt 				else if (prot & PROT_WRITE) {
    482  1.91.2.5      yamt 				    	FILE_UNUSE(fp, l);
    483       1.9       mrg 					return (EPERM);
    484  1.91.2.5      yamt 				}
    485       1.9       mrg 			}
    486  1.91.2.5      yamt 			else if (prot & PROT_WRITE) {
    487  1.91.2.5      yamt 			    	FILE_UNUSE(fp, l);
    488       1.6       mrg 				return (EACCES);
    489  1.91.2.5      yamt 			}
    490       1.6       mrg 		} else {
    491       1.6       mrg 			/* MAP_PRIVATE mappings can always be written to */
    492       1.6       mrg 			maxprot |= VM_PROT_WRITE;
    493       1.6       mrg 		}
    494      1.50       chs 		handle = vp;
    495       1.1       mrg 
    496       1.6       mrg 	} else {		/* MAP_ANON case */
    497      1.24   thorpej 		/*
    498      1.24   thorpej 		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
    499      1.24   thorpej 		 */
    500       1.6       mrg 		if (fd != -1)
    501       1.6       mrg 			return (EINVAL);
    502       1.1       mrg 
    503      1.24   thorpej  is_anon:		/* label for SunOS style /dev/zero */
    504       1.6       mrg 		handle = NULL;
    505       1.6       mrg 		maxprot = VM_PROT_ALL;
    506       1.6       mrg 		pos = 0;
    507      1.28       cgd 	}
    508      1.28       cgd 
    509      1.28       cgd 	/*
    510      1.28       cgd 	 * XXX (in)sanity check.  We don't do proper datasize checking
    511      1.28       cgd 	 * XXX for anonymous (or private writable) mmap().  However,
    512      1.28       cgd 	 * XXX we know that if we're trying to allocate more than the amount
    513      1.28       cgd 	 * XXX remaining under our current data size limit, _that_ should
    514      1.28       cgd 	 * XXX be disallowed.
    515      1.28       cgd 	 */
    516      1.28       cgd 	if ((flags & MAP_ANON) != 0 ||
    517      1.28       cgd 	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
    518      1.28       cgd 		if (size >
    519      1.50       chs 		    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
    520      1.50       chs 		     ctob(p->p_vmspace->vm_dsize))) {
    521  1.91.2.5      yamt 		     	if (fp != NULL)
    522  1.91.2.5      yamt 			    	FILE_UNUSE(fp, l);
    523      1.28       cgd 			return (ENOMEM);
    524      1.28       cgd 		}
    525       1.6       mrg 	}
    526       1.6       mrg 
    527  1.91.2.4      yamt #if NVERIEXEC > 0
    528  1.91.2.4      yamt 	if (handle != NULL) {
    529  1.91.2.4      yamt 		/*
    530  1.91.2.4      yamt 		 * Check if the file can be executed indirectly.
    531  1.91.2.4      yamt 		 *
    532  1.91.2.4      yamt 		 * XXX: This gives false warnings about "Incorrect access type"
    533  1.91.2.4      yamt 		 * XXX: if the mapping is not executable. Harmless, but will be
    534  1.91.2.4      yamt 		 * XXX: fixed as part of other changes.
    535  1.91.2.4      yamt 		 */
    536  1.91.2.4      yamt 		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
    537  1.91.2.4      yamt 		    NULL)) {
    538  1.91.2.4      yamt 			/*
    539  1.91.2.4      yamt 			 * Don't allow executable mappings if we can't
    540  1.91.2.4      yamt 			 * indirectly execute the file.
    541  1.91.2.4      yamt 			 */
    542  1.91.2.5      yamt 			if (prot & VM_PROT_EXECUTE) {
    543  1.91.2.5      yamt 			     	if (fp != NULL)
    544  1.91.2.5      yamt 				    	FILE_UNUSE(fp, l);
    545  1.91.2.4      yamt 				return (EPERM);
    546  1.91.2.5      yamt 			}
    547  1.91.2.4      yamt 
    548  1.91.2.4      yamt 			/*
    549  1.91.2.4      yamt 			 * Strip the executable bit from 'maxprot' to make sure
    550  1.91.2.4      yamt 			 * it can't be made executable later.
    551  1.91.2.4      yamt 			 */
    552  1.91.2.4      yamt 			maxprot &= ~VM_PROT_EXECUTE;
    553  1.91.2.4      yamt 		}
    554  1.91.2.4      yamt 	}
    555  1.91.2.4      yamt #endif /* NVERIEXEC > 0 */
    556  1.91.2.4      yamt 
    557  1.91.2.1      yamt #ifdef PAX_MPROTECT
    558  1.91.2.1      yamt 	pax_mprotect(l, &prot, &maxprot);
    559  1.91.2.1      yamt #endif /* PAX_MPROTECT */
    560  1.91.2.1      yamt 
    561  1.91.2.7      yamt #ifdef PAX_ASLR
    562  1.91.2.7      yamt 	pax_aslr(l, &addr, orig_addr, flags);
    563  1.91.2.7      yamt #endif /* PAX_ASLR */
    564  1.91.2.7      yamt 
    565       1.6       mrg 	/*
    566       1.6       mrg 	 * now let kernel internal function uvm_mmap do the work.
    567       1.6       mrg 	 */
    568       1.6       mrg 
    569       1.6       mrg 	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
    570      1.25   thorpej 	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
    571       1.6       mrg 
    572       1.6       mrg 	if (error == 0)
    573       1.6       mrg 		/* remember to add offset */
    574       1.6       mrg 		*retval = (register_t)(addr + pageoff);
    575       1.1       mrg 
    576  1.91.2.5      yamt      	if (fp != NULL)
    577  1.91.2.5      yamt 	    	FILE_UNUSE(fp, l);
    578  1.91.2.5      yamt 
    579       1.6       mrg 	return (error);
    580       1.1       mrg }
    581       1.1       mrg 
    582       1.1       mrg /*
    583       1.1       mrg  * sys___msync13: the msync system call (a front-end for flush)
    584       1.1       mrg  */
    585       1.1       mrg 
    586       1.6       mrg int
    587  1.91.2.7      yamt sys___msync13(struct lwp *l, const struct sys___msync13_args *uap, register_t *retval)
    588       1.6       mrg {
    589  1.91.2.7      yamt 	/* {
    590  1.91.2.4      yamt 		syscallarg(void *) addr;
    591       1.6       mrg 		syscallarg(size_t) len;
    592       1.6       mrg 		syscallarg(int) flags;
    593  1.91.2.7      yamt 	} */
    594      1.67   thorpej 	struct proc *p = l->l_proc;
    595      1.12       eeh 	vaddr_t addr;
    596      1.12       eeh 	vsize_t size, pageoff;
    597      1.53       chs 	struct vm_map *map;
    598      1.50       chs 	int error, rv, flags, uvmflags;
    599       1.6       mrg 
    600       1.6       mrg 	/*
    601       1.6       mrg 	 * extract syscall args from the uap
    602       1.6       mrg 	 */
    603       1.6       mrg 
    604      1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    605      1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    606       1.6       mrg 	flags = SCARG(uap, flags);
    607       1.6       mrg 
    608       1.6       mrg 	/* sanity check flags */
    609       1.6       mrg 	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
    610      1.77       chs 	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
    611      1.77       chs 	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
    612      1.77       chs 		return (EINVAL);
    613       1.6       mrg 	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
    614      1.77       chs 		flags |= MS_SYNC;
    615       1.1       mrg 
    616       1.6       mrg 	/*
    617      1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    618       1.6       mrg 	 */
    619       1.6       mrg 
    620       1.6       mrg 	pageoff = (addr & PAGE_MASK);
    621       1.6       mrg 	addr -= pageoff;
    622       1.6       mrg 	size += pageoff;
    623      1.50       chs 	size = (vsize_t)round_page(size);
    624       1.6       mrg 
    625  1.91.2.5      yamt 	error = range_test(addr, size, false);
    626  1.91.2.5      yamt 	if (error)
    627  1.91.2.5      yamt 		return error;
    628       1.6       mrg 
    629       1.6       mrg 	/*
    630       1.6       mrg 	 * get map
    631       1.6       mrg 	 */
    632       1.6       mrg 
    633       1.6       mrg 	map = &p->p_vmspace->vm_map;
    634       1.6       mrg 
    635       1.6       mrg 	/*
    636       1.6       mrg 	 * XXXCDC: do we really need this semantic?
    637       1.6       mrg 	 *
    638       1.6       mrg 	 * XXX Gak!  If size is zero we are supposed to sync "all modified
    639       1.6       mrg 	 * pages within the region containing addr".  Unfortunately, we
    640       1.6       mrg 	 * don't really keep track of individual mmaps so we approximate
    641       1.6       mrg 	 * by flushing the range of the map entry containing addr.
    642       1.6       mrg 	 * This can be incorrect if the region splits or is coalesced
    643       1.6       mrg 	 * with a neighbor.
    644       1.6       mrg 	 */
    645      1.50       chs 
    646       1.6       mrg 	if (size == 0) {
    647      1.53       chs 		struct vm_map_entry *entry;
    648      1.51       chs 
    649       1.6       mrg 		vm_map_lock_read(map);
    650       1.6       mrg 		rv = uvm_map_lookup_entry(map, addr, &entry);
    651  1.91.2.3      yamt 		if (rv == true) {
    652       1.6       mrg 			addr = entry->start;
    653       1.6       mrg 			size = entry->end - entry->start;
    654       1.6       mrg 		}
    655       1.6       mrg 		vm_map_unlock_read(map);
    656  1.91.2.3      yamt 		if (rv == false)
    657       1.6       mrg 			return (EINVAL);
    658       1.6       mrg 	}
    659       1.6       mrg 
    660       1.6       mrg 	/*
    661       1.6       mrg 	 * translate MS_ flags into PGO_ flags
    662       1.6       mrg 	 */
    663      1.50       chs 
    664      1.34   thorpej 	uvmflags = PGO_CLEANIT;
    665      1.34   thorpej 	if (flags & MS_INVALIDATE)
    666      1.34   thorpej 		uvmflags |= PGO_FREE;
    667       1.6       mrg 	if (flags & MS_SYNC)
    668       1.6       mrg 		uvmflags |= PGO_SYNCIO;
    669       1.6       mrg 
    670      1.50       chs 	error = uvm_map_clean(map, addr, addr+size, uvmflags);
    671      1.50       chs 	return error;
    672       1.1       mrg }
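/*
 * Editorial sketch (added): typical userland use of the call above.
 * Variable names are assumptions for the example.
 *
 *	char *p = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	... modify the mapping ...
 *	if (msync(p, len, MS_SYNC) == -1)	// synchronous: PGO_SYNCIO
 *		err(1, "msync");
 */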
    673       1.1       mrg 
    674       1.1       mrg /*
    675       1.1       mrg  * sys_munmap: unmap a users memory
    676       1.1       mrg  */
    677       1.1       mrg 
    678       1.6       mrg int
    679  1.91.2.7      yamt sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
    680       1.6       mrg {
    681  1.91.2.7      yamt 	/* {
    682  1.91.2.4      yamt 		syscallarg(void *) addr;
    683       1.6       mrg 		syscallarg(size_t) len;
    684  1.91.2.7      yamt 	} */
    685      1.67   thorpej 	struct proc *p = l->l_proc;
    686      1.12       eeh 	vaddr_t addr;
    687      1.12       eeh 	vsize_t size, pageoff;
    688      1.53       chs 	struct vm_map *map;
    689       1.6       mrg 	struct vm_map_entry *dead_entries;
    690  1.91.2.5      yamt 	int error;
    691       1.6       mrg 
    692       1.6       mrg 	/*
    693      1.50       chs 	 * get syscall args.
    694       1.6       mrg 	 */
    695       1.6       mrg 
    696      1.50       chs 	addr = (vaddr_t)SCARG(uap, addr);
    697      1.50       chs 	size = (vsize_t)SCARG(uap, len);
    698      1.51       chs 
    699       1.6       mrg 	/*
    700      1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    701       1.6       mrg 	 */
    702       1.6       mrg 
    703       1.6       mrg 	pageoff = (addr & PAGE_MASK);
    704       1.6       mrg 	addr -= pageoff;
    705       1.6       mrg 	size += pageoff;
    706      1.50       chs 	size = (vsize_t)round_page(size);
    707       1.6       mrg 
    708       1.6       mrg 	if (size == 0)
    709       1.6       mrg 		return (0);
    710       1.6       mrg 
    711  1.91.2.5      yamt 	error = range_test(addr, size, false);
    712  1.91.2.5      yamt 	if (error)
    713  1.91.2.5      yamt 		return error;
    714  1.91.2.4      yamt 
    715       1.6       mrg 	map = &p->p_vmspace->vm_map;
    716       1.6       mrg 
    717       1.6       mrg 	/*
    718      1.51       chs 	 * interesting system call semantic: make sure entire range is
    719       1.6       mrg 	 * allocated before allowing an unmap.
    720       1.6       mrg 	 */
    721       1.6       mrg 
    722      1.50       chs 	vm_map_lock(map);
    723      1.66   mycroft #if 0
    724       1.6       mrg 	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
    725       1.6       mrg 		vm_map_unlock(map);
    726       1.6       mrg 		return (EINVAL);
    727       1.6       mrg 	}
    728      1.66   mycroft #endif
    729      1.90      yamt 	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
    730      1.50       chs 	vm_map_unlock(map);
    731       1.6       mrg 	if (dead_entries != NULL)
    732       1.6       mrg 		uvm_unmap_detach(dead_entries, 0);
    733       1.6       mrg 	return (0);
    734       1.1       mrg }
    735       1.1       mrg 
    736       1.1       mrg /*
    737       1.1       mrg  * sys_mprotect: the mprotect system call
    738       1.1       mrg  */
    739       1.1       mrg 
    740       1.6       mrg int
    741  1.91.2.7      yamt sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap, register_t *retval)
    742       1.6       mrg {
    743  1.91.2.7      yamt 	/* {
    744  1.91.2.4      yamt 		syscallarg(void *) addr;
    745      1.76       chs 		syscallarg(size_t) len;
    746       1.6       mrg 		syscallarg(int) prot;
    747  1.91.2.7      yamt 	} */
    748      1.67   thorpej 	struct proc *p = l->l_proc;
    749      1.12       eeh 	vaddr_t addr;
    750      1.12       eeh 	vsize_t size, pageoff;
    751       1.6       mrg 	vm_prot_t prot;
    752      1.50       chs 	int error;
    753       1.6       mrg 
    754       1.6       mrg 	/*
    755       1.6       mrg 	 * extract syscall args from uap
    756       1.6       mrg 	 */
    757       1.6       mrg 
    758      1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    759      1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    760       1.6       mrg 	prot = SCARG(uap, prot) & VM_PROT_ALL;
    761       1.6       mrg 
    762       1.6       mrg 	/*
    763      1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    764       1.6       mrg 	 */
    765      1.50       chs 
    766       1.6       mrg 	pageoff = (addr & PAGE_MASK);
    767       1.6       mrg 	addr -= pageoff;
    768       1.6       mrg 	size += pageoff;
    769      1.76       chs 	size = round_page(size);
    770      1.50       chs 
    771  1.91.2.5      yamt 	error = range_test(addr, size, false);
    772  1.91.2.5      yamt 	if (error)
    773  1.91.2.5      yamt 		return error;
    774  1.91.2.4      yamt 
    775      1.50       chs 	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
    776  1.91.2.3      yamt 				false);
    777      1.50       chs 	return error;
    778       1.1       mrg }
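/*
 * Editorial sketch (added): dropping write permission on a mapped
 * region (pointer and length are assumptions); the range is rounded
 * to page boundaries exactly as the code above does.
 *
 *	if (mprotect(p, len, PROT_READ) == -1)
 *		err(1, "mprotect");
 */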
    779       1.1       mrg 
    780       1.1       mrg /*
    781       1.1       mrg  * sys_minherit: the minherit system call
    782       1.1       mrg  */
    783       1.1       mrg 
    784       1.6       mrg int
    785  1.91.2.7      yamt sys_minherit(struct lwp *l, const struct sys_minherit_args *uap, register_t *retval)
    786       1.6       mrg {
    787  1.91.2.7      yamt 	/* {
    788  1.91.2.4      yamt 		syscallarg(void *) addr;
    789       1.6       mrg 		syscallarg(int) len;
    790       1.6       mrg 		syscallarg(int) inherit;
    791  1.91.2.7      yamt 	} */
    792      1.67   thorpej 	struct proc *p = l->l_proc;
    793      1.12       eeh 	vaddr_t addr;
    794      1.12       eeh 	vsize_t size, pageoff;
    795      1.40  augustss 	vm_inherit_t inherit;
    796      1.50       chs 	int error;
    797      1.51       chs 
    798      1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    799      1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    800       1.6       mrg 	inherit = SCARG(uap, inherit);
    801      1.50       chs 
    802       1.6       mrg 	/*
    803      1.50       chs 	 * align the address to a page boundary and adjust the size accordingly.
    804       1.6       mrg 	 */
    805       1.6       mrg 
    806       1.6       mrg 	pageoff = (addr & PAGE_MASK);
    807       1.6       mrg 	addr -= pageoff;
    808       1.6       mrg 	size += pageoff;
    809      1.50       chs 	size = (vsize_t)round_page(size);
    810       1.6       mrg 
    811  1.91.2.5      yamt 	error = range_test(addr, size, false);
    812  1.91.2.5      yamt 	if (error)
    813  1.91.2.5      yamt 		return error;
    814  1.91.2.4      yamt 
    815      1.50       chs 	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
    816      1.50       chs 				inherit);
    817      1.50       chs 	return error;
    818      1.21       mrg }
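/*
 * Editorial sketch (added): marking a region so that a child created
 * by fork() does not inherit it (names are assumptions):
 *
 *	if (minherit(p, len, MAP_INHERIT_NONE) == -1)
 *		err(1, "minherit");
 */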
    819      1.21       mrg 
    820      1.21       mrg /*
    821      1.21       mrg  * sys_madvise: give advice about memory usage.
    822      1.21       mrg  */
    823      1.21       mrg 
    824      1.21       mrg /* ARGSUSED */
    825      1.21       mrg int
    826  1.91.2.7      yamt sys_madvise(struct lwp *l, const struct sys_madvise_args *uap, register_t *retval)
    827      1.21       mrg {
    828  1.91.2.7      yamt 	/* {
    829  1.91.2.4      yamt 		syscallarg(void *) addr;
    830      1.21       mrg 		syscallarg(size_t) len;
    831      1.21       mrg 		syscallarg(int) behav;
    832  1.91.2.7      yamt 	} */
    833      1.67   thorpej 	struct proc *p = l->l_proc;
    834      1.21       mrg 	vaddr_t addr;
    835      1.21       mrg 	vsize_t size, pageoff;
    836      1.50       chs 	int advice, error;
    837      1.51       chs 
    838      1.21       mrg 	addr = (vaddr_t)SCARG(uap, addr);
    839      1.21       mrg 	size = (vsize_t)SCARG(uap, len);
    840      1.21       mrg 	advice = SCARG(uap, behav);
    841      1.21       mrg 
    842      1.21       mrg 	/*
    843      1.21       mrg 	 * align the address to a page boundary, and adjust the size accordingly
    844      1.21       mrg 	 */
    845      1.50       chs 
    846      1.21       mrg 	pageoff = (addr & PAGE_MASK);
    847      1.21       mrg 	addr -= pageoff;
    848      1.21       mrg 	size += pageoff;
    849      1.50       chs 	size = (vsize_t)round_page(size);
    850      1.21       mrg 
    851  1.91.2.5      yamt 	error = range_test(addr, size, false);
    852  1.91.2.5      yamt 	if (error)
    853  1.91.2.5      yamt 		return error;
    854      1.29   thorpej 
    855      1.29   thorpej 	switch (advice) {
    856      1.29   thorpej 	case MADV_NORMAL:
    857      1.29   thorpej 	case MADV_RANDOM:
    858      1.29   thorpej 	case MADV_SEQUENTIAL:
    859      1.50       chs 		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
    860      1.29   thorpej 		    advice);
    861      1.29   thorpej 		break;
    862      1.29   thorpej 
    863      1.29   thorpej 	case MADV_WILLNEED:
    864      1.50       chs 
    865      1.29   thorpej 		/*
    866      1.29   thorpej 		 * Activate all these pages, pre-faulting them in if
    867      1.29   thorpej 		 * necessary.
    868      1.29   thorpej 		 */
    869      1.29   thorpej 		/*
    870      1.29   thorpej 		 * XXX IMPLEMENT ME.
    871      1.29   thorpej 		 * Should invent a "weak" mode for uvm_fault()
    872      1.29   thorpej 		 * which would only do the PGO_LOCKED pgo_get().
    873      1.29   thorpej 		 */
    874      1.50       chs 
    875      1.29   thorpej 		return (0);
    876      1.29   thorpej 
    877      1.29   thorpej 	case MADV_DONTNEED:
    878      1.50       chs 
    879      1.29   thorpej 		/*
    880      1.29   thorpej 		 * Deactivate all these pages.  We don't need them
    881      1.29   thorpej 		 * any more.  We don't, however, toss the data in
    882      1.29   thorpej 		 * the pages.
    883      1.29   thorpej 		 */
    884      1.50       chs 
    885      1.50       chs 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
    886      1.29   thorpej 		    PGO_DEACTIVATE);
    887      1.29   thorpej 		break;
    888      1.29   thorpej 
    889      1.29   thorpej 	case MADV_FREE:
    890      1.50       chs 
    891      1.29   thorpej 		/*
    892      1.29   thorpej 		 * These pages contain no valid data, and may be
    893      1.45     soren 		 * garbage-collected.  Toss all resources, including
    894      1.30   thorpej 		 * any swap space in use.
    895      1.29   thorpej 		 */
    896      1.50       chs 
    897      1.50       chs 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
    898      1.29   thorpej 		    PGO_FREE);
    899      1.29   thorpej 		break;
    900      1.29   thorpej 
    901      1.29   thorpej 	case MADV_SPACEAVAIL:
    902      1.50       chs 
    903      1.29   thorpej 		/*
    904      1.29   thorpej 		 * XXXMRG What is this?  I think it's:
    905      1.29   thorpej 		 *
    906      1.29   thorpej 		 *	Ensure that we have allocated backing-store
    907      1.29   thorpej 		 *	for these pages.
    908      1.29   thorpej 		 *
    909      1.29   thorpej 		 * This is going to require changes to the page daemon,
    910      1.29   thorpej 		 * as it will free swap space allocated to pages in core.
    911      1.29   thorpej 		 * There's also what to do for device/file/anonymous memory.
    912      1.29   thorpej 		 */
    913      1.50       chs 
    914      1.29   thorpej 		return (EINVAL);
    915      1.29   thorpej 
    916      1.29   thorpej 	default:
    917      1.21       mrg 		return (EINVAL);
    918      1.29   thorpej 	}
    919      1.29   thorpej 
    920      1.50       chs 	return error;
    921       1.1       mrg }
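/*
 * Editorial sketch (added): userland hints corresponding to the cases
 * handled above (buffer name and length are assumptions):
 *
 *	madvise(buf, len, MADV_SEQUENTIAL);	// per-entry advice
 *	madvise(buf, len, MADV_DONTNEED);	// deactivate, keep contents
 *	madvise(buf, len, MADV_FREE);		// contents and swap may go
 */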
    922       1.1       mrg 
    923       1.1       mrg /*
    924       1.1       mrg  * sys_mlock: memory lock
    925       1.1       mrg  */
    926       1.1       mrg 
    927       1.6       mrg int
    928  1.91.2.7      yamt sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
    929       1.6       mrg {
    930  1.91.2.7      yamt 	/* {
    931      1.10    kleink 		syscallarg(const void *) addr;
    932       1.6       mrg 		syscallarg(size_t) len;
    933  1.91.2.7      yamt 	} */
    934      1.67   thorpej 	struct proc *p = l->l_proc;
    935      1.12       eeh 	vaddr_t addr;
    936      1.12       eeh 	vsize_t size, pageoff;
    937       1.6       mrg 	int error;
    938       1.6       mrg 
    939       1.6       mrg 	/*
    940       1.6       mrg 	 * extract syscall args from uap
    941       1.6       mrg 	 */
    942      1.50       chs 
    943      1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    944      1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    945       1.6       mrg 
    946       1.6       mrg 	/*
    947       1.6       mrg 	 * align the address to a page boundary and adjust the size accordingly
    948       1.6       mrg 	 */
    949      1.50       chs 
    950       1.6       mrg 	pageoff = (addr & PAGE_MASK);
    951       1.6       mrg 	addr -= pageoff;
    952       1.6       mrg 	size += pageoff;
    953      1.50       chs 	size = (vsize_t)round_page(size);
    954      1.51       chs 
    955  1.91.2.5      yamt 	error = range_test(addr, size, false);
    956  1.91.2.5      yamt 	if (error)
    957  1.91.2.5      yamt 		return error;
    958       1.1       mrg 
    959       1.6       mrg 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
    960       1.6       mrg 		return (EAGAIN);
    961       1.1       mrg 
    962       1.6       mrg 	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
    963       1.6       mrg 			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
    964       1.6       mrg 		return (EAGAIN);
    965       1.1       mrg 
    966  1.91.2.3      yamt 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
    967      1.35   thorpej 	    0);
    968      1.85    briggs 	if (error == EFAULT)
    969      1.85    briggs 		error = ENOMEM;
    970      1.50       chs 	return error;
    971       1.1       mrg }
    972       1.1       mrg 
    973       1.1       mrg /*
    974       1.1       mrg  * sys_munlock: unlock wired pages
    975       1.1       mrg  */
    976       1.1       mrg 
    977       1.6       mrg int
    978  1.91.2.7      yamt sys_munlock(struct lwp *l, const struct sys_munlock_args *uap, register_t *retval)
    979       1.6       mrg {
    980  1.91.2.7      yamt 	/* {
    981      1.10    kleink 		syscallarg(const void *) addr;
    982       1.6       mrg 		syscallarg(size_t) len;
    983  1.91.2.7      yamt 	} */
    984      1.67   thorpej 	struct proc *p = l->l_proc;
    985      1.12       eeh 	vaddr_t addr;
    986      1.12       eeh 	vsize_t size, pageoff;
    987       1.6       mrg 	int error;
    988       1.6       mrg 
    989       1.6       mrg 	/*
    990       1.6       mrg 	 * extract syscall args from uap
    991       1.6       mrg 	 */
    992       1.6       mrg 
    993      1.12       eeh 	addr = (vaddr_t)SCARG(uap, addr);
    994      1.12       eeh 	size = (vsize_t)SCARG(uap, len);
    995       1.6       mrg 
    996       1.6       mrg 	/*
    997       1.6       mrg 	 * align the address to a page boundary, and adjust the size accordingly
    998       1.6       mrg 	 */
    999      1.50       chs 
   1000       1.6       mrg 	pageoff = (addr & PAGE_MASK);
   1001       1.6       mrg 	addr -= pageoff;
   1002       1.6       mrg 	size += pageoff;
   1003      1.50       chs 	size = (vsize_t)round_page(size);
   1004       1.6       mrg 
   1005  1.91.2.5      yamt 	error = range_test(addr, size, false);
   1006  1.91.2.5      yamt 	if (error)
   1007  1.91.2.5      yamt 		return error;
   1008       1.1       mrg 
   1009  1.91.2.3      yamt 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
   1010      1.35   thorpej 	    0);
   1011      1.85    briggs 	if (error == EFAULT)
   1012      1.85    briggs 		error = ENOMEM;
   1013      1.50       chs 	return error;
   1014      1.22   thorpej }
   1015      1.22   thorpej 
   1016      1.22   thorpej /*
   1017      1.22   thorpej  * sys_mlockall: lock all pages mapped into an address space.
   1018      1.22   thorpej  */
   1019      1.22   thorpej 
   1020      1.22   thorpej int
   1021  1.91.2.7      yamt sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap, register_t *retval)
   1022      1.22   thorpej {
   1023  1.91.2.7      yamt 	/* {
   1024      1.22   thorpej 		syscallarg(int) flags;
   1025  1.91.2.7      yamt 	} */
   1026      1.67   thorpej 	struct proc *p = l->l_proc;
   1027      1.22   thorpej 	int error, flags;
   1028      1.22   thorpej 
   1029      1.22   thorpej 	flags = SCARG(uap, flags);
   1030      1.22   thorpej 
   1031      1.22   thorpej 	if (flags == 0 ||
   1032      1.22   thorpej 	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
   1033      1.22   thorpej 		return (EINVAL);
   1034      1.22   thorpej 
   1035      1.25   thorpej 	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
   1036      1.25   thorpej 	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
   1037      1.22   thorpej 	return (error);
   1038      1.22   thorpej }
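/*
 * Editorial sketch (added): locking every current and future mapping
 * of the process, subject to the RLIMIT_MEMLOCK limit passed above:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 */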
   1039      1.22   thorpej 
   1040      1.22   thorpej /*
   1041      1.22   thorpej  * sys_munlockall: unlock all pages mapped into an address space.
   1042      1.22   thorpej  */
   1043      1.22   thorpej 
   1044      1.22   thorpej int
   1045  1.91.2.7      yamt sys_munlockall(struct lwp *l, const void *v, register_t *retval)
   1046      1.22   thorpej {
   1047      1.67   thorpej 	struct proc *p = l->l_proc;
   1048      1.22   thorpej 
   1049      1.22   thorpej 	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
   1050      1.22   thorpej 	return (0);
   1051       1.1       mrg }
   1052       1.1       mrg 
   1053       1.1       mrg /*
   1054       1.1       mrg  * uvm_mmap: internal version of mmap
   1055       1.1       mrg  *
   1056      1.56       chs  * - used by sys_mmap and various framebuffers
   1057      1.56       chs  * - handle is a vnode pointer or NULL for MAP_ANON
   1058       1.1       mrg  * - caller must page-align the file offset
   1059       1.1       mrg  */
   1060       1.1       mrg 
   1061       1.6       mrg int
   1062      1.25   thorpej uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
   1063      1.53       chs 	struct vm_map *map;
   1064      1.12       eeh 	vaddr_t *addr;
   1065      1.12       eeh 	vsize_t size;
   1066       1.6       mrg 	vm_prot_t prot, maxprot;
   1067       1.6       mrg 	int flags;
   1068      1.50       chs 	void *handle;
   1069      1.38    kleink 	voff_t foff;
   1070      1.25   thorpej 	vsize_t locklimit;
   1071       1.6       mrg {
   1072       1.6       mrg 	struct uvm_object *uobj;
   1073       1.6       mrg 	struct vnode *vp;
   1074      1.70      matt 	vaddr_t align = 0;
   1075      1.50       chs 	int error;
   1076       1.6       mrg 	int advice = UVM_ADV_NORMAL;
   1077       1.6       mrg 	uvm_flag_t uvmflag = 0;
   1078  1.91.2.3      yamt 	bool needwritemap;
   1079       1.6       mrg 
   1080       1.6       mrg 	/*
   1081       1.6       mrg 	 * check params
   1082       1.6       mrg 	 */
   1083       1.6       mrg 
   1084       1.6       mrg 	if (size == 0)
   1085       1.6       mrg 		return(0);
   1086       1.6       mrg 	if (foff & PAGE_MASK)
   1087       1.6       mrg 		return(EINVAL);
   1088       1.6       mrg 	if ((prot & maxprot) != prot)
   1089       1.6       mrg 		return(EINVAL);
   1090       1.6       mrg 
   1091       1.6       mrg 	/*
   1092       1.6       mrg 	 * for non-fixed mappings, round off the suggested address.
   1093       1.6       mrg 	 * for fixed mappings, check alignment and zap old mappings.
   1094       1.6       mrg 	 */
   1095       1.6       mrg 
   1096       1.6       mrg 	if ((flags & MAP_FIXED) == 0) {
   1097      1.56       chs 		*addr = round_page(*addr);
   1098       1.6       mrg 	} else {
   1099       1.6       mrg 		if (*addr & PAGE_MASK)
   1100       1.6       mrg 			return(EINVAL);
   1101       1.6       mrg 		uvmflag |= UVM_FLAG_FIXED;
   1102      1.56       chs 		(void) uvm_unmap(map, *addr, *addr + size);
   1103       1.6       mrg 	}
   1104       1.6       mrg 
   1105       1.6       mrg 	/*
   1106      1.70      matt 	 * Try to see if any requested alignment can even be attempted.
   1107      1.70      matt 	 * Make sure we can express the alignment (asking for a >= 4GB
   1108      1.70      matt 	 * alignment on an ILP32 architecture makes no sense) and that
   1109      1.70      matt 	 * the alignment is at least a page-sized quantity.  If the
   1110      1.70      matt 	 * request was for a fixed mapping, make sure the supplied address
   1111      1.70      matt 	 * adheres to the requested alignment.
   1112      1.70      matt 	 */
   1113      1.70      matt 	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
   1114      1.70      matt 	if (align) {
   1115      1.70      matt 		if (align >= sizeof(vaddr_t) * NBBY)
   1116      1.70      matt 			return(EINVAL);
   1117      1.70      matt 		align = 1L << align;
   1118      1.70      matt 		if (align < PAGE_SIZE)
   1119      1.70      matt 			return(EINVAL);
   1120      1.88       chs 		if (align >= vm_map_max(map))
   1121      1.70      matt 			return(ENOMEM);
   1122      1.70      matt 		if (flags & MAP_FIXED) {
   1123      1.70      matt 			if ((*addr & (align-1)) != 0)
   1124      1.70      matt 				return(EINVAL);
   1125      1.70      matt 			align = 0;
   1126      1.70      matt 		}
   1127      1.70      matt 	}
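	/*
	 * Worked example (informative): with the MAP_ALIGNED(n) encoding
	 * from <sys/mman.h>, MAP_ALIGNED(16) stores 16 in the
	 * MAP_ALIGNMENT_MASK bits, the shift above recovers 16, and
	 * align becomes 1L << 16 (64KB).  MAP_ALIGNED(64) fails with
	 * EINVAL on an LP64 machine because 64 >= sizeof(vaddr_t) * NBBY,
	 * and MAP_ALIGNED(4) fails wherever 1L << 4 is below PAGE_SIZE.
	 */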
   1128      1.70      matt 
   1129      1.70      matt 	/*
   1130       1.6       mrg 	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
   1131       1.6       mrg 	 * to underlying vm object.
   1132       1.6       mrg 	 */
   1133       1.6       mrg 
   1134       1.6       mrg 	if (flags & MAP_ANON) {
   1135  1.91.2.1      yamt 		KASSERT(handle == NULL);
   1136      1.36   thorpej 		foff = UVM_UNKNOWN_OFFSET;
   1137       1.6       mrg 		uobj = NULL;
   1138       1.6       mrg 		if ((flags & MAP_SHARED) == 0)
   1139       1.6       mrg 			/* XXX: defer amap create */
   1140       1.6       mrg 			uvmflag |= UVM_FLAG_COPYONW;
   1141       1.6       mrg 		else
   1142       1.6       mrg 			/* shared: create amap now */
   1143       1.6       mrg 			uvmflag |= UVM_FLAG_OVERLAY;
   1144       1.6       mrg 
   1145       1.6       mrg 	} else {
   1146  1.91.2.1      yamt 		KASSERT(handle != NULL);
   1147      1.50       chs 		vp = (struct vnode *)handle;
   1148      1.59   thorpej 
   1149      1.59   thorpej 		/*
   1150      1.59   thorpej 		 * Don't allow mmap for EXEC if the file system
   1151      1.59   thorpej 		 * is mounted NOEXEC.
   1152      1.59   thorpej 		 */
   1153      1.59   thorpej 		if ((prot & PROT_EXEC) != 0 &&
   1154      1.59   thorpej 		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
   1155      1.59   thorpej 			return (EACCES);
   1156      1.59   thorpej 
   1157       1.6       mrg 		if (vp->v_type != VCHR) {
   1158  1.91.2.6      yamt 			error = VOP_MMAP(vp, prot, curlwp->l_cred);
   1159      1.55       chs 			if (error) {
   1160      1.55       chs 				return error;
   1161      1.55       chs 			}
   1162  1.91.2.4      yamt 			vref(vp);
   1163  1.91.2.4      yamt 			uobj = &vp->v_uobj;
   1164      1.57   thorpej 
   1165      1.57   thorpej 			/*
   1166      1.57   thorpej 			 * If the vnode is being mapped with PROT_EXEC,
   1167      1.57   thorpej 			 * then mark it as text.
   1168      1.57   thorpej 			 */
   1169  1.91.2.5      yamt 			if (prot & PROT_EXEC) {
   1170  1.91.2.7      yamt 				mutex_enter(&vp->v_interlock);
   1171      1.58   thorpej 				vn_markexec(vp);
   1172  1.91.2.7      yamt 				mutex_exit(&vp->v_interlock);
   1173  1.91.2.5      yamt 			}
   1174       1.6       mrg 		} else {
   1175      1.83   darrenr 			int i = maxprot;
   1176      1.83   darrenr 
   1177      1.48   thorpej 			/*
   1178      1.48   thorpej 			 * XXX Some devices don't like to be mapped with
   1179      1.83   darrenr 			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
   1180      1.83   darrenr 			 * XXX have a better way of handling this, right now
   1181      1.48   thorpej 			 */
   1182      1.83   darrenr 			do {
   1183      1.83   darrenr 				uobj = udv_attach((void *) &vp->v_rdev,
   1184      1.83   darrenr 				    (flags & MAP_SHARED) ? i :
   1185      1.83   darrenr 				    (i & ~VM_PROT_WRITE), foff, size);
   1186      1.83   darrenr 				i--;
   1187      1.83   darrenr 			} while ((uobj == NULL) && (i > 0));
   1188       1.6       mrg 			advice = UVM_ADV_RANDOM;
   1189       1.6       mrg 		}
   1190       1.6       mrg 		if (uobj == NULL)
   1191      1.11   thorpej 			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
   1192  1.91.2.1      yamt 		if ((flags & MAP_SHARED) == 0) {
   1193       1.6       mrg 			uvmflag |= UVM_FLAG_COPYONW;
   1194  1.91.2.2      yamt 		}
   1195  1.91.2.2      yamt 
   1196  1.91.2.2      yamt 		/*
   1197  1.91.2.2      yamt 		 * Set vnode flags to indicate the new kinds of mapping.
   1198  1.91.2.2      yamt 		 * We take the vnode lock in exclusive mode here to serialize
   1199  1.91.2.2      yamt 		 * with direct I/O.
   1200  1.91.2.2      yamt 		 */
   1201  1.91.2.2      yamt 
   1202  1.91.2.7      yamt 		mutex_enter(&vp->v_interlock);
   1203  1.91.2.5      yamt 		needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
   1204  1.91.2.2      yamt 			(flags & MAP_SHARED) != 0 &&
   1205  1.91.2.2      yamt 			(maxprot & VM_PROT_WRITE) != 0;
   1206  1.91.2.5      yamt 		if ((vp->v_iflag & VI_MAPPED) == 0 || needwritemap) {
   1207  1.91.2.5      yamt 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK);
   1208  1.91.2.7      yamt 			mutex_enter(&vp->v_interlock);
   1209  1.91.2.5      yamt 			vp->v_iflag |= VI_MAPPED;
   1210  1.91.2.5      yamt 			vp->v_vflag |= VV_MAPPED;
   1211  1.91.2.2      yamt 			if (needwritemap) {
   1212  1.91.2.5      yamt 				vp->v_iflag |= VI_WRMAP;
   1213  1.91.2.2      yamt 			}
   1214  1.91.2.7      yamt 			mutex_exit(&vp->v_interlock);
   1215  1.91.2.2      yamt 			VOP_UNLOCK(vp, 0);
   1216  1.91.2.5      yamt 		} else
   1217  1.91.2.7      yamt 			mutex_exit(&vp->v_interlock);
   1218       1.6       mrg 	}
   1219       1.6       mrg 
   1220      1.51       chs 	uvmflag = UVM_MAPFLAG(prot, maxprot,
   1221       1.1       mrg 			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
   1222       1.1       mrg 			advice, uvmflag);
   1223      1.70      matt 	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
   1224      1.50       chs 	if (error) {
   1225      1.50       chs 		if (uobj)
   1226      1.50       chs 			uobj->pgops->pgo_detach(uobj);
   1227      1.50       chs 		return error;
   1228      1.50       chs 	}
   1229       1.1       mrg 
   1230       1.6       mrg 	/*
   1231      1.50       chs 	 * POSIX 1003.1b -- if our address space was configured
   1232      1.50       chs 	 * to lock all future mappings, wire the one we just made.
   1233      1.78   thorpej 	 *
   1234      1.78   thorpej 	 * Also handle the MAP_WIRED flag here.
   1235       1.6       mrg 	 */
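	/*
	 * (Informative note: VM_MAP_WIREFUTURE is the flag set on the map
	 * when the process calls mlockall(MCL_FUTURE), so such a mapping
	 * is wired here right away, just as if MAP_WIRED had been given.)
	 */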
   1236       1.6       mrg 
   1237      1.50       chs 	if (prot == VM_PROT_NONE) {
   1238       1.6       mrg 
   1239      1.25   thorpej 		/*
   1240      1.50       chs 		 * No more work to do in this case.
   1241      1.25   thorpej 		 */
   1242      1.25   thorpej 
   1243      1.50       chs 		return (0);
   1244      1.50       chs 	}
   1245      1.50       chs 	vm_map_lock(map);
   1246      1.78   thorpej 	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
   1247      1.87       chs 		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
   1248      1.87       chs 		    (locklimit != 0 &&
   1249      1.87       chs 		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
   1250      1.87       chs 		     locklimit)) {
   1251      1.50       chs 			vm_map_unlock(map);
   1252      1.50       chs 			uvm_unmap(map, *addr, *addr + size);
   1253      1.50       chs 			return ENOMEM;
   1254      1.25   thorpej 		}
   1255      1.25   thorpej 
   1256      1.50       chs 		/*
   1257      1.50       chs 		 * uvm_map_pageable() always returns the map unlocked.
   1258      1.50       chs 		 */
   1259      1.25   thorpej 
   1260      1.50       chs 		error = uvm_map_pageable(map, *addr, *addr + size,
   1261  1.91.2.3      yamt 					 false, UVM_LK_ENTER);
   1262      1.50       chs 		if (error) {
   1263      1.50       chs 			uvm_unmap(map, *addr, *addr + size);
   1264      1.50       chs 			return error;
   1265      1.50       chs 		}
   1266      1.25   thorpej 		return (0);
   1267      1.25   thorpej 	}
   1268      1.50       chs 	vm_map_unlock(map);
   1269      1.50       chs 	return 0;
   1270       1.1       mrg }
   1271      1.89      fvdl 
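/*
 * uvm_default_mapaddr: return the default start address for a new
 * non-fixed mapping, as computed by the port's VM_DEFAULT_ADDRESS()
 * macro from the given base and size.  The proc argument is unused
 * here; it is accepted so that emulation-specific variants of this
 * hook can make use of it.
 */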
   1272      1.89      fvdl vaddr_t
   1273      1.89      fvdl uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
   1274      1.89      fvdl {
   1275  1.91.2.2      yamt 
   1276      1.89      fvdl 	return VM_DEFAULT_ADDRESS(base, sz);
   1277      1.89      fvdl }
   1278