/*	$NetBSD: uvm_mmap.c,v 1.172 2019/04/06 03:06:29 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.172 2019/04/06 03:06:29 thorpej Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"

#include <sys/types.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/pax.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

static int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t,
    int, int, struct uvm_object *, voff_t, vsize_t);

static int
range_test(const struct vm_map *map, vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = vm_map_min(map);
	vaddr_t vm_max_address = vm_map_max(map);
	vaddr_t eaddr = addr + size;
	int res = 0;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;

#ifdef MD_MMAP_RANGE_TEST
	res = MD_MMAP_RANGE_TEST(addr, eaddr);
#endif

	return res;
}

/*
 * align the address to a page boundary, and adjust the size accordingly
 */
static int
round_and_check(const struct vm_map *map, vaddr_t *addr, vsize_t *size)
{
	const vsize_t pageoff = (vsize_t)(*addr & PAGE_MASK);

	*addr -= pageoff;

	if (*size != 0) {
		*size += pageoff;
		*size = (vsize_t)round_page(*size);
	} else if (*addr + *size < *addr) {
		return ENOMEM;
	}

	return range_test(map, *addr, *size, false);
}
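
/*
 * Worked example (illustrative only, assuming 4KB pages): for
 * *addr == 0x201234 and *size == 0x1000, pageoff is 0x234, so the
 * checked range becomes *addr == 0x201000 and
 * *size == round_page(0x1234) == 0x2000 -- the same bytes the caller
 * named, widened to whole pages.
 */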

/*
 * sys_mincore: determine if pages are in core or not.
 */

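/*
 * Userland sketch (illustrative, not part of this file): mincore(2)
 * fills one status byte per page, so a caller might do
 *
 *	char vec[4];
 *	if (mincore(va, 4 * PAGE_SIZE, vec) == 0 && vec[0] != 0)
 *		printf("first page resident\n");
 *
 * where "va" is a page-aligned address inside an existing mapping
 * (the code below returns EINVAL for an unaligned start).
 */
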
/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return EINVAL;
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return EINVAL;

	/*
	 * Lock down vec, so that a fault taken while storing a page's
	 * status byte cannot make the statuses we already stored stale.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		     (entry->next == &map->header ||
		      entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					ustore_char(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* upper layer */
		uobj = entry->object.uvm_obj;	/* lower layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			mutex_enter(uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the upper layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the lower layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) ustore_char(vec, pgi);
		}
		if (uobj != NULL)
			mutex_exit(uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return error;
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */

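/*
 * Illustrative userland view of that rule (not part of this file;
 * "fd" is a hypothetical open descriptor, 4KB pages assumed):
 *
 *	off_t off = 0x12345;		(pageoff == off & PAGE_MASK)
 *	char *va = mmap(NULL, len, PROT_READ, MAP_FILE | MAP_PRIVATE,
 *	    fd, off);
 *
 * The kernel maps from trunc_page(off) and returns va with its low
 * bits equal to pageoff, so va points exactly at file byte "off".
 */
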
int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	off_t pos;
	vsize_t size, pageoff, newsize;
	vm_prot_t prot, maxprot, extraprot;
	int flags, fd, advice;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct uvm_object *uobj;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	extraprot = PROT_MPROTECT_EXTRACT(SCARG(uap, prot));
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return EINVAL;

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos    -= pageoff;
	newsize = size + pageoff;		/* add offset */
	newsize = (vsize_t)round_page(newsize);	/* round up */

	if (newsize < size)
		return ENOMEM;
	size = newsize;

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return EINVAL;

		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
		if (error) {
			return error;
		}
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

		if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	advice = UVM_ADV_NORMAL;
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		if (fp->f_ops->fo_mmap == NULL) {
			error = ENODEV;
			goto out;
		}
		error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
		    &advice, &uobj, &maxprot);
		if (error) {
			goto out;
		}
		if (uobj == NULL) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}
	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return EINVAL;

 is_anon:		/* label for SunOS style /dev/zero */
		uobj = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	maxprot = PAX_MPROTECT_MAXPROTECT(l, prot, extraprot, maxprot);
	if (((prot | extraprot) & maxprot) != (prot | extraprot)) {
		error = EACCES;
		goto out;
	}
	if ((error = PAX_MPROTECT_VALIDATE(l, prot)))
		goto out;

	pax_aslr_mmap(l, &addr, orig_addr, flags);

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, advice, uobj, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	/* remember to add offset */
	*retval = (register_t)(addr + pageoff);

 out:
	if (fp != NULL)
		fd_putfile(fd);

	return error;
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	int error, flags, uvmflags;
	bool rv;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return EINVAL;
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * get map
	 */
	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return ENOMEM;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return EINVAL;
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}
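
/*
 * Userland sketch (illustrative, not part of this file): flush a
 * mapped region synchronously and drop the cached pages, where "va"
 * and "len" describe an existing mapping:
 *
 *	if (msync(va, len, MS_SYNC | MS_INVALIDATE) == -1)
 *		err(EXIT_FAILURE, "msync");
 *
 * MS_SYNC becomes PGO_SYNCIO and MS_INVALIDATE becomes PGO_FREE in
 * the translation above.
 */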

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return EINVAL;

	if (size == 0)
		return 0;

	vm_map_lock(map);
#if 0
	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return EINVAL;
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return 0;
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_protect_user(l, addr, addr + size, prot);
	return error;
}
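
/*
 * Userland sketch (illustrative, not part of this file): flip a
 * buffer from writable to executable:
 *
 *	if (mprotect(buf, len, PROT_READ | PROT_EXEC) == -1)
 *		err(EXIT_FAILURE, "mprotect");
 *
 * Whether adding PROT_EXEC succeeds is bounded by the maximum
 * protection recorded when the region was mapped (cf. the
 * PAX_MPROTECT_MAXPROTECT clamp in sys_mmap above).
 */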

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
   register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
   register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map,
		    addr, addr + size);
		break;

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return EINVAL;

	default:
		return EINVAL;
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return EAGAIN;

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return EAGAIN;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true, 0))
		return ENOMEM;

	return 0;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 || (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return EINVAL;

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return 0;
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - uobj is a struct uvm_object pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, int advice, struct uvm_object *uobj,
    voff_t foff, vsize_t locklimit)
{
	vaddr_t align = 0;
	int error;
	uvm_flag_t uvmflag = 0;

	/*
	 * check params
	 */

	if (size == 0)
		return 0;
	if (foff & PAGE_MASK)
		return EINVAL;
	if ((prot & maxprot) != prot)
		return EINVAL;

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return EINVAL;
		uvmflag |= UVM_FLAG_FIXED | UVM_FLAG_UNMAP;
	}

	/*
	 * See whether the requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * it is at least a page-sized quantity.  If the request was for
	 * a fixed mapping, make sure the supplied address adheres to
	 * the requested alignment.
	 */
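	/*
	 * For instance (illustrative, assuming the MAP_ALIGNED(n)
	 * encoding from <sys/mman.h>): a caller passing MAP_ALIGNED(16)
	 * stores log2 of the alignment in the flag bits, so the code
	 * below recovers align = 1L << 16 (64KB), which passes these
	 * checks on any port whose PAGE_SIZE is <= 64KB.
	 */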
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1L << align;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(map))
			return ENOMEM;
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return EINVAL;
			align = 0;
		}
	}

	/*
	 * check resource limits
	 */

	if (!VM_MAP_IS_KERNEL(map) &&
	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
	    curproc->p_rlimit[RLIMIT_AS].rlim_cur))
		return ENOMEM;

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(uobj == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(uobj != NULL);
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY, advice,
	    uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return 0;
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
		    false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return 0;
	}
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz, int topdown)
{

	if (topdown)
		return VM_DEFAULT_ADDRESS_TOPDOWN(base, sz);
	else
		return VM_DEFAULT_ADDRESS_BOTTOMUP(base, sz);
}

int
uvm_mmap_dev(struct proc *p, void **addrp, size_t len, dev_t dev,
    off_t off)
{
	struct uvm_object *uobj;
	int error, flags, prot;

	flags = MAP_SHARED;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	uobj = udv_attach(dev, prot, off, len);
	if (uobj == NULL)
		return EINVAL;

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM, uobj, off,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}
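
/*
 * Driver-side sketch (illustrative; "sc" and its members are
 * hypothetical): a framebuffer driver could hand userland a mapping
 * of its aperture with
 *
 *	void *va = NULL;
 *	error = uvm_mmap_dev(curproc, &va, sc->sc_fbsize,
 *	    sc->sc_fbdev, 0);
 *
 * On success *addrp (here va) holds the chosen user virtual address;
 * the udv_attach() call above supplies the device pager object.
 */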

int
uvm_mmap_anon(struct proc *p, void **addrp, size_t len)
{
	int error, flags, prot;

	flags = MAP_PRIVATE | MAP_ANON;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL, NULL, 0,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}
   1012