/*	$NetBSD: uvm_mmap.c,v 1.137 2011/06/23 23:42:44 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *	@(#)vm_mmap.c	8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.137 2011/06/23 23:42:44 matt Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

#if defined(PAX_ASLR) || defined(PAX_MPROTECT)
#include <sys/pax.h>
#endif /* PAX_ASLR || PAX_MPROTECT */

#include <miscfs/specfs/specdev.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

static int
range_test(vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	vaddr_t vm_max_address = VM_MAXUSER_ADDRESS;
	vaddr_t eaddr = addr + size;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;
	return 0;
}
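
/*
 * Illustrative only (not from the original source): the wrap check
 * above catches requests whose end address overflows.  On an ILP32
 * machine, addr = 0xfffff000 with size = 0x2000 yields eaddr = 0x1000,
 * so addr > eaddr and the request is rejected with EOVERFLOW (for
 * mmap) or EINVAL (for the other callers).
 */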

/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
{
	/* {
		syscallarg(intptr_t) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */
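
/*
 * Illustrative userland usage (a sketch, not part of this file):
 *
 *	long pgsz = sysconf(_SC_PAGESIZE);
 *	char *vec = malloc((len + pgsz - 1) / pgsz);
 *	if (vec == NULL || mincore(addr, len, vec) == -1)
 *		err(1, "mincore");
 *
 * Afterwards vec[i] != 0 means page i of the range is resident.
 */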

/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * page faults taken while storing the status bytes.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		    (entry->next == &map->header ||
		     entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* upper layer */
		uobj = entry->object.uvm_obj;	/* lower layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			mutex_enter(uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the upper layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the lower layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			mutex_exit(uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return (error);
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */
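
/*
 * Illustrative only: mapping a file at offset 0x1234 without MAP_FIXED
 * (4KB pages assumed) actually maps from file offset 0x1000, and the
 * returned pointer is bumped up by the 0x234 page offset, so the
 * caller still sees the bytes it asked for at the returned address.
 */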

int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct vnode *vp;
	void *handle;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */
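
	/*
	 * Worked example of the adjustment above (illustrative, 4KB
	 * pages): pos = 0x1234 gives pageoff = 0x234 and pos = 0x1000;
	 * a requested size of 0x100 grows to 0x334 and rounds up to
	 * 0x1000, so the mapping covers the whole page and the bytes at
	 * the original offset land pageoff bytes into it.
	 */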

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		error = range_test(addr, size, true);
		if (error)
			return error;
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			fd_putfile(fd);
			return (ENODEV);		/* only mmap vnodes! */
		}
		vp = fp->f_data;		/* convert to vnode */
		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			fd_putfile(fd);
			return (ENODEV);  /* only REG/CHR/BLK support mmap */
		}
		if (vp->v_type != VCHR && pos < 0) {
			fd_putfile(fd);
			return (EINVAL);
		}
		if (vp->v_type != VCHR && (pos + size) < pos) {
			fd_putfile(fd);
			return (EOVERFLOW);		/* no offset wrapping */
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			    "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			    "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			fd_putfile(fd);
			return (EACCES);
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable, append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, l->l_cred))) {
					fd_putfile(fd);
					return (error);
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					fd_putfile(fd);
					return (EPERM);
				}
			}
			else if (prot & PROT_WRITE) {
				fd_putfile(fd);
				return (EACCES);
			}
		} else {
			/* MAP_PRIVATE mappings can always be written to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

#if NVERIEXEC > 0
	if (handle != NULL) {
		/*
		 * Check if the file can be executed indirectly.
		 *
		 * XXX: This gives false warnings about "Incorrect access type"
		 * XXX: if the mapping is not executable. Harmless, but will be
		 * XXX: fixed as part of other changes.
		 */
		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
		    NULL)) {
			/*
			 * Don't allow executable mappings if we can't
			 * indirectly execute the file.
			 */
			if (prot & VM_PROT_EXECUTE) {
				if (fp != NULL)
					fd_putfile(fd);
				return (EPERM);
			}

			/*
			 * Strip the executable bit from 'maxprot' to make sure
			 * it can't be made executable later.
			 */
			maxprot &= ~VM_PROT_EXECUTE;
		}
	}
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PAX_ASLR
	pax_aslr(l, &addr, orig_addr, flags);
#endif /* PAX_ASLR */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	if (fp != NULL)
		fd_putfile(fd);

	return (error);
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */
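
/*
 * Illustrative userland usage (a sketch, not part of this file):
 *
 *	if (msync(addr, len, MS_SYNC) == -1)
 *		err(1, "msync");
 *
 * MS_SYNC waits for the flush to complete, MS_ASYNC merely starts it,
 * and adding MS_INVALIDATE also frees the cached pages (see the
 * PGO_ flag translation below).
 */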

int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}

/*
 * sys_munmap: unmap a user's memory
 */
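
/*
 * Illustrative only: because addr is truncated and len rounded up
 * below, munmap((void *)0x1100, 0x100) on a 4KB-page machine unmaps
 * the whole page [0x1000, 0x2000).
 */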

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if (size == 0)
		return (0);

	error = range_test(addr, size, false);
	if (error)
		return error;

	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */
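
/*
 * Illustrative userland usage (a sketch, not part of this file):
 * revoke write access after initializing a region, keeping it
 * readable.
 *
 *	if (mprotect(addr, len, PROT_READ) == -1)
 *		err(1, "mprotect");
 */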

int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
	    false);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */
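
/*
 * Illustrative userland usage (a sketch, not part of this file):
 * keep a region out of child processes created by fork().
 *
 *	if (minherit(addr, len, MAP_INHERIT_NONE) == -1)
 *		err(1, "minherit");
 */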

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */
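
/*
 * Illustrative userland usage (a sketch, not part of this file):
 *
 *	if (madvise(addr, len, MADV_SEQUENTIAL) == -1)
 *		err(1, "madvise");
 *
 * Note the distinction below: MADV_DONTNEED lets the pages be
 * reclaimed but preserves their contents (PGO_DEACTIVATE), while
 * MADV_FREE discards the contents as well (PGO_FREE).
 */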

/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map,
		    addr, addr + size);
		break;

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */
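
/*
 * Illustrative userland usage (a sketch, not part of this file):
 * wire a buffer so it can't be paged out, subject to RLIMIT_MEMLOCK
 * and the global wired-page limit checked below.
 *
 *	if (mlock(buf, buflen) == -1)
 *		err(1, "mlock");
 */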

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */
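
/*
 * Illustrative userland usage (a sketch, not part of this file):
 * wire everything currently mapped and everything mapped from now on.
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 */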

int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */
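
/*
 * Hypothetical in-kernel caller (a sketch only; the names fb_vp and
 * va are invented): a framebuffer driver could establish a shared
 * device mapping with
 *
 *	error = uvm_mmap(&p->p_vmspace->vm_map, &va, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    MAP_SHARED, fb_vp, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * where fb_vp is the device vnode and the offset is already
 * page-aligned, as required above.
 */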

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, void *handle, voff_t foff, vsize_t locklimit)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;
	bool needwritemap;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least page-sized.  If the request was for
	 * a fixed mapping, make sure the supplied address adheres to
	 * the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= vm_map_max(map))
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}
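
	/*
	 * Illustrative only: with the MAP_ALIGNED(n) encoding from
	 * <sys/mman.h>, MAP_ALIGNED(16) puts 16 in the alignment field,
	 * which the code above turns into align = 1L << 16, i.e. a 64KB
	 * alignment request.  A value of 10 would fail the
	 * align < PAGE_SIZE test on a machine with 4KB pages.
	 */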

	/*
	 * check resource limits
	 */

	if (!VM_MAP_IS_KERNEL(map) &&
	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
	     curproc->p_rlimit[RLIMIT_AS].rlim_cur))
		return ENOMEM;

	/*
	 * handle anon vs. non-anon mappings.  for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(handle == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(handle != NULL);
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, prot, curlwp->l_cred);
			if (error) {
				return error;
			}
			vref(vp);
			uobj = &vp->v_uobj;

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC) {
				vn_markexec(vp);
			}
		} else {
			int i = maxprot;

			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
			 * XXX have a better way of handling this, right now
			 */
			do {
				uobj = udv_attach((void *) &vp->v_rdev,
				    (flags & MAP_SHARED) ? i :
				    (i & ~VM_PROT_WRITE), foff, size);
				i--;
			} while ((uobj == NULL) && (i > 0));
			if (uobj == NULL)
				return EINVAL;
			advice = UVM_ADV_RANDOM;
		}
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}

		/*
		 * Set vnode flags to indicate the new kinds of mapping.
		 * We take the vnode lock in exclusive mode here to serialize
		 * with direct I/O.
		 *
		 * Safe to check for these flag values without a lock, as
		 * long as a reference to the vnode is held.
		 */
		needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
		    (flags & MAP_SHARED) != 0 &&
		    (maxprot & VM_PROT_WRITE) != 0;
		if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vp->v_vflag |= VV_MAPPED;
			if (needwritemap) {
				mutex_enter(vp->v_interlock);
				vp->v_iflag |= VI_WRMAP;
				mutex_exit(vp->v_interlock);
			}
			VOP_UNLOCK(vp);
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
	    advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
		    false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return VM_DEFAULT_ADDRESS(base, sz);
}