/*	$NetBSD: uvm_mmap.c,v 1.79 2003/11/29 19:06:48 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Charles D. Cranor,
 *      Washington University, University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.79 2003/11/29 19:06:48 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <miscfs/specfs/specdev.h>

#include <sys/sa.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>


/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
#if 0
        struct sys_sbrk_args /* {
                syscallarg(intptr_t) incr;
        } */ *uap = v;
#endif

        return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
#if 0
        struct sys_sstk_args /* {
                syscallarg(int) incr;
        } */ *uap = v;
#endif

        return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */
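
/*
 * Userland sketch (illustrative, not from the original sources): after a
 * successful mincore(addr, len, vec), vec[i] is non-zero iff the i-th page
 * of the page-aligned range was resident when it was inspected (device
 * mappings are always reported resident).  The answer is advisory only
 * and may be stale by the time the caller reads it.
 */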

/* ARGSUSED */
int
sys_mincore(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_mincore_args /* {
                syscallarg(void *) addr;
                syscallarg(size_t) len;
                syscallarg(char *) vec;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        struct vm_page *pg;
        char *vec, pgi;
        struct uvm_object *uobj;
        struct vm_amap *amap;
        struct vm_anon *anon;
        struct vm_map_entry *entry;
        vaddr_t start, end, lim;
        struct vm_map *map;
        vsize_t len;
        int error = 0, npgs;

        map = &p->p_vmspace->vm_map;

        start = (vaddr_t)SCARG(uap, addr);
        len = SCARG(uap, len);
        vec = SCARG(uap, vec);

        if (start & PAGE_MASK)
                return (EINVAL);
        len = round_page(len);
        end = start + len;
        if (end <= start)
                return (EINVAL);

        /*
         * Lock down vec, so our returned status isn't outdated by
         * storing the status byte for a page.
         */

        npgs = len >> PAGE_SHIFT;
        error = uvm_vslock(p, vec, npgs, VM_PROT_WRITE);
        if (error) {
                return error;
        }
        vm_map_lock_read(map);

        if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
                error = ENOMEM;
                goto out;
        }

        for (/* nothing */;
             entry != &map->header && entry->start < end;
             entry = entry->next) {
                KASSERT(!UVM_ET_ISSUBMAP(entry));
                KASSERT(start >= entry->start);

                /* Make sure there are no holes. */
                if (entry->end < end &&
                     (entry->next == &map->header ||
                      entry->next->start > entry->end)) {
                        error = ENOMEM;
                        goto out;
                }

                lim = end < entry->end ? end : entry->end;

                /*
                 * Special case for objects with no "real" pages.  Those
                 * are always considered resident (mapped devices).
                 */

                if (UVM_ET_ISOBJ(entry)) {
                        KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
                        if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
                                for (/* nothing */; start < lim;
                                     start += PAGE_SIZE, vec++)
                                        subyte(vec, 1);
                                continue;
                        }
                }

                amap = entry->aref.ar_amap;     /* top layer */
                uobj = entry->object.uvm_obj;   /* bottom layer */
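
                /*
                 * Illustrative example of the two layers (an assumed-typical
                 * scenario, not from the original comments): in a
                 * MAP_PRIVATE file mapping that has been written to, the
                 * modified pages live as anons in the amap (top layer),
                 * while never-written pages are still found in the backing
                 * object (bottom layer).  Hence the lookup order below:
                 * amap first, then uobj.
                 */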

                if (amap != NULL)
                        amap_lock(amap);
                if (uobj != NULL)
                        simple_lock(&uobj->vmobjlock);

                for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
                        pgi = 0;
                        if (amap != NULL) {
                                /* Check the top layer first. */
                                anon = amap_lookup(&entry->aref,
                                    start - entry->start);
                                /* Don't need to lock anon here. */
                                if (anon != NULL && anon->u.an_page != NULL) {

                                        /*
                                         * Anon has the page for this entry
                                         * offset.
                                         */

                                        pgi = 1;
                                }
                        }
                        if (uobj != NULL && pgi == 0) {
                                /* Check the bottom layer. */
                                pg = uvm_pagelookup(uobj,
                                    entry->offset + (start - entry->start));
                                if (pg != NULL) {

                                        /*
                                         * Object has the page for this entry
                                         * offset.
                                         */

                                        pgi = 1;
                                }
                        }
                        (void) subyte(vec, pgi);
                }
                if (uobj != NULL)
                        simple_unlock(&uobj->vmobjlock);
                if (amap != NULL)
                        amap_unlock(amap);
        }

 out:
        vm_map_unlock_read(map);
        uvm_vsunlock(p, SCARG(uap, vec), npgs);
        return (error);
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */
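
/*
 * Worked example of the adjustment described above (illustrative values,
 * assuming 4KB pages): a non-fixed call with pos = 0x12345 gives
 * pageoff = 0x345, so the file position is truncated to 0x12000, the
 * length is grown by 0x345 and rounded up to a page multiple, and on
 * success the value returned to the caller is the page-aligned mapping
 * address plus 0x345.
 */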

int
sys_mmap(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_mmap_args /* {
                syscallarg(caddr_t) addr;
                syscallarg(size_t) len;
                syscallarg(int) prot;
                syscallarg(int) flags;
                syscallarg(int) fd;
                syscallarg(long) pad;
                syscallarg(off_t) pos;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        struct vattr va;
        off_t pos;
        vsize_t size, pageoff;
        vm_prot_t prot, maxprot;
        int flags, fd;
        vaddr_t vm_min_address = VM_MIN_ADDRESS;
        struct filedesc *fdp = p->p_fd;
        struct file *fp;
        struct vnode *vp;
        void *handle;
        int error;

        /*
         * first, extract syscall args from the uap.
         */

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);
        prot = SCARG(uap, prot) & VM_PROT_ALL;
        flags = SCARG(uap, flags);
        fd = SCARG(uap, fd);
        pos = SCARG(uap, pos);

        /*
         * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
         * validate the flags.
         */
        if (flags & MAP_COPY)
                flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
        if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
                return (EINVAL);

        /*
         * align file position and save offset.  adjust size.
         */

        pageoff = (pos & PAGE_MASK);
        pos -= pageoff;
        size += pageoff;                        /* add offset */
        size = (vsize_t)round_page(size);       /* round up */
        if ((ssize_t) size < 0)
                return (EINVAL);                /* don't allow wrap */

#ifndef pmap_wired_count
        /*
         * if we're going to wire the mapping, restrict it to superuser.
         */

        if ((flags & MAP_WIRED) != 0 &&
            (error = suser(p->p_ucred, &p->p_acflag)) != 0)
                return (error);
#endif

        /*
         * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
         */

        if (flags & MAP_FIXED) {

                /* ensure address and file offset are aligned properly */
                addr -= pageoff;
                if (addr & PAGE_MASK)
                        return (EINVAL);

                if (VM_MAXUSER_ADDRESS > 0 &&
                    (addr + size) > VM_MAXUSER_ADDRESS)
                        return (EFBIG);
                if (vm_min_address > 0 && addr < vm_min_address)
                        return (EINVAL);
                if (addr > addr + size)
                        return (EOVERFLOW);     /* no wrapping! */

        } else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

                /*
                 * not fixed: make sure we skip over the largest
                 * possible heap for non-topdown mapping arrangements.
                 * we will refine our guess later (e.g. to account for
                 * VAC, etc)
                 */

                if (addr == 0 ||
                    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
                        addr = MAX(addr,
                            VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size));
                else
                        addr = MIN(addr,
                            VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size));
        }

        /*
         * check for file mappings (i.e. not anonymous) and verify file.
         */

        if ((flags & MAP_ANON) == 0) {

                if ((fp = fd_getfile(fdp, fd)) == NULL)
                        return (EBADF);

                simple_unlock(&fp->f_slock);

                if (fp->f_type != DTYPE_VNODE)
                        return (ENODEV);        /* only mmap vnodes! */
                vp = (struct vnode *)fp->f_data;        /* convert to vnode */

                if (vp->v_type != VREG && vp->v_type != VCHR &&
                    vp->v_type != VBLK)
                        return (ENODEV); /* only REG/CHR/BLK support mmap */

                if (vp->v_type != VCHR && pos < 0)
                        return (EINVAL);

                if (vp->v_type != VCHR && (pos + size) < pos)
                        return (EOVERFLOW);     /* no offset wrapping */

                /* special case: catch SunOS style /dev/zero */
                if (vp->v_type == VCHR && vp->v_rdev == zerodev) {
                        flags |= MAP_ANON;
                        goto is_anon;
                }

                /*
                 * Old programs may not select a specific sharing type, so
                 * default to an appropriate one.
                 *
                 * XXX: how does MAP_ANON fit in the picture?
                 */
                if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
                        printf("WARNING: defaulted mmap() share type to "
                            "%s (pid %d command %s)\n", vp->v_type == VCHR ?
                            "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
                            p->p_comm);
#endif
                        if (vp->v_type == VCHR)
                                flags |= MAP_SHARED;    /* for a device */
                        else
                                flags |= MAP_PRIVATE;   /* for a file */
                }

                /*
                 * MAP_PRIVATE device mappings don't make sense (and aren't
                 * supported anyway).  However, some programs rely on this,
                 * so just change it to MAP_SHARED.
                 */
                if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
                        flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
                }

                /*
                 * now check protection
                 */

                maxprot = VM_PROT_EXECUTE;

                /* check read access */
                if (fp->f_flag & FREAD)
                        maxprot |= VM_PROT_READ;
                else if (prot & PROT_READ)
                        return (EACCES);

                /* check write access, shared case first */
                if (flags & MAP_SHARED) {
                        /*
                         * if the file is writable, only add PROT_WRITE to
                         * maxprot if the file is not immutable, append-only.
                         * otherwise, if we have asked for PROT_WRITE, return
                         * EPERM.
                         */
                        if (fp->f_flag & FWRITE) {
                                if ((error =
                                    VOP_GETATTR(vp, &va, p->p_ucred, p)))
                                        return (error);
                                if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
                                        maxprot |= VM_PROT_WRITE;
                                else if (prot & PROT_WRITE)
                                        return (EPERM);
                        }
                        else if (prot & PROT_WRITE)
                                return (EACCES);
                } else {
                        /*
                         * MAP_PRIVATE mappings can always be written to:
                         * writes go to the process's own copy-on-write
                         * copy, never back to the file.
                         */
                        maxprot |= VM_PROT_WRITE;
                }
                handle = vp;

        } else {                /* MAP_ANON case */
                /*
                 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
                 */
                if (fd != -1)
                        return (EINVAL);

 is_anon:               /* label for SunOS style /dev/zero */
                handle = NULL;
                maxprot = VM_PROT_ALL;
                pos = 0;
        }

        /*
         * XXX (in)sanity check.  We don't do proper datasize checking
         * XXX for anonymous (or private writable) mmap().  However, we
         * XXX know that if we're trying to allocate more than the amount
         * XXX remaining under our current data size limit, _that_ should
         * XXX be disallowed.
         */
        if ((flags & MAP_ANON) != 0 ||
            ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
                if (size >
                    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
                     ctob(p->p_vmspace->vm_dsize))) {
                        return (ENOMEM);
                }
        }
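
        /*
         * Illustrative numbers for the check above (hypothetical): with
         * RLIMIT_DATA's rlim_cur at 64MB and ctob(vm_dsize) already at
         * 60MB, an anonymous mapping of 8MB exceeds the 4MB remaining
         * and fails with ENOMEM, while a 2MB request passes.
         */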

        /*
         * now let kernel internal function uvm_mmap do the work.
         */

        error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
            flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

        if (error == 0)
                /* remember to add offset */
                *retval = (register_t)(addr + pageoff);

        return (error);
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys___msync13_args /* {
                syscallarg(caddr_t) addr;
                syscallarg(size_t) len;
                syscallarg(int) flags;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        vsize_t size, pageoff;
        struct vm_map *map;
        int error, rv, flags, uvmflags;

        /*
         * extract syscall args from the uap
         */

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);
        flags = SCARG(uap, flags);

        /* sanity check flags */
        if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
            (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
            (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
                return (EINVAL);
        if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
                flags |= MS_SYNC;

        /*
         * align the address to a page boundary and adjust the size accordingly.
         */

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vsize_t)round_page(size);

        /* disallow wrap-around. */
        if (addr + size < addr)
                return (EINVAL);

        /*
         * get map
         */

        map = &p->p_vmspace->vm_map;

        /*
         * XXXCDC: do we really need this semantic?
         *
         * XXX Gak!  If size is zero we are supposed to sync "all modified
         * pages with the region containing addr".  Unfortunately, we
         * don't really keep track of individual mmaps so we approximate
         * by flushing the range of the map entry containing addr.
         * This can be incorrect if the region splits or is coalesced
         * with a neighbor.
         */
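
        /*
         * Concrete consequence (illustrative, not from the original
         * comments): two adjacent mmap() regions with identical
         * attributes may have been coalesced into a single map entry,
         * so msync(addr, 0, MS_SYNC) on one of them flushes both.
         */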

        if (size == 0) {
                struct vm_map_entry *entry;

                vm_map_lock_read(map);
                rv = uvm_map_lookup_entry(map, addr, &entry);
                if (rv == TRUE) {
                        addr = entry->start;
                        size = entry->end - entry->start;
                }
                vm_map_unlock_read(map);
                if (rv == FALSE)
                        return (EINVAL);
        }

        /*
         * translate MS_ flags into PGO_ flags
         */

        uvmflags = PGO_CLEANIT;
        if (flags & MS_INVALIDATE)
                uvmflags |= PGO_FREE;
        if (flags & MS_SYNC)
                uvmflags |= PGO_SYNCIO;
        else
                uvmflags |= PGO_SYNCIO; /* XXXCDC: force sync for now! */

        error = uvm_map_clean(map, addr, addr+size, uvmflags);
        return error;
}

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_munmap_args /* {
                syscallarg(caddr_t) addr;
                syscallarg(size_t) len;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        vsize_t size, pageoff;
        struct vm_map *map;
        vaddr_t vm_min_address = VM_MIN_ADDRESS;
        struct vm_map_entry *dead_entries;

        /*
         * get syscall args.
         */

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);

        /*
         * align the address to a page boundary and adjust the size accordingly.
         */

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vsize_t)round_page(size);

        if ((int)size < 0)
                return (EINVAL);
        if (size == 0)
                return (0);

        /*
         * Check for illegal addresses.  Watch out for address wrap...
         * Note that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
                return (EINVAL);
        if (vm_min_address > 0 && addr < vm_min_address)
                return (EINVAL);
        if (addr > addr + size)
                return (EINVAL);
        map = &p->p_vmspace->vm_map;

        /*
         * interesting system call semantic: make sure entire range is
         * allocated before allowing an unmap.
         */

        vm_map_lock(map);
#if 0
        if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
                vm_map_unlock(map);
                return (EINVAL);
        }
#endif
        uvm_unmap_remove(map, addr, addr + size, &dead_entries);
        vm_map_unlock(map);
        if (dead_entries != NULL)
                uvm_unmap_detach(dead_entries, 0);
        return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_mprotect_args /* {
                syscallarg(caddr_t) addr;
                syscallarg(size_t) len;
                syscallarg(int) prot;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        vsize_t size, pageoff;
        vm_prot_t prot;
        int error;

        /*
         * extract syscall args from uap
         */

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);
        prot = SCARG(uap, prot) & VM_PROT_ALL;

        /*
         * align the address to a page boundary and adjust the size accordingly.
         */

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = round_page(size);

        error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
            FALSE);
        return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_minherit_args /* {
                syscallarg(caddr_t) addr;
                syscallarg(int) len;
                syscallarg(int) inherit;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        vsize_t size, pageoff;
        vm_inherit_t inherit;
        int error;

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);
        inherit = SCARG(uap, inherit);

        /*
         * align the address to a page boundary and adjust the size accordingly.
         */

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vsize_t)round_page(size);

        if ((int)size < 0)
                return (EINVAL);
        error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
            inherit);
        return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_madvise_args /* {
                syscallarg(caddr_t) addr;
                syscallarg(size_t) len;
                syscallarg(int) behav;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        vsize_t size, pageoff;
        int advice, error;

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);
        advice = SCARG(uap, behav);

        /*
         * align the address to a page boundary, and adjust the size accordingly
         */

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vsize_t)round_page(size);

        if ((ssize_t)size <= 0)
                return (EINVAL);

        switch (advice) {
        case MADV_NORMAL:
        case MADV_RANDOM:
        case MADV_SEQUENTIAL:
                error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
                    advice);
                break;

        case MADV_WILLNEED:

                /*
                 * Activate all these pages, pre-faulting them in if
                 * necessary.
                 */
                /*
                 * XXX IMPLEMENT ME.
                 * Should invent a "weak" mode for uvm_fault()
                 * which would only do the PGO_LOCKED pgo_get().
                 */

                return (0);

        case MADV_DONTNEED:

                /*
                 * Deactivate all these pages.  We don't need them
                 * any more.  We don't, however, toss the data in
                 * the pages.
                 */

                error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
                    PGO_DEACTIVATE);
                break;

        case MADV_FREE:

                /*
                 * These pages contain no valid data, and may be
                 * garbage-collected.  Toss all resources, including
                 * any swap space in use.
                 */

                error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
                    PGO_FREE);
                break;

        case MADV_SPACEAVAIL:

                /*
                 * XXXMRG What is this?  I think it's:
                 *
                 *      Ensure that we have allocated backing-store
                 *      for these pages.
                 *
                 * This is going to require changes to the page daemon,
                 * as it will free swap space allocated to pages in core.
                 * There's also what to do for device/file/anonymous memory.
                 */

                return (EINVAL);

        default:
                return (EINVAL);
        }

        return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_mlock_args /* {
                syscallarg(const void *) addr;
                syscallarg(size_t) len;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        vsize_t size, pageoff;
        int error;

        /*
         * extract syscall args from uap
         */

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);

        /*
         * align the address to a page boundary and adjust the size accordingly
         */

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vsize_t)round_page(size);

        /* disallow wrap-around. */
        if (addr + size < addr)
                return (EINVAL);

        if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
                return (EAGAIN);

#ifdef pmap_wired_count
        if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
            p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
                return (EAGAIN);
#else
        if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
                return (error);
#endif
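
        /*
         * Illustrative numbers (hypothetical): with RLIMIT_MEMLOCK's
         * rlim_cur at 32KB and no pages wired yet, an mlock() of 64KB
         * fails the per-process check above with EAGAIN even if the
         * global uvmexp.wiredmax budget would still allow it.
         */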

        error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
            0);
        return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_munlock_args /* {
                syscallarg(const void *) addr;
                syscallarg(size_t) len;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t addr;
        vsize_t size, pageoff;
        int error;

        /*
         * extract syscall args from uap
         */

        addr = (vaddr_t)SCARG(uap, addr);
        size = (vsize_t)SCARG(uap, len);

        /*
         * align the address to a page boundary, and adjust the size accordingly
         */

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vsize_t)round_page(size);

        /* disallow wrap-around. */
        if (addr + size < addr)
                return (EINVAL);

#ifndef pmap_wired_count
        if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
                return (error);
#endif

        error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
            0);
        return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_mlockall_args /* {
                syscallarg(int) flags;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        int error, flags;

        flags = SCARG(uap, flags);

        if (flags == 0 ||
            (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
                return (EINVAL);

#ifndef pmap_wired_count
        if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
                return (error);
#endif

        error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
            p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
        return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct proc *p = l->l_proc;

        (void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
        return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */
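
/*
 * Hypothetical caller sketch (illustrative only, not one of the original
 * call sites): a framebuffer-style driver wanting a shared, writable user
 * mapping of its vnode "vp" at device offset 0 might do:
 *
 *      error = uvm_mmap(&p->p_vmspace->vm_map, &va, size,
 *          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *          MAP_SHARED, vp, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * "foff" (0 here) must already be page-aligned, and the handle is the
 * vnode pointer, exactly as sys_mmap passes it.
 */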

int
uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
        struct vm_map *map;
        vaddr_t *addr;
        vsize_t size;
        vm_prot_t prot, maxprot;
        int flags;
        void *handle;
        voff_t foff;
        vsize_t locklimit;
{
        struct uvm_object *uobj;
        struct vnode *vp;
        vaddr_t align = 0;
        int error;
        int advice = UVM_ADV_NORMAL;
        uvm_flag_t uvmflag = 0;

        /*
         * check params
         */

        if (size == 0)
                return(0);
        if (foff & PAGE_MASK)
                return(EINVAL);
        if ((prot & maxprot) != prot)
                return(EINVAL);

        /*
         * for non-fixed mappings, round off the suggested address.
         * for fixed mappings, check alignment and zap old mappings.
         */

        if ((flags & MAP_FIXED) == 0) {
                *addr = round_page(*addr);
        } else {
                if (*addr & PAGE_MASK)
                        return(EINVAL);
                uvmflag |= UVM_FLAG_FIXED;
                (void) uvm_unmap(map, *addr, *addr + size);
        }

        /*
         * Try to see if any requested alignment can even be attempted.
         * Make sure we can express the alignment (asking for a >= 4GB
         * alignment on an ILP32 architecture makes no sense) and that the
         * alignment is at least a page-sized quantity.  If the request
         * was for a fixed mapping, make sure the supplied address adheres
         * to the requested alignment.
         */
        align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
        if (align) {
                if (align >= sizeof(vaddr_t) * NBBY)
                        return(EINVAL);
                align = 1L << align;
                if (align < PAGE_SIZE)
                        return(EINVAL);
                if (align >= map->max_offset)
                        return(ENOMEM);
                if (flags & MAP_FIXED) {
                        if ((*addr & (align-1)) != 0)
                                return(EINVAL);
                        align = 0;
                }
        }
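
        /*
         * Example of the encoding checked above (assuming NetBSD's
         * MAP_ALIGNED(n) mmap flag, which stores the log2 of the
         * requested alignment in the MAP_ALIGNMENT_MASK bits):
         * MAP_ALIGNED(16) asks for a 64KB-aligned mapping, so align
         * becomes 1L << 16 = 0x10000, which passes both the PAGE_SIZE
         * check and the vaddr_t-width check on a 4KB-page ILP32 port.
         */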

        /*
         * handle anon vs. non-anon mappings.   for non-anon mappings attach
         * to underlying vm object.
         */

        if (flags & MAP_ANON) {
                foff = UVM_UNKNOWN_OFFSET;
                uobj = NULL;
                if ((flags & MAP_SHARED) == 0)
                        /* XXX: defer amap create */
                        uvmflag |= UVM_FLAG_COPYONW;
                else
                        /* shared: create amap now */
                        uvmflag |= UVM_FLAG_OVERLAY;

        } else {
                vp = (struct vnode *)handle;

                /*
                 * Don't allow mmap for EXEC if the file system
                 * is mounted NOEXEC.
                 */
                if ((prot & PROT_EXEC) != 0 &&
                    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
                        return (EACCES);

                if (vp->v_type != VCHR) {
                        error = VOP_MMAP(vp, 0, curproc->p_ucred, curproc);
                        if (error) {
                                return error;
                        }

                        uobj = uvn_attach((void *)vp, (flags & MAP_SHARED) ?
                            maxprot : (maxprot & ~VM_PROT_WRITE));

                        /* XXX for now, attach doesn't gain a ref */
                        VREF(vp);

                        /*
                         * If the vnode is being mapped with PROT_EXEC,
                         * then mark it as text.
                         */
                        if (prot & PROT_EXEC)
                                vn_markexec(vp);
                } else {
                        uobj = udv_attach((void *) &vp->v_rdev,
                            (flags & MAP_SHARED) ? maxprot :
                            (maxprot & ~VM_PROT_WRITE), foff, size);
                        /*
                         * XXX Some devices don't like to be mapped with
                         * XXX PROT_EXEC, but we don't really have a
                         * XXX better way of handling this, right now
                         */
                        if (uobj == NULL && (prot & PROT_EXEC) == 0) {
                                maxprot &= ~VM_PROT_EXECUTE;
                                uobj = udv_attach((void *)&vp->v_rdev,
                                    (flags & MAP_SHARED) ? maxprot :
                                    (maxprot & ~VM_PROT_WRITE), foff, size);
                        }
                        advice = UVM_ADV_RANDOM;
                }
                if (uobj == NULL)
                        return((vp->v_type == VREG) ? ENOMEM : EINVAL);
                if ((flags & MAP_SHARED) == 0)
                        uvmflag |= UVM_FLAG_COPYONW;
        }

        uvmflag = UVM_MAPFLAG(prot, maxprot,
            (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
            advice, uvmflag);
        error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
        if (error) {
                if (uobj)
                        uobj->pgops->pgo_detach(uobj);
                return error;
        }

        /*
         * POSIX 1003.1b -- if our address space was configured
         * to lock all future mappings, wire the one we just made.
         *
         * Also handle the MAP_WIRED flag here.
         */

        if (prot == VM_PROT_NONE) {

                /*
                 * No more work to do in this case.
                 */

                return (0);
        }
        vm_map_lock(map);
        if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
                if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax
#ifdef pmap_wired_count
                    || (locklimit != 0 && (size +
                    ptoa(pmap_wired_count(vm_map_pmap(map)))) >
                    locklimit)
#endif
                ) {
                        vm_map_unlock(map);
                        uvm_unmap(map, *addr, *addr + size);
                        return ENOMEM;
                }

                /*
                 * uvm_map_pageable() always returns the map unlocked.
                 */

                error = uvm_map_pageable(map, *addr, *addr + size,
                    FALSE, UVM_LK_ENTER);
                if (error) {
                        uvm_unmap(map, *addr, *addr + size);
                        return error;
                }
                return (0);
        }
        vm_map_unlock(map);
        return 0;
}