/*	$NetBSD: uvm_mmap.c,v 1.180 2022/06/04 20:54:03 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.180 2022/06/04 20:54:03 riastradh Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/pax.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

static int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t,
    int, int, struct uvm_object *, voff_t, vsize_t);

static int
range_test(const struct vm_map *map, vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = vm_map_min(map);
	vaddr_t vm_max_address = vm_map_max(map);
	vaddr_t eaddr = addr + size;
	int res = 0;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;

#ifdef MD_MMAP_RANGE_TEST
	res = MD_MMAP_RANGE_TEST(addr, eaddr);
#endif

	return res;
}
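
/*
 * Worked example for the wrap check above (illustrative only): a request
 * whose size carries addr + size past the top of the address space leaves
 * eaddr numerically below addr, so an mmap caller gets EOVERFLOW and every
 * other caller gets EINVAL instead of a silently wrapped range.
 */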

/*
 * align the address to a page boundary, and adjust the size accordingly
 */
static int
round_and_check(const struct vm_map *map, vaddr_t *addr, vsize_t *size)
{
	const vsize_t pageoff = (vsize_t)(*addr & PAGE_MASK);

	*addr -= pageoff;

	if (*size != 0) {
		*size += pageoff;
		*size = (vsize_t)round_page(*size);
	} else if (*addr + *size < *addr) {
		return ENOMEM;
	}

	return range_test(map, *addr, *size, false);
}
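
/*
 * Worked example for round_and_check() (illustrative only, assuming
 * PAGE_SIZE == 0x1000): with *addr == 0x20234 and *size == 0x100,
 * pageoff == 0x234, so *addr is truncated to 0x20000 and *size grows
 * to 0x334 and then rounds up to 0x1000, i.e. the request is widened
 * to cover the whole page containing the original byte range.
 */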

/*
 * sys_mincore: determine if pages are in core or not.
 */

/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0;
	size_t npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return EINVAL;
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return EINVAL;

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * storing the status byte for a page.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		    (entry->next == &map->header ||
		     entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					ustore_char(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* upper layer */
		uobj = entry->object.uvm_obj;	/* lower layer */

		if (amap != NULL)
			amap_lock(amap, RW_READER);
		if (uobj != NULL)
			rw_enter(uobj->vmobjlock, RW_READER);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the upper layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the lower layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) ustore_char(vec, pgi);
		}
		if (uobj != NULL)
			rw_exit(uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return error;
}
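
/*
 * Userland view of the call above (an illustrative sketch; buffer sizing
 * and error handling are up to the caller):
 *
 *	char vec[npages];
 *
 *	error = mincore(addr, npages * getpagesize(), vec);
 *
 * on success, vec[i] != 0 means page i of the range is resident.
 */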

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have remainder mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */

int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	off_t pos;
	vsize_t size, pageoff, newsize;
	vm_prot_t prot, maxprot, extraprot;
	int flags, fd, advice;
	vaddr_t defaddr = 0;	/* XXXGCC */
	bool addrhint = false;
	struct file *fp = NULL;
	struct uvm_object *uobj;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	extraprot = PROT_MPROTECT_EXTRACT(SCARG(uap, prot));
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return EINVAL;

	if (size == 0 && (flags & MAP_ANON) == 0)
		return EINVAL;

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;
	newsize = size + pageoff;		/* add offset */
	newsize = (vsize_t)round_page(newsize);	/* round up */

	if (newsize < size)
		return ENOMEM;
	size = newsize;

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return EINVAL;

		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
		if (error) {
			return error;
		}
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

		if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);

		/*
		 * If addr is nonzero and not the default, then the
		 * address is a hint.
		 */
		addrhint = (addr != 0 && addr != defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	advice = UVM_ADV_NORMAL;
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		if (fp->f_ops->fo_mmap == NULL) {
			error = ENODEV;
			goto out;
		}
		error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
		    &advice, &uobj, &maxprot);
		if (error) {
			goto out;
		}
		if (uobj == NULL) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}
	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return EINVAL;

 is_anon:		/* label for SunOS style /dev/zero */
		uobj = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	maxprot = PAX_MPROTECT_MAXPROTECT(l, prot, extraprot, maxprot);
	if (((prot | extraprot) & maxprot) != (prot | extraprot)) {
		error = EACCES;
		goto out;
	}
	if ((error = PAX_MPROTECT_VALIDATE(l, prot)))
		goto out;

	pax_aslr_mmap(l, &addr, orig_addr, flags);

	/*
	 * Now let kernel internal function uvm_mmap do the work.
	 *
	 * If the user provided a hint, take a reference to uobj in
	 * case the first attempt to satisfy the hint fails, so we can
	 * try again with the default address.
	 */
	if (addrhint) {
		if (uobj)
			(*uobj->pgops->pgo_reference)(uobj);
	}
	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, advice, uobj, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	if (addrhint) {
		if (error) {
			addr = defaddr;
			pax_aslr_mmap(l, &addr, orig_addr, flags);
			error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size,
			    prot, maxprot, flags, advice, uobj, pos,
			    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
		} else if (uobj) {
			/* Release the extra reference we took. */
			(*uobj->pgops->pgo_detach)(uobj);
		}
	}

	/* remember to add offset */
	*retval = (register_t)(addr + pageoff);

 out:
	if (fp != NULL)
		fd_putfile(fd);

	return error;
}
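
/*
 * Userland view of the offset handling above (an illustrative sketch):
 * mapping a file at a non-page-aligned offset, e.g.
 *
 *	void *va = mmap(NULL, len, PROT_READ, MAP_FILE | MAP_PRIVATE,
 *	    fd, 0x1234);
 *
 * maps from the page boundary below offset 0x1234 and returns a pointer
 * advanced by the sub-page offset (0x234), so va points at the byte for
 * file offset 0x1234.
 */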

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	int error, flags, uvmflags;
	bool rv;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return EINVAL;
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * get map
	 */
	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return ENOMEM;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return EINVAL;
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}
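
/*
 * Userland view of the flag translation above (an illustrative sketch):
 *
 *	msync(va, len, MS_SYNC | MS_INVALIDATE);
 *
 * becomes PGO_CLEANIT | PGO_SYNCIO | PGO_FREE for uvm_map_clean(), i.e.
 * a synchronous flush that also frees the cached pages.
 */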

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return EINVAL;

	if (size == 0)
		return 0;

	vm_map_lock(map);
#if 0
	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return EINVAL;
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return 0;
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_protect_user(l, addr, addr + size, prot);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map,
		    addr, addr + size);
		break;

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return EINVAL;

	default:
		return EINVAL;
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return EAGAIN;

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return EAGAIN;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}
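
/*
 * Worked example for the limit checks above (illustrative only): with
 * RLIMIT_MEMLOCK set to 1 MiB and 768 KiB already wired in this pmap,
 * an mlock() request for 512 KiB fails with EAGAIN because
 * 512 KiB + 768 KiB exceeds the limit.
 */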

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true, 0))
		return ENOMEM;

	return 0;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 || (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return EINVAL;

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return 0;
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - uobj is a struct uvm_object pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 *
 * XXX This appears to leak the uobj in various error branches; the
 * contract around the uobj reference needs to be cleaned up.
 */

static int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, int advice, struct uvm_object *uobj,
    voff_t foff, vsize_t locklimit)
{
	vaddr_t align = 0;
	int error;
	uvm_flag_t uvmflag = 0;

	/*
	 * check params
	 */

	if (size == 0)
		return 0;
	if (foff & PAGE_MASK)
		return EINVAL;
	if ((prot & maxprot) != prot)
		return EINVAL;

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return EINVAL;
		uvmflag |= UVM_FLAG_FIXED | UVM_FLAG_UNMAP;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that the
	 * alignment is at least a page-sized quantity.  If the request
	 * was for a fixed mapping, make sure the supplied address adheres
	 * to the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1UL << align;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(map))
			return ENOMEM;
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return EINVAL;
			align = 0;
		}
	}
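
	/*
	 * Worked example (illustrative, assuming the usual MAP_ALIGNED()
	 * encoding from <sys/mman.h>): a caller passing MAP_ALIGNED(16)
	 * stores 16 in the MAP_ALIGNMENT_MASK bits, so align becomes
	 * 1UL << 16 and the mapping is placed on a 64 KiB boundary.
	 */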

	/*
	 * check resource limits
	 */

	if (!VM_MAP_IS_KERNEL(map) &&
	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
	     curproc->p_rlimit[RLIMIT_AS].rlim_cur))
		return ENOMEM;

	/*
	 * handle anon vs. non-anon mappings.  for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(uobj == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(uobj != NULL);
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY, advice,
	    uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return 0;
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
		    false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return 0;
	}
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz, int topdown)
{

	if (topdown)
		return VM_DEFAULT_ADDRESS_TOPDOWN(base, sz);
	else
		return VM_DEFAULT_ADDRESS_BOTTOMUP(base, sz);
}

int
uvm_mmap_dev(struct proc *p, void **addrp, size_t len, dev_t dev,
    off_t off)
{
	struct uvm_object *uobj;
	int error, flags, prot;

	flags = MAP_SHARED;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	uobj = udv_attach(dev, prot, off, len);
	if (uobj == NULL)
		return EINVAL;

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM, uobj, off,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}

int
uvm_mmap_anon(struct proc *p, void **addrp, size_t len)
{
	int error, flags, prot;

	flags = MAP_PRIVATE | MAP_ANON;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL, NULL, 0,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}
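
/*
 * Kernel-side usage of the helpers above (an illustrative sketch; error
 * handling omitted): a driver wanting an anonymous, pageable region in
 * a process would do something like
 *
 *	void *va = NULL;
 *	error = uvm_mmap_anon(p, &va, len);
 *
 * passing a non-NULL *va instead turns the request into a MAP_FIXED
 * mapping at that address, as it also does for uvm_mmap_dev().
 */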