/*	$NetBSD: uvm_mmap.c,v 1.181 2022/07/06 00:40:16 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *	@(#)vm_mmap.c	8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.181 2022/07/06 00:40:16 riastradh Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/pax.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

static int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t,
    int, int, struct uvm_object *, voff_t, vsize_t);

static int
range_test(const struct vm_map *map, vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = vm_map_min(map);
	vaddr_t vm_max_address = vm_map_max(map);
	vaddr_t eaddr = addr + size;
	int res = 0;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;

#ifdef MD_MMAP_RANGE_TEST
	res = MD_MMAP_RANGE_TEST(addr, eaddr);
#endif

	return res;
}
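
/*
 * Illustrative note on the "no wrapping" check above: it catches
 * requests whose end address overflows vaddr_t.  E.g. on a
 * hypothetical machine where vaddr_t is 32 bits wide:
 *
 *	addr  = 0xfffff000
 *	size  = 0x00002000
 *	eaddr = addr + size = 0x00001000	(wrapped past zero)
 *
 * Here eaddr < addr, so the call fails with EOVERFLOW (when reached
 * via mmap) or EINVAL (other callers) instead of accepting a bogus
 * range.
 */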

/*
 * align the address to a page boundary, and adjust the size accordingly
 */
static int
round_and_check(const struct vm_map *map, vaddr_t *addr, vsize_t *size)
{
	const vsize_t pageoff = (vsize_t)(*addr & PAGE_MASK);

	*addr -= pageoff;

	if (*size != 0) {
		*size += pageoff;
		*size = (vsize_t)round_page(*size);
	} else if (*addr + *size < *addr) {
		return ENOMEM;
	}

	return range_test(map, *addr, *size, false);
}
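
/*
 * Worked example for round_and_check() (illustrative, assuming
 * PAGE_SIZE == 4096, i.e. PAGE_MASK == 0xfff):
 *
 *	in:  *addr = 0x20234, *size = 0x100
 *	     pageoff = 0x234
 *	out: *addr = 0x20000, *size = round_page(0x334) = 0x1000
 *
 * The page offset is folded into the size, so the rounded range still
 * covers every byte of the caller's original [addr, addr + size) span.
 */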

/*
 * sys_mincore: determine if pages are in core or not.
 */
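
/*
 * Hypothetical userland sketch (illustrative only, not part of this
 * file) of how the syscall below is reached; names and sizes are
 * arbitrary:
 *
 *	char vec[4];
 *	void *p = mmap(NULL, 4 * sysconf(_SC_PAGESIZE),
 *	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
 *	if (mincore(p, 4 * sysconf(_SC_PAGESIZE), vec) == 0)
 *		... vec[i] is nonzero iff page i was resident ...
 */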

/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0;
	size_t npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return EINVAL;
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return EINVAL;

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * storing the status byte for a page.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		    (entry->next == &map->header ||
		     entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					ustore_char(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* upper layer */
		uobj = entry->object.uvm_obj;	/* lower layer */

		if (amap != NULL)
			amap_lock(amap, RW_READER);
		if (uobj != NULL)
			rw_enter(uobj->vmobjlock, RW_READER);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the upper layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the lower layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) ustore_char(vec, pgi);
		}
		if (uobj != NULL)
			rw_exit(uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return error;
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */
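
/*
 * For illustration (assuming PAGE_SIZE == 4096): a call such as
 *
 *	mmap(NULL, 0x100, PROT_READ, MAP_PRIVATE, fd, 0x1234)
 *
 * has pageoff = 0x234, so the file is mapped from pos 0x1000 with
 * size round_page(0x100 + 0x234) = 0x1000, and the returned pointer
 * is the chosen page-aligned address plus 0x234.
 */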

int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot, extraprot;
	int flags, fd, advice;
	vaddr_t defaddr = 0;	/* XXXGCC */
	bool addrhint = false;
	struct file *fp = NULL;
	struct uvm_object *uobj;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	extraprot = PROT_MPROTECT_EXTRACT(SCARG(uap, prot));
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return EINVAL;

	if (size == 0 && (flags & MAP_ANON) == 0)
		return EINVAL;

	/*
	 * Align file position and save offset into page.  Adjust size
	 * so that it is an integral multiple of the page size.
	 */
	pageoff = pos & PAGE_MASK;
	pos -= pageoff;
	CTASSERT(PAGE_MASK <= __type_max(vsize_t));
	CTASSERT((__type_max(vsize_t) - PAGE_SIZE + 1) % PAGE_SIZE == 0);
	if (size > __type_max(vsize_t) - PAGE_SIZE + 1 - pageoff)
		return ENOMEM;
	/*
	 * size + pageoff <= VSIZE_MAX + 1 - PAGE_SIZE, and the
	 * right-hand side is an integral multiple of the page size, so
	 * round_page(size + pageoff) <= VSIZE_MAX + 1 - PAGE_SIZE.
	 */
	size = round_page(size + pageoff);
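
	/*
	 * Concrete instance of the check above (illustrative, with a
	 * 32-bit vsize_t and PAGE_SIZE == 4096): __type_max(vsize_t) is
	 * 0xffffffff, so the limit is 0xfffff000 - pageoff.  Any size at
	 * or below that limit gives size + pageoff <= 0xfffff000, which
	 * round_page() can return unchanged without wrapping around.
	 */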

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return EINVAL;

		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
		if (error) {
			return error;
		}
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

		if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);

		/*
		 * If addr is nonzero and not the default, then the
		 * address is a hint.
		 */
		addrhint = (addr != 0 && addr != defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	advice = UVM_ADV_NORMAL;
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		if (fp->f_ops->fo_mmap == NULL) {
			error = ENODEV;
			goto out;
		}
		error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
		    &advice, &uobj, &maxprot);
		if (error) {
			goto out;
		}
		if (uobj == NULL) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}
	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return EINVAL;

 is_anon:	/* label for SunOS style /dev/zero */
		uobj = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	maxprot = PAX_MPROTECT_MAXPROTECT(l, prot, extraprot, maxprot);
	if (((prot | extraprot) & maxprot) != (prot | extraprot)) {
		error = EACCES;
		goto out;
	}
	if ((error = PAX_MPROTECT_VALIDATE(l, prot)))
		goto out;

	pax_aslr_mmap(l, &addr, orig_addr, flags);

	/*
	 * Now let kernel internal function uvm_mmap do the work.
	 *
	 * If the user provided a hint, take a reference to uobj in
	 * case the first attempt to satisfy the hint fails, so we can
	 * try again with the default address.
	 */
	if (addrhint) {
		if (uobj)
			(*uobj->pgops->pgo_reference)(uobj);
	}
	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, advice, uobj, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	if (addrhint) {
		if (error) {
			addr = defaddr;
			pax_aslr_mmap(l, &addr, orig_addr, flags);
			error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size,
			    prot, maxprot, flags, advice, uobj, pos,
			    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
		} else if (uobj) {
			/* Release the extra reference we took. */
			(*uobj->pgops->pgo_detach)(uobj);
		}
	}

	/* remember to add offset */
	*retval = (register_t)(addr + pageoff);

 out:
	if (fp != NULL)
		fd_putfile(fd);

	return error;
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	int error, flags, uvmflags;
	bool rv;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return EINVAL;
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * get map
	 */
	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return ENOMEM;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return EINVAL;
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return EINVAL;

	if (size == 0)
		return 0;

	vm_map_lock(map);
#if 0
	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return EINVAL;
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return 0;
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_protect_user(l, addr, addr + size, prot);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map,
		    addr, addr + size);
		break;

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return EINVAL;

	default:
		return EINVAL;
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return EAGAIN;

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return EAGAIN;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true, 0))
		return ENOMEM;

	return 0;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 || (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return EINVAL;

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return 0;
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - uobj is a struct uvm_object pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 *
 * XXX This appears to leak the uobj in various error branches?  Need
 * to clean up the contract around the uobj reference.
 */

static int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, int advice, struct uvm_object *uobj,
    voff_t foff, vsize_t locklimit)
{
	vaddr_t align = 0;
	int error;
	uvm_flag_t uvmflag = 0;

	/*
	 * check params
	 */

	if (size == 0)
		return 0;
	if (foff & PAGE_MASK)
		return EINVAL;
	if ((prot & maxprot) != prot)
		return EINVAL;

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return EINVAL;
		uvmflag |= UVM_FLAG_FIXED | UVM_FLAG_UNMAP;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied
	 * address adheres to the requested alignment.
	 */
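	/*
	 * For example (illustrative only): a caller passing
	 * MAP_ALIGNED(21) in flags encodes the value 21 here, which
	 * becomes align = 1UL << 21, i.e. a 2 MB alignment request.
	 * Encoded values below PAGE_SHIFT, or at or above the width of
	 * vaddr_t in bits, are rejected by the checks below.
	 */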
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1UL << align;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(map))
			return ENOMEM;
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return EINVAL;
			align = 0;
		}
	}

	/*
	 * check resource limits
	 */

	if (!VM_MAP_IS_KERNEL(map) &&
	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
	     curproc->p_rlimit[RLIMIT_AS].rlim_cur))
		return ENOMEM;

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(uobj == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(uobj != NULL);
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY, advice,
	    uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return 0;
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
		    false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return 0;
	}
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz, int topdown)
{

	if (topdown)
		return VM_DEFAULT_ADDRESS_TOPDOWN(base, sz);
	else
		return VM_DEFAULT_ADDRESS_BOTTOMUP(base, sz);
}

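/*
 * uvm_mmap_dev: establish a shared, read/write mapping of the device
 * "dev" in the process's map.  If *addrp is non-NULL it is used as a
 * fixed address; otherwise a default address is chosen and returned
 * through *addrp.
 *
 * A hypothetical in-kernel caller (illustrative only) might do:
 *
 *	void *va = NULL;
 *	error = uvm_mmap_dev(p, &va, size, dev, 0);
 */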
int
uvm_mmap_dev(struct proc *p, void **addrp, size_t len, dev_t dev,
    off_t off)
{
	struct uvm_object *uobj;
	int error, flags, prot;

	flags = MAP_SHARED;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	uobj = udv_attach(dev, prot, off, len);
	if (uobj == NULL)
		return EINVAL;

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM, uobj, off,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}

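/*
 * uvm_mmap_anon: establish a private, read/write anonymous mapping in
 * the process's map, at the fixed address *addrp if it is non-NULL,
 * otherwise at a default address returned through *addrp.
 */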
int
uvm_mmap_anon(struct proc *p, void **addrp, size_t len)
{
	int error, flags, prot;

	flags = MAP_PRIVATE | MAP_ANON;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL, NULL, 0,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}