/*	$NetBSD: uvm_map.c,v 1.423 2024/08/14 21:05:11 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.423 2024/08/14 21:05:11 riastradh Exp $");

#include "opt_ddb.h"
#include "opt_pax.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/pax.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>
#include <sys/sysctl.h>
#ifndef __USER_VA0_IS_SAFE
#include <sys/kauth.h>
#include "opt_user_va0_disable_default.h"
#endif

#include <sys/shm.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#ifdef UVMHIST
#ifndef UVMHIST_MAPHIST_SIZE
#define UVMHIST_MAPHIST_SIZE 100
#endif
static struct kern_history_ent maphistbuf[UVMHIST_MAPHIST_SIZE];
UVMHIST_DEFINE(maphist) = UVMHIST_INITIALIZER(maphist, maphistbuf);
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

#ifndef __USER_VA0_IS_SAFE
#ifndef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT 1
#endif
#ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
#undef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
#endif
int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
#endif

/*
 * macros
 */

/*
 * uvm_map_align_va: round down or up virtual address
 */
static __inline void
uvm_map_align_va(vaddr_t *vap, vsize_t align, int topdown)
{

	KASSERT(powerof2(align));

	if (align != 0 && (*vap & (align - 1)) != 0) {
		if (topdown)
			*vap = rounddown2(*vap, align);
		else
			*vap = roundup2(*vap, align);
	}
}
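
/*
 * Illustrative example (editor's note, not part of the original source):
 * with align = 0x1000, a topdown search rounds a candidate address down
 * while a bottomup search rounds it up, e.g.:
 *
 *	vaddr_t va = 0x12345;
 *	uvm_map_align_va(&va, 0x1000, 1);	// topdown:  va == 0x12000
 *	va = 0x12345;
 *	uvm_map_align_va(&va, 0x1000, 0);	// bottomup: va == 0x13000
 *
 * Addresses that already satisfy the alignment are left untouched, and
 * align == 0 is a no-op.
 */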

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */
extern struct vm_map *pager_map;

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	    (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
	    (ent)->object.uvm_obj == (uobj) && \
	    (ent)->protection == (prot) && \
	    (ent)->max_protection == (maxprot) && \
	    (ent)->inheritance == (inh) && \
	    (ent)->advice == (adv) && \
	    (ent)->wired_count == (wire))

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define	SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define	VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, int);
#define	uvm_mapent_check(e)	_uvm_mapent_check(e, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    int, struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

/*
 * Tree iteration.  We violate the rbtree(9) abstraction for various
 * things here.  Entries are ascending left to right, so, provided the
 * child entry in question exists:
 *
 *	LEFT_ENTRY(entry)->end <= entry->start
 *	entry->end <= RIGHT_ENTRY(entry)->start
 */
__CTASSERT(offsetof(struct vm_map_entry, rb_node) == 0);
#define	ROOT_ENTRY(map) \
	((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry) \
	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry) \
	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

/*
 * These get filled in if/when SYSVSHM shared memory code is loaded
 *
 * We do this with function pointers rather than #ifdef SYSVSHM so the
 * SYSVSHM code can be loaded and unloaded
 */
void (*uvm_shmexit)(struct vmspace *) = NULL;
void (*uvm_shmfork)(struct vmspace *, struct vmspace *) = NULL;

static int
uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
{
	const struct vm_map_entry *eparent = nparent;
	const struct vm_map_entry *ekey = nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (eparent->start < ekey->start)
		return -1;
	if (eparent->end >= ekey->start)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (eparent->start < va)
		return -1;
	if (eparent->end >= va)
		return 1;
	return 0;
}

static const rb_tree_ops_t uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_rb_gap: return the gap size between our entry and next entry.
 */
static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{

	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendants.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendants, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
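
/*
 * Worked example (editor's sketch, not from the original source):
 * consider three consecutive entries A [0x1000,0x2000), B [0x5000,0x6000),
 * C [0x9000,0xa000), with B as the tree root and A, C as its left/right
 * children.  Then:
 *
 *	A->gap = B->start - A->end = 0x3000
 *	B->gap = C->start - B->end = 0x3000
 *	C->gap = C->next->start - C->end (gap to the following entry)
 *
 * and B->maxgap = max(B->gap, A->maxgap, C->maxgap).  A free-space
 * search can therefore skip any subtree whose cached maxgap is smaller
 * than the requested length without visiting its nodes.
 */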

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;
		unsigned int which;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work towards the root, we know entry's maxgap
		 * value is OK, but its brothers may now be out-of-date due
		 * to rebalancing.  So refresh it.
		 */
		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
		brother = (struct vm_map_entry *)
		    parent->rb_node.rb_nodes[which];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *ret __diagused;

	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	ret = rb_tree_insert_node(&map->rb_tree, entry);
	KASSERTMSG(ret == entry,
	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, entry);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define	uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define	uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %#lx != %#lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, tmp->gap will be unsigned
		 * and will likely exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %#lx != %#lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("corrupt: 0x%"PRIxVADDR" >= 0x%"PRIxVADDR"\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
 error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL || map->busy == curlwp) {
			break;
		}
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL) {
			cv_wait(&map->cv, &map->misc_lock);
		}
		mutex_exit(&map->misc_lock);
	}
	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if (!rw_tryenter(&map->lock, RW_WRITER)) {
		return false;
	}
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}
	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL || map->busy == curlwp);
	rw_exit(&map->lock);
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 * want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}
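
/*
 * Usage sketch (editor's note; hypothetical caller, not from this file):
 * the busy mechanism lets a thread drop to a read lock for a long
 * operation while keeping the guarantee that it can re-take the write
 * lock ahead of other writers:
 *
 *	vm_map_lock(map);		// exclusive
 *	vm_map_busy(map);		// record ourselves as busy owner
 *	vm_map_unlock(map);
 *	vm_map_lock_read(map);
 *	// ... long operation that only needs a read lock ...
 *	vm_map_unlock_read(map);
 *	vm_map_lock(map);		// busy owner is not blocked out
 *	vm_map_unbusy(map);		// wake threads parked in vm_map_lock()
 *	vm_map_unlock(map);
 *
 * Other threads calling vm_map_lock() meanwhile sleep on map->cv until
 * vm_map_unbusy() broadcasts.
 */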

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	return rw_write_held(&map->lock);
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	me = pool_cache_get(&uvm_map_entry_cache, pflags);
	if (__predict_false(me == NULL)) {
		return NULL;
	}
	me->flags = 0;

	UVMHIST_LOG(maphist, "<- new entry=%#jx [kentry=%jd]", (uintptr_t)me,
	    (map == kernel_map), 0, 0);
	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%#jx]",
	    (uintptr_t)me, me->flags, 0, 0);
	pool_cache_put(&uvm_map_entry_cache, me);
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, sizeof(*dst));
	dst->flags = 0;
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p, line %d", __func__, entry, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC(__func__);
	UVMHIST_LINK_STATIC(maphist);
	UVMHIST_LINK_STATIC(pdhist);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
}

/*
 * uvm_map_init_caches: init mapping system caches.
 */
void
uvm_map_init_caches(void)
{
	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    coherency_unit, 0, PR_LARGECACHE, "vmmpepl", NULL, IPL_NONE, NULL,
	    NULL, NULL);
}
941
942 /*
943 * clippers
944 */
945
946 /*
947 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
948 */
949
950 static void
951 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
952 vaddr_t splitat)
953 {
954 vaddr_t adj;
955
956 KASSERT(entry1->start < splitat);
957 KASSERT(splitat < entry1->end);
958
959 adj = splitat - entry1->start;
960 entry1->end = entry2->start = splitat;
961
962 if (entry1->aref.ar_amap) {
963 amap_splitref(&entry1->aref, &entry2->aref, adj);
964 }
965 if (UVM_ET_ISSUBMAP(entry1)) {
966 /* ... unlikely to happen, but play it safe */
967 uvm_map_reference(entry1->object.sub_map);
968 } else if (UVM_ET_ISOBJ(entry1)) {
969 KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
970 entry2->offset += adj;
971 if (entry1->object.uvm_obj->pgops &&
972 entry1->object.uvm_obj->pgops->pgo_reference)
973 entry1->object.uvm_obj->pgops->pgo_reference(
974 entry1->object.uvm_obj);
975 }
976 }
977
978 /*
979 * uvm_map_clip_start: ensure that the entry begins at or after
980 * the starting address, if it doesn't we split the entry.
981 *
982 * => caller should use UVM_MAP_CLIP_START macro rather than calling
983 * this directly
984 * => map must be locked by caller
985 */
986
987 void
988 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
989 vaddr_t start)
990 {
991 struct vm_map_entry *new_entry;
992
993 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
994
995 uvm_map_check(map, "clip_start entry");
996 uvm_mapent_check(entry);
997
998 /*
999 * Split off the front portion. note that we must insert the new
1000 * entry BEFORE this one, so that this entry has the specified
1001 * starting address.
1002 */
1003 new_entry = uvm_mapent_alloc(map, 0);
1004 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1005 uvm_mapent_splitadj(new_entry, entry, start);
1006 uvm_map_entry_link(map, entry->prev, new_entry);
1007
1008 uvm_map_check(map, "clip_start leave");
1009 }
1010
1011 /*
1012 * uvm_map_clip_end: ensure that the entry ends at or before
1013 * the ending address, if it does't we split the reference
1014 *
1015 * => caller should use UVM_MAP_CLIP_END macro rather than calling
1016 * this directly
1017 * => map must be locked by caller
1018 */
1019
1020 void
1021 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
1022 {
1023 struct vm_map_entry *new_entry;
1024
1025 uvm_map_check(map, "clip_end entry");
1026 uvm_mapent_check(entry);
1027
1028 /*
1029 * Create a new entry and insert it
1030 * AFTER the specified entry
1031 */
1032 new_entry = uvm_mapent_alloc(map, 0);
1033 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1034 uvm_mapent_splitadj(entry, new_entry, end);
1035 uvm_map_entry_link(map, entry, new_entry);
1036
1037 uvm_map_check(map, "clip_end leave");
1038 }
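
/*
 * Illustrative example (editor's sketch): given an entry covering
 * [0x1000, 0x4000) and an operation on [0x2000, 0x3000), the usual
 * sequence is
 *
 *	UVM_MAP_CLIP_START(map, entry, 0x2000);
 *	// entry now covers [0x2000, 0x4000); [0x1000, 0x2000) split off
 *	UVM_MAP_CLIP_END(map, entry, 0x3000);
 *	// entry now covers [0x2000, 0x3000); [0x3000, 0x4000) split off
 *
 * after which the operation can be applied to the clipped entry alone.
 * The macros avoid the function call entirely when no split is needed.
 */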

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	[1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	[2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	[3] <uobj,uoffset>		== normal mapping
 *	[4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.  note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((size & PAGE_MASK) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 */

	new_entry = NULL;
	if (map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}
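
/*
 * Usage sketch (editor's note; values are illustrative, not from this
 * file): a typical anonymous, copy-on-write mapping in a process map
 * looks like
 *
 *	vaddr_t va = hint;	// page-aligned
 *	error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(len),
 *	    NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *		UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
 *
 * i.e. case [2] above: no uobj and UVM_UNKNOWN_OFFSET, so no
 * PMAP_PREFER hint is derived from the offset.
 */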

/*
 * uvm_map_prepare:
 *
 * called with map unlocked.
 * on success, returns the map locked.
 */

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%jx, flags=%#jx)",
	    (uintptr_t)map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
	    uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL);

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure: prot=%#jx, max=%#jx",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */
retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	if (flags & UVM_FLAG_UNMAP) {
		KASSERT(flags & UVM_FLAG_FIXED);
		KASSERT((flags & UVM_FLAG_NOWAIT) == 0);

		/*
		 * Set prev_entry to what it will need to be after any existing
		 * entries are removed later in uvm_map_enter().
		 */

		if (uvm_map_lookup_entry(map, start, &prev_entry)) {
			if (start == prev_entry->start)
				prev_entry = prev_entry->prev;
			else
				UVM_MAP_CLIP_END(map, prev_entry, start);
			SAVE_HINT(map, map->hint, prev_entry);
		}
	} else {
		prev_entry = uvm_map_findspace(map, start, size, &start,
		    uobj, uoffset, align, flags);
	}
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=%#jx",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.  in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).  this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).  the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
	return 0;
}
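
/*
 * Editor's note: uvm_map() above is simply these two phases glued
 * together; a caller that must allocate resources between finding the
 * space and installing the entry can use them directly, roughly:
 *
 *	struct uvm_map_args args;
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset,
 *	    align, flags, &args);		// returns with map locked
 *	if (error == 0) {
 *		error = uvm_map_enter(map, &args, NULL);  // unlocks map
 *		start = args.uma_start;
 *	}
 *
 * (sketch only; see uvm_map() for the canonical pairing, including the
 * preallocated-entry dance for pager_map).
 */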

/*
 * uvm_map_enter:
 *
 * called with map locked.
 * unlock the map before returning.
 */

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL, *dead_entries = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
	    (uintptr_t)map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
	    uoffset,0,0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
	KASSERT(vm_map_locked_p(map));
	KASSERT((flags & (UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP)) !=
	    (UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP));

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * For mappings with unmap, remove any old entries now.  Adding the new
	 * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
	 * is set, and we do not support nowait and unmap together.
	 */

	if (flags & UVM_FLAG_UNMAP) {
		KASSERT(flags & UVM_FLAG_FIXED);
		uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
#ifdef DEBUG
		struct vm_map_entry *tmp_entry __diagused;
		bool rv __diagused;

		rv = uvm_map_lookup_entry(map, start, &tmp_entry);
		KASSERT(!rv);
		KASSERTMSG(prev_entry == tmp_entry,
		    "args %p prev_entry %p tmp_entry %p",
		    args, prev_entry, tmp_entry);
#endif
		SAVE_HINT(map, map->hint, prev_entry);
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.  might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk.  Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;

done:
	vm_map_unlock(map);

	if (new_entry) {
		uvm_mapent_free(new_entry);
	}
	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free(dead);
	}
	if (dead_entries)
		uvm_unmap_detach(dead_entries, 0);

	return error;
}
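
/*
 * Editor's sketch of the merge outcomes above, for a new range N placed
 * between entries P (prev) and Q (next):
 *
 *	[ P ][ N ]		backmerge:    P->end   += size  (P absorbs N)
 *	     [ N ][ Q ]		forwardmerge: Q->start -= size  (Q absorbs N)
 *	[ P ][ N ][ Q ]		bimerge:      P->end = Q->end, Q is unlinked
 *
 * which is why a successful bimerge decrements the backmerge counter
 * bumped earlier and increments the bimerge counter instead.
 */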

/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 *
 * => map must at least be read-locked by caller.
 *
 * => If address lies in an entry, set *entry to it and return true;
 *	then (*entry)->start <= address < (*entry)->end.
 *
 * => If address is below all entries in map, return false and set
 *	*entry to &map->header.
 *
 * => Otherwise, return false and set *entry to the highest entry below
 *	address, so (*entry)->end <= address, and if (*entry)->next is
 *	not &map->header, address < (*entry)->next->start.
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	KASSERT(rw_lock_held(&map->lock));

	while (cur) {
		KASSERT(prev == &map->header || prev->end <= address);
		KASSERT(prev == &map->header || prev->end <= cur->start);
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			KASSERT(prev->end <= address);
			cur = RIGHT_ENTRY(cur);
			KASSERT(cur == NULL || prev->end <= cur->start);
		} else
			cur = LEFT_ENTRY(cur);
	}
	KASSERT(prev == &map->header || prev->end <= address);
	KASSERT(prev->next == &map->header || address < prev->next->start);
	*entry = prev;
	return false;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller.
 *
 * => If address lies in an entry, set *entry to it and return true;
 *	then (*entry)->start <= address < (*entry)->end.
 *
 * => If address is below all entries in map, return false and set
 *	*entry to &map->header.
 *
 * => Otherwise, return false and set *entry to the highest entry below
 *	address, so (*entry)->end <= address, and if (*entry)->next is
 *	not &map->header, address < (*entry)->next->start.
 */

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
	    (uintptr_t)map, address, (uintptr_t)entry, 0);

	KASSERT(rw_lock_held(&map->lock));

	/*
	 * make a quick check to see if we are already looking at
	 * the entry we want (which is usually the case).  note also
	 * that we don't need to save the hint here...  it is the
	 * same hint (unless we are at the header, in which case the
	 * hint didn't buy us anything anyway).
	 */

	cur = map->hint;
	UVMMAP_EVCNT_INCR(mlk_call);
	if (cur != &map->header &&
	    address >= cur->start && cur->end > address) {
		UVMMAP_EVCNT_INCR(mlk_hint);
		*entry = cur;
		UVMHIST_LOG(maphist,"<- got it via hint (%#jx)",
		    (uintptr_t)cur, 0, 0, 0);
		uvm_mapent_check(*entry);
		return (true);
	}
	uvm_map_check(map, __func__);

	/*
	 * lookup in the tree.
	 */

	UVMMAP_EVCNT_INCR(mlk_tree);
	if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
		SAVE_HINT(map, map->hint, *entry);
		UVMHIST_LOG(maphist,"<- search got it (%#jx)",
		    (uintptr_t)cur, 0, 0, 0);
		KDASSERT((*entry)->start <= address);
		KDASSERT(address < (*entry)->end);
		uvm_mapent_check(*entry);
		return (true);
	}

	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}
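
/*
 * Usage sketch (editor's note): callers typically clip against the
 * result, relying on the postconditions documented above, e.g.
 *
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *	} else {
 *		entry = entry->next;	// first entry at or after start
 *	}
 */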

/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits, 0
 * if it doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
{
	vaddr_t orig_start = *start;
	vaddr_t end;

#define	INVARIANTS() \
	KASSERTMSG((topdown \
		? *start <= orig_start \
		: *start >= orig_start), \
	    "[%s] *start=%"PRIxVADDR" orig_start=%"PRIxVADDR \
	    " length=%"PRIxVSIZE" uoffset=%#llx align=%"PRIxVSIZE \
	    " flags=%x entry@%p=[%"PRIxVADDR",%"PRIxVADDR")" \
	    " ncolors=%d colormask=%x", \
	    topdown ? "topdown" : "bottomup", *start, orig_start, \
	    length, (unsigned long long)uoffset, align, \
	    flags, entry, entry->start, entry->end, \
	    uvmexp.ncolors, uvmexp.colormask)

	INVARIANTS();

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET) {
		PMAP_PREFER(uoffset, start, length, topdown);
		INVARIANTS();
	}
#endif
	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
		KASSERT(align < uvmexp.ncolors);
		if (uvmexp.ncolors > 1) {
			const u_int colormask = uvmexp.colormask;
			const u_int colorsize = colormask + 1;
			vaddr_t hint = atop(*start);
			const u_int color = hint & colormask;
			if (color != align) {
				hint -= color;	/* adjust to color boundary */
				KASSERT((hint & colormask) == 0);
				if (topdown) {
					if (align > color)
						hint -= colorsize;
				} else {
					if (align < color)
						hint += colorsize;
				}
				*start = ptoa(hint + align); /* adjust to color */
				INVARIANTS();
			}
		}
	} else {
		KASSERT(powerof2(align));
		uvm_map_align_va(start, align, topdown);
		INVARIANTS();
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 *
	 * XXX Should this use vm_map_max(map) as the max?
	 */

	if (length > __type_max(vaddr_t) - *start)
		return (-1);
	end = *start + length;

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);

#undef INVARIANTS
}
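
/*
 * Worked example (editor's note) of the UVM_FLAG_COLORMATCH adjustment
 * above, with 4 page colors (colormask = 3): suppose atop(*start) ==
 * 0x1006 (color 2) and the desired color is align == 1.  Then
 * hint -= 2 gives 0x1004, and since this is a bottomup search with
 * align < color we add colorsize (4) before adding the color, yielding
 * ptoa(0x1009) -- the next page above the original hint whose color
 * is 1.  A topdown search would instead leave hint at 0x1004 and move
 * down to ptoa(0x1005), the first color-1 page below the hint.
 */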
1838
1839 static void
1840 uvm_findspace_invariants(struct vm_map *map, vaddr_t orig_hint, vaddr_t length,
1841 struct uvm_object *uobj, voff_t uoffset, vsize_t align, int flags,
1842 vaddr_t hint, struct vm_map_entry *entry, int line)
1843 {
1844 const int topdown = map->flags & VM_MAP_TOPDOWN;
1845 const int hint_location_ok =
1846 topdown ? hint <= orig_hint
1847 : hint >= orig_hint;
1848
1849 #if !(defined(__sh3__) && defined(DIAGNOSTIC)) /* XXXRO: kern/51254 */
1850 #define UVM_FINDSPACE_KASSERTMSG KASSERTMSG
1851
1852 #else /* sh3 && DIAGNOSTIC */
1853 /* like KASSERTMSG but make it not fatal */
1854 #define UVM_FINDSPACE_KASSERTMSG(e, msg, ...) \
1855 (__predict_true((e)) ? (void)0 : \
1856 printf(__KASSERTSTR msg "\n", \
1857 "weak diagnostic ", #e, \
1858 __FILE__, __LINE__, ## __VA_ARGS__))
1859 #endif
1860
1861 UVM_FINDSPACE_KASSERTMSG(hint_location_ok,
1862 "%s map=%p hint=%#" PRIxVADDR " %s orig_hint=%#" PRIxVADDR
1863 " length=%#" PRIxVSIZE " uobj=%p uoffset=%#llx align=%" PRIxVSIZE
1864 " flags=%#x entry@%p=[%" PRIxVADDR ",%" PRIxVADDR ")"
1865 " entry->next@%p=[%" PRIxVADDR ",%" PRIxVADDR ")"
1866 " (uvm_map_findspace line %d)",
1867 topdown ? "topdown" : "bottomup",
1868 map, hint, topdown ? ">" : "<", orig_hint,
1869 length, uobj, (unsigned long long)uoffset, align,
1870 flags, entry, entry ? entry->start : 0, entry ? entry->end : 0,
1871 entry && entry->next,
1872 entry && entry->next ? entry->next->start : 0,
1873 entry && entry->next ? entry->next->end : 0,
1874 line);
1875 }
1876
1877 /*
1878 * uvm_map_findspace: find "length" sized space in "map".
1879 *
1880 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1881 * set in "flags" (in which case we insist on using "hint").
1882 * => "result" is VA returned
1883 * => uobj/uoffset are to be used to handle VAC alignment, if required
1884 * => if "align" is non-zero, we attempt to align to that value.
1885 * => caller must at least have read-locked map
1886 * => returns NULL on failure, or pointer to prev. map entry if success
1887 * => note this is a cross between the old vm_map_findspace and vm_map_find
1888 */
1889
1890 struct vm_map_entry *
1891 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1892 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1893 vsize_t align, int flags)
1894 {
1895 #define INVARIANTS() \
1896 uvm_findspace_invariants(map, orig_hint, length, uobj, uoffset, align,\
1897 flags, hint, entry, __LINE__)
1898 struct vm_map_entry *entry = NULL;
1899 struct vm_map_entry *child, *prev, *tmp;
1900 vaddr_t orig_hint __diagused;
1901 const int topdown = map->flags & VM_MAP_TOPDOWN;
1902 int avail;
1903 UVMHIST_FUNC(__func__);
1904 UVMHIST_CALLARGS(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx...",
1905 (uintptr_t)map, hint, length, flags);
1906 UVMHIST_LOG(maphist, " uobj=%#jx, uoffset=%#jx, align=%#jx)",
1907 (uintptr_t)uobj, uoffset, align, 0);
1908
1909 KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || powerof2(align));
1910 KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
1911 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1912
1913 uvm_map_check(map, "map_findspace entry");
1914
1915 /*
1916 * Clamp the hint to the VM map's min/max address, and remmeber
1917 * the clamped original hint. Remember the original hint,
1918 * clamped to the min/max address. If we are aligning, then we
1919 * may have to try again with no alignment constraint if we
1920 * fail the first time.
1921 *
1922 * We use the original hint to verify later that the search has
1923 * been monotonic -- that is, nonincreasing or nondecreasing,
1924 * according to topdown or !topdown respectively. But the
1925 * clamping is not monotonic.
1926 */
1927 if (hint < vm_map_min(map)) { /* check ranges ... */
1928 if (flags & UVM_FLAG_FIXED) {
1929 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1930 return (NULL);
1931 }
1932 hint = vm_map_min(map);
1933 }
1934 if (hint > vm_map_max(map)) {
1935 UVMHIST_LOG(maphist,"<- VA %#jx > range [%#jx->%#jx]",
1936 hint, vm_map_min(map), vm_map_max(map), 0);
1937 return (NULL);
1938 }
1939 orig_hint = hint;
1940 INVARIANTS();
1941
1942 UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
1943 hint, vm_map_min(map), vm_map_max(map), 0);
1944
1945 /*
1946 * hint may not be aligned properly; we need to round it up or
1947 * down before proceeding further.
1948 */
1949 if ((flags & UVM_FLAG_COLORMATCH) == 0) {
1950 uvm_map_align_va(&hint, align, topdown);
1951 INVARIANTS();
1952 }
1953
1954 UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
1955 hint, vm_map_min(map), vm_map_max(map), 0);
1956 /*
1957 * Look for the first possible address; if there's already
1958 * something at this address, we have to start after it.
1959 */
1960
1961 /*
1962 * @@@: there are four, no, eight cases to consider.
1963 *
1964 * 0: found, fixed, bottom up -> fail
1965 * 1: found, fixed, top down -> fail
1966 * 2: found, not fixed, bottom up -> start after entry->end,
1967 * loop up
1968 * 3: found, not fixed, top down -> start before entry->start,
1969 * loop down
1970 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1971 * 5: not found, fixed, top down -> check entry->next->start, fail
1972 * 6: not found, not fixed, bottom up -> check entry->next->start,
1973 * loop up
1974 * 7: not found, not fixed, top down -> check entry->next->start,
1975 * loop down
1976 *
1977 * as you can see, it reduces to roughly five cases;
1978 * adding top down mapping adds only one unique case (without
1979 * it, there would be four cases).
1980 */
1981
1982 if ((flags & UVM_FLAG_FIXED) == 0 &&
1983 hint == (topdown ? vm_map_max(map) : vm_map_min(map))) {
1984 /*
1985 * The uvm_map_findspace algorithm is monotonic -- for
1986 * topdown VM it starts with a high hint and returns a
1987 * lower free address; for !topdown VM it starts with a
1988 * low hint and returns a higher free address. As an
1989 * optimization, start with the first (highest for
1990 * topdown, lowest for !topdown) free address.
1991 *
1992 * XXX This `optimization' probably doesn't actually do
1993 * much in practice unless userland explicitly passes
1994 * the VM map's minimum or maximum address, which
1995 * varies from machine to machine (VM_MAX/MIN_ADDRESS,
1996 * e.g. 0x7fbfdfeff000 on amd64 but 0xfffffffff000 on
1997 * aarch64) and may vary according to other factors
1998 * like sysctl vm.user_va0_disable. In particular, if
1999 * the user specifies 0 as a hint to mmap, then mmap
2000 * will choose a default address which is usually _not_
2001 * VM_MAX/MIN_ADDRESS but something else instead like
2002 * VM_MAX_ADDRESS - stack size - guard page overhead,
2003 * in which case this branch is never hit.
2004 *
2005 * In fact, this branch appears to have been broken for
2006 * two decades between when topdown was introduced in
2007 * ~2003 and when it was adapted to handle the topdown
2008 * case without violating the monotonicity assertion in
2009 * 2022. Maybe Someone^TM should either ditch the
2010 * optimization or find a better way to do it.
2011 */
2012 entry = map->first_free;
2013 } else if (uvm_map_lookup_entry(map, hint, &entry)) {
2014 KASSERT(entry->start <= hint);
2015 KASSERT(hint < entry->end);
2016 /* "hint" address already in use ... */
2017 if (flags & UVM_FLAG_FIXED) {
2018 UVMHIST_LOG(maphist, "<- fixed & VA in use",
2019 0, 0, 0, 0);
2020 return (NULL);
2021 }
2022 if (topdown)
2023 /* Start from lower gap. */
2024 entry = entry->prev;
2025 } else {
2026 KASSERT(entry == &map->header || entry->end <= hint);
2027 KASSERT(entry->next == &map->header ||
2028 hint < entry->next->start);
2029 if (flags & UVM_FLAG_FIXED) {
2030 if (entry->next->start >= hint &&
2031 length <= entry->next->start - hint)
2032 goto found;
2033
2034 /* "hint" address is gap but too small */
2035 UVMHIST_LOG(maphist, "<- fixed mapping failed",
2036 0, 0, 0, 0);
2037 return (NULL); /* only one shot at it ... */
2038 } else {
2039 /*
2040 * See if given hint fits in this gap.
2041 */
2042 avail = uvm_map_space_avail(&hint, length,
2043 uoffset, align, flags, topdown, entry);
2044 INVARIANTS();
2045 switch (avail) {
2046 case 1:
2047 goto found;
2048 case -1:
2049 goto wraparound;
2050 }
2051
2052 if (topdown) {
2053 /*
2054 * There is still a chance to fit
2055 * if hint > entry->end.
2056 */
2057 } else {
2058 /* Start from higher gap. */
2059 entry = entry->next;
2060 if (entry == &map->header)
2061 goto notfound;
2062 goto nextgap;
2063 }
2064 }
2065 }
2066
2067 /*
2068 * Note that the UVM_FLAG_FIXED case has already been handled in full.
2069 */
2070 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2071
2072 /* Try to find the space in the red-black tree */
2073
2074 /* Check slot before any entry */
2075 if (topdown) {
2076 KASSERTMSG(entry->next->start >= vm_map_min(map),
2077 "map=%p entry=%p entry->next=%p"
2078 " entry->next->start=0x%"PRIxVADDR" min=0x%"PRIxVADDR,
2079 map, entry, entry->next,
2080 entry->next->start, vm_map_min(map));
2081 if (length > entry->next->start - vm_map_min(map))
2082 hint = vm_map_min(map); /* XXX goto wraparound? */
2083 else
2084 hint = MIN(orig_hint, entry->next->start - length);
2085 KASSERT(hint >= vm_map_min(map));
2086 } else {
2087 hint = entry->end;
2088 }
2089 INVARIANTS();
2090 avail = uvm_map_space_avail(&hint, length, uoffset, align, flags,
2091 topdown, entry);
2092 INVARIANTS();
2093 switch (avail) {
2094 case 1:
2095 goto found;
2096 case -1:
2097 goto wraparound;
2098 }
2099
2100 nextgap:
2101 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
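	/*
	 * Each entry's "gap" is the size of the free space after it
	 * (tmp->next->start == tmp->end + tmp->gap), and "maxgap" is the
	 * largest gap anywhere in its subtree; a subtree whose maxgap is
	 * smaller than "length" can therefore be pruned from the search
	 * without descending into it.
	 */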
2102 /* If there is not enough space in the whole tree, we fail */
2103 tmp = ROOT_ENTRY(map);
2104 if (tmp == NULL || tmp->maxgap < length)
2105 goto notfound;
2106
2107 prev = NULL; /* previous candidate */
2108
2109 /* Find an entry close to hint that has enough space */
2110 for (; tmp;) {
2111 KASSERT(tmp->next->start == tmp->end + tmp->gap);
2112 if (topdown) {
2113 if (tmp->next->start < hint + length &&
2114 (prev == NULL || tmp->end > prev->end)) {
2115 if (tmp->gap >= length)
2116 prev = tmp;
2117 else if ((child = LEFT_ENTRY(tmp)) != NULL
2118 && child->maxgap >= length)
2119 prev = tmp;
2120 }
2121 } else {
2122 if (tmp->end >= hint &&
2123 (prev == NULL || tmp->end < prev->end)) {
2124 if (tmp->gap >= length)
2125 prev = tmp;
2126 else if ((child = RIGHT_ENTRY(tmp)) != NULL
2127 && child->maxgap >= length)
2128 prev = tmp;
2129 }
2130 }
2131 if (tmp->next->start < hint + length)
2132 child = RIGHT_ENTRY(tmp);
2133 else if (tmp->end > hint)
2134 child = LEFT_ENTRY(tmp);
2135 else {
2136 if (tmp->gap >= length)
2137 break;
2138 if (topdown)
2139 child = LEFT_ENTRY(tmp);
2140 else
2141 child = RIGHT_ENTRY(tmp);
2142 }
2143 if (child == NULL || child->maxgap < length)
2144 break;
2145 tmp = child;
2146 }
2147
2148 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2149 /*
2150 * Check whether the entry that we found satisfies the
2151 * space requirement.
2152 */
2153 if (topdown) {
2154 if (hint > tmp->next->start - length)
2155 hint = tmp->next->start - length;
2156 } else {
2157 if (hint < tmp->end)
2158 hint = tmp->end;
2159 }
2160 INVARIANTS();
2161 avail = uvm_map_space_avail(&hint, length, uoffset, align,
2162 flags, topdown, tmp);
2163 INVARIANTS();
2164 switch (avail) {
2165 case 1:
2166 entry = tmp;
2167 goto found;
2168 case -1:
2169 goto wraparound;
2170 }
2171 if (tmp->gap >= length)
2172 goto listsearch;
2173 }
2174 if (prev == NULL)
2175 goto notfound;
2176
2177 if (topdown) {
2178 KASSERT(orig_hint >= prev->next->start - length ||
2179 prev->next->start - length > prev->next->start);
2180 hint = prev->next->start - length;
2181 } else {
2182 KASSERT(orig_hint <= prev->end);
2183 hint = prev->end;
2184 }
2185 INVARIANTS();
2186 avail = uvm_map_space_avail(&hint, length, uoffset, align,
2187 flags, topdown, prev);
2188 INVARIANTS();
2189 switch (avail) {
2190 case 1:
2191 entry = prev;
2192 goto found;
2193 case -1:
2194 goto wraparound;
2195 }
2196 if (prev->gap >= length)
2197 goto listsearch;
2198
2199 if (topdown)
2200 tmp = LEFT_ENTRY(prev);
2201 else
2202 tmp = RIGHT_ENTRY(prev);
2203 for (;;) {
2204 KASSERT(tmp);
2205 KASSERTMSG(tmp->maxgap >= length,
2206 "tmp->maxgap=0x%"PRIxVSIZE" length=0x%"PRIxVSIZE,
2207 tmp->maxgap, length);
2208 if (topdown)
2209 child = RIGHT_ENTRY(tmp);
2210 else
2211 child = LEFT_ENTRY(tmp);
2212 if (child && child->maxgap >= length) {
2213 tmp = child;
2214 continue;
2215 }
2216 if (tmp->gap >= length)
2217 break;
2218 if (topdown)
2219 tmp = LEFT_ENTRY(tmp);
2220 else
2221 tmp = RIGHT_ENTRY(tmp);
2222 }
2223
2224 if (topdown) {
2225 KASSERT(orig_hint >= tmp->next->start - length ||
2226 tmp->next->start - length > tmp->next->start);
2227 hint = tmp->next->start - length;
2228 } else {
2229 KASSERT(orig_hint <= tmp->end);
2230 hint = tmp->end;
2231 }
2232 INVARIANTS();
2233 avail = uvm_map_space_avail(&hint, length, uoffset, align,
2234 flags, topdown, tmp);
2235 INVARIANTS();
2236 switch (avail) {
2237 case 1:
2238 entry = tmp;
2239 goto found;
2240 case -1:
2241 goto wraparound;
2242 }
2243
2244 /*
2245 * The tree fails to find an entry because of offset or alignment
2246 * restrictions. Search the list instead.
2247 */
2248 listsearch:
2249 /*
2250 * Look through the rest of the map, trying to fit a new region in
2251 * the gap between existing regions, or after the very last region.
2252 * note: entry->end = base VA of current gap,
2253 * entry->next->start = VA of end of current gap
2254 */
2255
2256 INVARIANTS();
2257 for (;;) {
2258 /* Update hint for current gap. */
2259 hint = topdown ? entry->next->start - length : entry->end;
2260 INVARIANTS();
2261
2262 /* See if it fits. */
2263 avail = uvm_map_space_avail(&hint, length, uoffset, align,
2264 flags, topdown, entry);
2265 INVARIANTS();
2266 switch (avail) {
2267 case 1:
2268 goto found;
2269 case -1:
2270 goto wraparound;
2271 }
2272
2273 /* Advance to next/previous gap */
2274 if (topdown) {
2275 if (entry == &map->header) {
2276 UVMHIST_LOG(maphist, "<- failed (off start)",
2277 0,0,0,0);
2278 goto notfound;
2279 }
2280 entry = entry->prev;
2281 } else {
2282 entry = entry->next;
2283 if (entry == &map->header) {
2284 UVMHIST_LOG(maphist, "<- failed (off end)",
2285 0,0,0,0);
2286 goto notfound;
2287 }
2288 }
2289 }
2290
2291 found:
2292 SAVE_HINT(map, map->hint, entry);
2293 *result = hint;
2294 UVMHIST_LOG(maphist,"<- got it! (result=%#jx)", hint, 0,0,0);
2295 INVARIANTS();
2296 KASSERT(entry->end <= hint);
2297 KASSERT(hint <= entry->next->start);
2298 KASSERT(length <= entry->next->start - hint);
2299 return (entry);
2300
2301 wraparound:
2302 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2303
2304 return (NULL);
2305
2306 notfound:
2307 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2308
2309 return (NULL);
2310 #undef INVARIANTS
2311 }
2312
2313 /*
2314 * U N M A P - m a i n h e l p e r f u n c t i o n s
2315 */
2316
2317 /*
2318 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
2319 *
2320 * => caller must check alignment and size
2321 * => map must be locked by caller
2322 * => we return a list of map entries that we've removed from the map
2323 * in "entry_list"
2324 */
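/*
 * Sketch of the intended caller pattern (cf. uvm_unmap()); the split
 * lets reference dropping, and any pager I/O it triggers, happen with
 * the map unlocked:
 *
 * struct vm_map_entry *dead_entries;
 *
 * vm_map_lock(map);
 * uvm_unmap_remove(map, start, end, &dead_entries, 0);
 * vm_map_unlock(map);
 * if (dead_entries != NULL)
 *     uvm_unmap_detach(dead_entries, 0);
 */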
2325
2326 void
2327 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2328 struct vm_map_entry **entry_list /* OUT */, int flags)
2329 {
2330 struct vm_map_entry *entry, *first_entry, *next;
2331 vaddr_t len;
2332 UVMHIST_FUNC(__func__);
2333 UVMHIST_CALLARGS(maphist,"(map=%#jx, start=%#jx, end=%#jx)",
2334 (uintptr_t)map, start, end, 0);
2335 VM_MAP_RANGE_CHECK(map, start, end);
2336
2337 KASSERT(vm_map_locked_p(map));
2338
2339 uvm_map_check(map, "unmap_remove entry");
2340
2341 /*
2342 * find first entry
2343 */
2344
2345 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2346 /* clip and go... */
2347 entry = first_entry;
2348 UVM_MAP_CLIP_START(map, entry, start);
2349 /* critical! prevents stale hint */
2350 SAVE_HINT(map, entry, entry->prev);
2351 } else {
2352 entry = first_entry->next;
2353 }
2354
2355 /*
2356 * save the free space hint
2357 */
2358
2359 if (map->first_free != &map->header && map->first_free->start >= start)
2360 map->first_free = entry->prev;
2361
2362 /*
2363 * note: we now re-use first_entry for a different task. we remove
2364 * a number of map entries from the map and save them in a linked
2365 * list headed by "first_entry". once we remove them from the map
2366 * the caller should unlock the map and drop the references to the
2367 * backing objects [c.f. uvm_unmap_detach]. the object is to
2368 * separate unmapping from reference dropping. why?
2369 * [1] the map has to be locked for unmapping
2370 * [2] the map need not be locked for reference dropping
2371 * [3] dropping references may trigger pager I/O, and if we hit
2372 * a pager that does synchronous I/O we may have to wait for it.
2373 * [4] we would like all waiting for I/O to occur with maps unlocked
2374 * so that we don't block other threads.
2375 */
2376
2377 first_entry = NULL;
2378 *entry_list = NULL;
2379
2380 /*
2381 * break up the area into map entry sized regions and unmap. note
2382 * that all mappings have to be removed before we can even consider
2383 * dropping references to amaps or VM objects (otherwise we could end
2384 * up with a mapping to a page on the free list which would be very bad)
2385 */
2386
2387 while ((entry != &map->header) && (entry->start < end)) {
2388 KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
2389
2390 UVM_MAP_CLIP_END(map, entry, end);
2391 next = entry->next;
2392 len = entry->end - entry->start;
2393
2394 /*
2395 * unwire before removing addresses from the pmap; otherwise
2396 * unwiring will put the entries back into the pmap (XXX).
2397 */
2398
2399 if (VM_MAPENT_ISWIRED(entry)) {
2400 uvm_map_entry_unwire(map, entry);
2401 }
2402 if (flags & UVM_FLAG_VAONLY) {
2403
2404 /* nothing */
2405
2406 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2407
2408 /*
2409 * if the map is non-pageable, any pages mapped there
2410 * must be wired and entered with pmap_kenter_pa(),
2411 * and we should free any such pages immediately.
2412 * this is mostly used for kmem_map.
2413 */
2414 KASSERT(vm_map_pmap(map) == pmap_kernel());
2415
2416 uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
2417 } else if (UVM_ET_ISOBJ(entry) &&
2418 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2419 panic("%s: kernel object %p %p\n",
2420 __func__, map, entry);
2421 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2422 /*
2423 * remove mappings the standard way. lock object
2424 * and/or amap to ensure vm_page state does not
2425 * change while in pmap_remove().
2426 */
2427
2428 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
2429 uvm_map_lock_entry(entry, RW_WRITER);
2430 #else
2431 uvm_map_lock_entry(entry, RW_READER);
2432 #endif
2433 pmap_remove(map->pmap, entry->start, entry->end);
2434
2435 /*
2436 * note: if map is dying, leave pmap_update() for
2437 * later. if the map is to be reused (exec) then
2438 * pmap_update() will be called. if the map is
2439 * being disposed of (exit) then pmap_destroy()
2440 * will be called.
2441 */
2442
2443 if ((map->flags & VM_MAP_DYING) == 0) {
2444 pmap_update(vm_map_pmap(map));
2445 } else {
2446 KASSERT(vm_map_pmap(map) != pmap_kernel());
2447 }
2448
2449 uvm_map_unlock_entry(entry);
2450 }
2451
2452 #if defined(UVMDEBUG)
2453 /*
2454 * check if there's any remaining mapping,
2455 * which would be a bug in the caller.
2456 */
2457
2458 vaddr_t va;
2459 for (va = entry->start; va < entry->end;
2460 va += PAGE_SIZE) {
2461 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2462 panic("%s: %#"PRIxVADDR" has mapping",
2463 __func__, va);
2464 }
2465 }
2466
2467 if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
2468 uvm_km_check_empty(map, entry->start, entry->end);
2469 }
2470 #endif /* defined(UVMDEBUG) */
2471
2472 /*
2473 * remove entry from map and put it on our list of entries
2474 * that we've nuked. then go to next entry.
2475 */
2476
2477 UVMHIST_LOG(maphist, " removed map entry %#jx",
2478 (uintptr_t)entry, 0, 0, 0);
2479
2480 /* critical! prevents stale hint */
2481 SAVE_HINT(map, entry, entry->prev);
2482
2483 uvm_map_entry_unlink(map, entry);
2484 KASSERT(map->size >= len);
2485 map->size -= len;
2486 entry->prev = NULL;
2487 entry->next = first_entry;
2488 first_entry = entry;
2489 entry = next;
2490 }
2491
2492 uvm_map_check(map, "unmap_remove leave");
2493
2494 /*
2495 * now we've cleaned up the map and are ready for the caller to drop
2496 * references to the mapped objects.
2497 */
2498
2499 *entry_list = first_entry;
2500 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2501
2502 if (map->flags & VM_MAP_WANTVA) {
2503 mutex_enter(&map->misc_lock);
2504 map->flags &= ~VM_MAP_WANTVA;
2505 cv_broadcast(&map->cv);
2506 mutex_exit(&map->misc_lock);
2507 }
2508 }
2509
2510 /*
2511 * uvm_unmap_detach: drop references in a chain of map entries
2512 *
2513 * => we will free the map entries as we traverse the list.
2514 */
2515
2516 void
2517 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2518 {
2519 struct vm_map_entry *next_entry;
2520 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2521
2522 while (first_entry) {
2523 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2524 UVMHIST_LOG(maphist,
2525 " detach %#jx: amap=%#jx, obj=%#jx, submap?=%jd",
2526 (uintptr_t)first_entry,
2527 (uintptr_t)first_entry->aref.ar_amap,
2528 (uintptr_t)first_entry->object.uvm_obj,
2529 UVM_ET_ISSUBMAP(first_entry));
2530
2531 /*
2532 * drop reference to amap, if we've got one
2533 */
2534
2535 if (first_entry->aref.ar_amap)
2536 uvm_map_unreference_amap(first_entry, flags);
2537
2538 /*
2539 * drop reference to our backing object, if we've got one
2540 */
2541
2542 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2543 if (UVM_ET_ISOBJ(first_entry) &&
2544 first_entry->object.uvm_obj->pgops->pgo_detach) {
2545 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2546 (first_entry->object.uvm_obj);
2547 }
2548 next_entry = first_entry->next;
2549 uvm_mapent_free(first_entry);
2550 first_entry = next_entry;
2551 }
2552 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2553 }
2554
2555 /*
2556 * E X T R A C T I O N F U N C T I O N S
2557 */
2558
2559 /*
2560 * uvm_map_reserve: reserve space in a vm_map for future use.
2561 *
2562 * => we reserve space in a map by putting a dummy map entry in the
2563 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2564 * => map should be unlocked (we will write lock it)
2565 * => we return true if we were able to reserve space
2566 * => XXXCDC: should be inline?
2567 */
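/*
 * Illustrative use (a sketch; cf. step 1 of uvm_map_extract() below):
 * reserve a blank range now, fill it in with real entries later via
 * uvm_map_replace().
 *
 * vaddr_t va = vm_map_min(dstmap);
 *
 * if (!uvm_map_reserve(dstmap, len, 0, 0, &va, 0))
 *     return ENOMEM;       // no virtual space available
 * // ... later: uvm_map_replace(dstmap, va, va + len, chain, ...)
 */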
2568
2569 int
2570 uvm_map_reserve(struct vm_map *map, vsize_t size,
2571 vaddr_t offset /* hint for pmap_prefer */,
2572 vsize_t align /* alignment */,
2573 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2574 uvm_flag_t flags /* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */)
2575 {
2576 UVMHIST_FUNC(__func__);
2577 UVMHIST_CALLARGS(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)",
2578 (uintptr_t)map, size, offset, (uintptr_t)raddr);
2579
2580 size = round_page(size);
2581
2582 /*
2583 * reserve some virtual space.
2584 */
2585
2586 if (uvm_map(map, raddr, size, NULL, offset, align,
2587 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2588 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2589 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2590 return (false);
2591 }
2592
2593 UVMHIST_LOG(maphist, "<- done (*raddr=%#jx)", *raddr,0,0,0);
2594 return (true);
2595 }
2596
2597 /*
2598 * uvm_map_replace: replace a reserved (blank) area of memory with
2599 * real mappings.
2600 *
2601 * => caller must WRITE-LOCK the map
2602 * => we return true if replacement was a success
2603 * => we expect the newents chain to have nnewents entries on it and
2604 * we expect newents->prev to point to the last entry on the list
2605 * => note newents is allowed to be NULL
2606 */
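/*
 * Chain shape expected here, as built by steps 3 and 4 of
 * uvm_map_extract() (a sketch): forward-linked through ->next,
 * NULL-terminated, with newents->prev giving O(1) access to the tail:
 *
 * newents -> e1 -> e2 -> ... -> eN -> NULL
 * newents->prev == eN
 */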
2607
2608 static int
2609 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2610 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2611 struct vm_map_entry **oldentryp)
2612 {
2613 struct vm_map_entry *oldent, *last;
2614
2615 uvm_map_check(map, "map_replace entry");
2616
2617 /*
2618 * first find the blank map entry at the specified address
2619 */
2620
2621 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2622 return (false);
2623 }
2624
2625 /*
2626 * check to make sure we have a proper blank entry
2627 */
2628
2629 if (end < oldent->end) {
2630 UVM_MAP_CLIP_END(map, oldent, end);
2631 }
2632 if (oldent->start != start || oldent->end != end ||
2633 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2634 return (false);
2635 }
2636
2637 #ifdef DIAGNOSTIC
2638
2639 /*
2640 * sanity check the newents chain
2641 */
2642
2643 {
2644 struct vm_map_entry *tmpent = newents;
2645 int nent = 0;
2646 vsize_t sz = 0;
2647 vaddr_t cur = start;
2648
2649 while (tmpent) {
2650 nent++;
2651 sz += tmpent->end - tmpent->start;
2652 if (tmpent->start < cur)
2653 panic("uvm_map_replace1");
2654 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2655 panic("uvm_map_replace2: "
2656 "tmpent->start=%#"PRIxVADDR
2657 ", tmpent->end=%#"PRIxVADDR
2658 ", end=%#"PRIxVADDR,
2659 tmpent->start, tmpent->end, end);
2660 }
2661 cur = tmpent->end;
2662 if (tmpent->next) {
2663 if (tmpent->next->prev != tmpent)
2664 panic("uvm_map_replace3");
2665 } else {
2666 if (newents->prev != tmpent)
2667 panic("uvm_map_replace4");
2668 }
2669 tmpent = tmpent->next;
2670 }
2671 if (nent != nnewents)
2672 panic("uvm_map_replace5");
2673 if (sz != nsize)
2674 panic("uvm_map_replace6");
2675 }
2676 #endif
2677
2678 /*
2679 * map entry is a valid blank! replace it. (this does all the
2680 * work of map entry link/unlink...).
2681 */
2682
2683 if (newents) {
2684 last = newents->prev;
2685
2686 /* critical: flush stale hints out of map */
2687 SAVE_HINT(map, map->hint, newents);
2688 if (map->first_free == oldent)
2689 map->first_free = last;
2690
2691 last->next = oldent->next;
2692 last->next->prev = last;
2693
2694 /* Fix RB tree */
2695 uvm_rb_remove(map, oldent);
2696
2697 newents->prev = oldent->prev;
2698 newents->prev->next = newents;
2699 map->nentries = map->nentries + (nnewents - 1);
2700
2701 /* Fixup the RB tree */
2702 {
2703 int i;
2704 struct vm_map_entry *tmp;
2705
2706 tmp = newents;
2707 for (i = 0; i < nnewents && tmp; i++) {
2708 uvm_rb_insert(map, tmp);
2709 tmp = tmp->next;
2710 }
2711 }
2712 } else {
2713 /* NULL list of new entries: just remove the old one */
2714 clear_hints(map, oldent);
2715 uvm_map_entry_unlink(map, oldent);
2716 }
2717 map->size -= end - start - nsize;
2718
2719 uvm_map_check(map, "map_replace leave");
2720
2721 /*
2722 * now we can free the old blank entry and return.
2723 */
2724
2725 *oldentryp = oldent;
2726 return (true);
2727 }
2728
2729 /*
2730 * uvm_map_extract: extract a mapping from a map and put it somewhere
2731 * (maybe removing the old mapping)
2732 *
2733 * => maps should be unlocked (we will write lock them)
2734 * => returns 0 on success, error code otherwise
2735 * => start must be page aligned
2736 * => len must be page sized
2737 * => flags:
2738 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2739 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2740 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2741 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2742 * UVM_EXTRACT_PROT_ALL: set prot to UVM_PROT_ALL as we go
2743 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2744 * >>>NOTE: QREFs must be unmapped via the QREF path, and thus should
2745 * only be used from within the kernel in a kernel-level map <<<
2746 */
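/*
 * Illustrative sketch (hypothetical variables): temporarily mapping a
 * range of another map into kernel_map, in the style of uvm_io():
 *
 * vaddr_t kva;
 * int error;
 *
 * error = uvm_map_extract(&vm->vm_map, uva, len, kernel_map, &kva,
 *     UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
 * if (error)
 *     return error;
 * // ... access [kva, kva + len) ...
 * uvm_unmap(kernel_map, kva, kva + len);   // QREFs unmapped via kernel map
 */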
2747
2748 int
2749 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2750 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2751 {
2752 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2753 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2754 *deadentry, *oldentry;
2755 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2756 vsize_t elen __unused;
2757 int nchain, error, copy_ok;
2758 vsize_t nsize;
2759 UVMHIST_FUNC(__func__);
2760 UVMHIST_CALLARGS(maphist,"(srcmap=%#jx,start=%#jx, len=%#jx",
2761 (uintptr_t)srcmap, start, len, 0);
2762 UVMHIST_LOG(maphist," ...,dstmap=%#jx, flags=%#jx)",
2763 (uintptr_t)dstmap, flags, 0, 0);
2764
2765 /*
2766 * step 0: sanity check: start must be on a page boundary, length
2767 * must be page sized. can't ask for CONTIG/QREF if you asked for
2768 * REMOVE.
2769 */
2770
2771 KASSERTMSG((start & PAGE_MASK) == 0, "start=0x%"PRIxVADDR, start);
2772 KASSERTMSG((len & PAGE_MASK) == 0, "len=0x%"PRIxVADDR, len);
2773 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2774 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2775
2776 /*
2777 * step 1: reserve space in the target map for the extracted area
2778 */
2779
2780 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2781 dstaddr = vm_map_min(dstmap);
2782 if (!uvm_map_reserve(dstmap, len, start,
2783 atop(start) & uvmexp.colormask, &dstaddr,
2784 UVM_FLAG_COLORMATCH))
2785 return (ENOMEM);
2786 KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0);
2787 *dstaddrp = dstaddr; /* pass address back to caller */
2788 UVMHIST_LOG(maphist, " dstaddr=%#jx", dstaddr,0,0,0);
2789 } else {
2790 dstaddr = *dstaddrp;
2791 }
2792
2793 /*
2794 * step 2: set up for the extraction process loop by init'ing the
2795 * map entry chain, locking src map, and looking up the first useful
2796 * entry in the map.
2797 */
2798
2799 end = start + len;
2800 newend = dstaddr + len;
2801 chain = endchain = NULL;
2802 nchain = 0;
2803 nsize = 0;
2804 vm_map_lock(srcmap);
2805
2806 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2807
2808 /* "start" is within an entry */
2809 if (flags & UVM_EXTRACT_QREF) {
2810
2811 /*
2812 * for quick references we don't clip the entry, so
2813 * the entry may map space "before" the starting
2814 * virtual address... this is the "fudge" factor
2815 * (which can be non-zero only the first time
2816 * through the "while" loop in step 3).
2817 */
2818
2819 fudge = start - entry->start;
2820 } else {
2821
2822 /*
2823 * normal reference: we clip the map to fit (thus
2824 * fudge is zero)
2825 */
2826
2827 UVM_MAP_CLIP_START(srcmap, entry, start);
2828 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2829 fudge = 0;
2830 }
2831 } else {
2832
2833 /* "start" is not within an entry ... skip to next entry */
2834 if (flags & UVM_EXTRACT_CONTIG) {
2835 error = EINVAL;
2836 goto bad; /* definite hole here ... */
2837 }
2838
2839 entry = entry->next;
2840 fudge = 0;
2841 }
2842
2843 /* save values from srcmap for step 6 */
2844 orig_entry = entry;
2845 orig_fudge = fudge;
2846
2847 /*
2848 * step 3: now start looping through the map entries, extracting
2849 * as we go.
2850 */
2851
2852 while (entry->start < end && entry != &srcmap->header) {
2853
2854 /* if we are not doing a quick reference, clip it */
2855 if ((flags & UVM_EXTRACT_QREF) == 0)
2856 UVM_MAP_CLIP_END(srcmap, entry, end);
2857
2858 /* clear needs_copy (allow chunking) */
2859 if (UVM_ET_ISNEEDSCOPY(entry)) {
2860 amap_copy(srcmap, entry,
2861 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2862 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2863 error = ENOMEM;
2864 goto bad;
2865 }
2866
2867 /* amap_copy could clip (during chunk)! update fudge */
2868 if (fudge) {
2869 fudge = start - entry->start;
2870 orig_fudge = fudge;
2871 }
2872 }
2873
2874 /* calculate the offset of this from "start" */
2875 oldoffset = (entry->start + fudge) - start;
2876
2877 /* allocate a new map entry */
2878 newentry = uvm_mapent_alloc(dstmap, 0);
2879 if (newentry == NULL) {
2880 error = ENOMEM;
2881 goto bad;
2882 }
2883
2884 /* set up new map entry */
2885 newentry->next = NULL;
2886 newentry->prev = endchain;
2887 newentry->start = dstaddr + oldoffset;
2888 newentry->end =
2889 newentry->start + (entry->end - (entry->start + fudge));
2890 if (newentry->end > newend || newentry->end < newentry->start)
2891 newentry->end = newend;
2892 newentry->object.uvm_obj = entry->object.uvm_obj;
2893 if (newentry->object.uvm_obj) {
2894 if (newentry->object.uvm_obj->pgops->pgo_reference)
2895 newentry->object.uvm_obj->pgops->
2896 pgo_reference(newentry->object.uvm_obj);
2897 newentry->offset = entry->offset + fudge;
2898 } else {
2899 newentry->offset = 0;
2900 }
2901 newentry->etype = entry->etype;
2902 if (flags & UVM_EXTRACT_PROT_ALL) {
2903 newentry->protection = newentry->max_protection =
2904 UVM_PROT_ALL;
2905 } else {
2906 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2907 entry->max_protection : entry->protection;
2908 newentry->max_protection = entry->max_protection;
2909 }
2910 newentry->inheritance = entry->inheritance;
2911 newentry->wired_count = 0;
2912 newentry->aref.ar_amap = entry->aref.ar_amap;
2913 if (newentry->aref.ar_amap) {
2914 newentry->aref.ar_pageoff =
2915 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2916 uvm_map_reference_amap(newentry, AMAP_SHARED |
2917 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2918 } else {
2919 newentry->aref.ar_pageoff = 0;
2920 }
2921 newentry->advice = entry->advice;
2922 if ((flags & UVM_EXTRACT_QREF) != 0) {
2923 newentry->flags |= UVM_MAP_NOMERGE;
2924 }
2925
2926 /* now link it on the chain */
2927 nchain++;
2928 nsize += newentry->end - newentry->start;
2929 if (endchain == NULL) {
2930 chain = endchain = newentry;
2931 } else {
2932 endchain->next = newentry;
2933 endchain = newentry;
2934 }
2935
2936 /* end of 'while' loop! */
2937 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2938 (entry->next == &srcmap->header ||
2939 entry->next->start != entry->end)) {
2940 error = EINVAL;
2941 goto bad;
2942 }
2943 entry = entry->next;
2944 fudge = 0;
2945 }
2946
2947 /*
2948 * step 4: close off chain (in format expected by uvm_map_replace)
2949 */
2950
2951 if (chain)
2952 chain->prev = endchain;
2953
2954 /*
2955 * step 5: attempt to lock the dest map so we can pmap_copy.
2956 * note usage of copy_ok:
2957 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2958 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2959 */
2960
2961 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2962 copy_ok = 1;
2963 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2964 nchain, nsize, &resentry)) {
2965 if (srcmap != dstmap)
2966 vm_map_unlock(dstmap);
2967 error = EIO;
2968 goto bad;
2969 }
2970 } else {
2971 copy_ok = 0;
2972 /* replace deferred until step 7 */
2973 }
2974
2975 /*
2976 * step 6: traverse the srcmap a second time to do the following:
2977 * - if we got a lock on the dstmap do pmap_copy
2978 * - if UVM_EXTRACT_REMOVE remove the entries
2979 * we make use of orig_entry and orig_fudge (saved in step 2)
2980 */
2981
2982 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2983
2984 /* purge possible stale hints from srcmap */
2985 if (flags & UVM_EXTRACT_REMOVE) {
2986 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2987 if (srcmap->first_free != &srcmap->header &&
2988 srcmap->first_free->start >= start)
2989 srcmap->first_free = orig_entry->prev;
2990 }
2991
2992 entry = orig_entry;
2993 fudge = orig_fudge;
2994 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2995
2996 while (entry->start < end && entry != &srcmap->header) {
2997 if (copy_ok) {
2998 oldoffset = (entry->start + fudge) - start;
2999 elen = MIN(end, entry->end) -
3000 (entry->start + fudge);
3001 pmap_copy(dstmap->pmap, srcmap->pmap,
3002 dstaddr + oldoffset, elen,
3003 entry->start + fudge);
3004 }
3005
3006 /* we advance "entry" in the following if statement */
3007 if (flags & UVM_EXTRACT_REMOVE) {
3008 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
3009 uvm_map_lock_entry(entry, RW_WRITER);
3010 #else
3011 uvm_map_lock_entry(entry, RW_READER);
3012 #endif
3013 pmap_remove(srcmap->pmap, entry->start,
3014 entry->end);
3015 uvm_map_unlock_entry(entry);
3016 oldentry = entry; /* save entry */
3017 entry = entry->next; /* advance */
3018 uvm_map_entry_unlink(srcmap, oldentry);
3019 /* add to dead list */
3020 oldentry->next = deadentry;
3021 deadentry = oldentry;
3022 } else {
3023 entry = entry->next; /* advance */
3024 }
3025
3026 /* end of 'while' loop */
3027 fudge = 0;
3028 }
3029 pmap_update(srcmap->pmap);
3030
3031 /*
3032 * unlock dstmap. we will dispose of deadentry in
3033 * step 7 if needed
3034 */
3035
3036 if (copy_ok && srcmap != dstmap)
3037 vm_map_unlock(dstmap);
3038
3039 } else {
3040 deadentry = NULL;
3041 }
3042
3043 /*
3044 * step 7: we are done with the source map, unlock. if copy_ok
3045 * is 0 then we have not replaced the dummy mapping in dstmap yet
3046 * and we need to do so now.
3047 */
3048
3049 vm_map_unlock(srcmap);
3050 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
3051 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
3052
3053 /* now do the replacement if we didn't do it in step 5 */
3054 if (copy_ok == 0) {
3055 vm_map_lock(dstmap);
3056 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
3057 nchain, nsize, &resentry);
3058 vm_map_unlock(dstmap);
3059
3060 if (error == false) {
3061 error = EIO;
3062 goto bad2;
3063 }
3064 }
3065
3066 if (resentry != NULL)
3067 uvm_mapent_free(resentry);
3068
3069 return (0);
3070
3071 /*
3072 * bad: failure recovery
3073 */
3074 bad:
3075 vm_map_unlock(srcmap);
3076 bad2: /* src already unlocked */
3077 if (chain)
3078 uvm_unmap_detach(chain,
3079 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3080
3081 if (resentry != NULL)
3082 uvm_mapent_free(resentry);
3083
3084 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3085 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
3086 }
3087 return (error);
3088 }
3089
3090 /* end of extraction functions */
3091
3092 /*
3093 * uvm_map_submap: punch down part of a map into a submap
3094 *
3095 * => only the kernel_map is allowed to be submapped
3096 * => the purpose of submapping is to break up the locking granularity
3097 * of a larger map
3098 * => the range specified must have been mapped previously with a uvm_map()
3099 * call [with uobj==NULL] to create a blank map entry in the main map.
3100 * [And it had better still be blank!]
3101 * => maps which contain submaps should never be copied or forked.
3102 * => to remove a submap, use uvm_unmap() on the main map
3103 * and then uvm_map_deallocate() the submap.
3104 * => main map must be unlocked.
3105 * => submap must have been init'd and have a zero reference count.
3106 * [need not be locked as we don't actually reference it]
3107 */
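/*
 * Sketch of the usual sequence (normally driven by uvm_km_suballoc();
 * details and error handling elided here):
 *
 * // 1. create a blank entry in the main map via uvm_map(..., NULL, ...)
 * // 2. set up the submap over the same range
 * uvm_map_setup(submap, start, end, flags);
 * submap->pmap = vm_map_pmap(map);
 * // 3. punch it in
 * if (uvm_map_submap(map, start, end, submap) != 0)
 *     panic("submap allocation failed");
 */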
3108
3109 int
3110 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3111 struct vm_map *submap)
3112 {
3113 struct vm_map_entry *entry;
3114 int error;
3115
3116 vm_map_lock(map);
3117 VM_MAP_RANGE_CHECK(map, start, end);
3118
3119 if (uvm_map_lookup_entry(map, start, &entry)) {
3120 UVM_MAP_CLIP_START(map, entry, start);
3121 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
3122 } else {
3123 entry = NULL;
3124 }
3125
3126 if (entry != NULL &&
3127 entry->start == start && entry->end == end &&
3128 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3129 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3130 entry->etype |= UVM_ET_SUBMAP;
3131 entry->object.sub_map = submap;
3132 entry->offset = 0;
3133 uvm_map_reference(submap);
3134 error = 0;
3135 } else {
3136 error = EINVAL;
3137 }
3138 vm_map_unlock(map);
3139
3140 return error;
3141 }
3142
3143 /*
3144 * uvm_map_protect_user: change map protection on behalf of the user.
3145 * Enforces PAX settings as necessary.
3146 */
3147 int
3148 uvm_map_protect_user(struct lwp *l, vaddr_t start, vaddr_t end,
3149 vm_prot_t new_prot)
3150 {
3151 int error;
3152
3153 if ((error = PAX_MPROTECT_VALIDATE(l, new_prot)))
3154 return error;
3155
3156 return uvm_map_protect(&l->l_proc->p_vmspace->vm_map, start, end,
3157 new_prot, false);
3158 }
3159
3160
3161 /*
3162 * uvm_map_protect: change map protection
3163 *
3164 * => set_max means set max_protection.
3165 * => map must be unlocked.
3166 */
3167
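/*
 * MASK: when pushing protections into the pmap we must not grant write
 * access on copy-on-write entries before the COW fault has resolved,
 * so VM_PROT_WRITE is masked out for those.
 */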
3168 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3169 ~VM_PROT_WRITE : VM_PROT_ALL)
3170
3171 int
3172 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3173 vm_prot_t new_prot, bool set_max)
3174 {
3175 struct vm_map_entry *current, *entry;
3176 int error = 0;
3177 UVMHIST_FUNC(__func__);
3178 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)",
3179 (uintptr_t)map, start, end, new_prot);
3180
3181 vm_map_lock(map);
3182 VM_MAP_RANGE_CHECK(map, start, end);
3183 if (uvm_map_lookup_entry(map, start, &entry)) {
3184 UVM_MAP_CLIP_START(map, entry, start);
3185 } else {
3186 entry = entry->next;
3187 }
3188
3189 /*
3190 * make a first pass to check for protection violations.
3191 */
3192
3193 current = entry;
3194 while ((current != &map->header) && (current->start < end)) {
3195 if (UVM_ET_ISSUBMAP(current)) {
3196 error = EINVAL;
3197 goto out;
3198 }
3199 if ((new_prot & current->max_protection) != new_prot) {
3200 error = EACCES;
3201 goto out;
3202 }
3203 /*
3204 * Don't allow VM_PROT_EXECUTE to be set on entries that
3205 * point to vnodes that are associated with a NOEXEC file
3206 * system.
3207 */
3208 if (UVM_ET_ISOBJ(current) &&
3209 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3210 struct vnode *vp =
3211 (struct vnode *) current->object.uvm_obj;
3212
3213 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3214 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3215 error = EACCES;
3216 goto out;
3217 }
3218 }
3219
3220 current = current->next;
3221 }
3222
3223 /* go back and fix up protections (no need to clip this time). */
3224
3225 current = entry;
3226 while ((current != &map->header) && (current->start < end)) {
3227 vm_prot_t old_prot;
3228
3229 UVM_MAP_CLIP_END(map, current, end);
3230 old_prot = current->protection;
3231 if (set_max)
3232 current->protection =
3233 (current->max_protection = new_prot) & old_prot;
3234 else
3235 current->protection = new_prot;
3236
3237 /*
3238 * update physical map if necessary. worry about copy-on-write
3239 * here -- CHECK THIS XXX
3240 */
3241
3242 if (current->protection != old_prot) {
3243 /* update pmap! */
3244 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
3245 uvm_map_lock_entry(current, RW_WRITER);
3246 #else
3247 uvm_map_lock_entry(current, RW_READER);
3248 #endif
3249 pmap_protect(map->pmap, current->start, current->end,
3250 current->protection & MASK(current));
3251 uvm_map_unlock_entry(current);
3252
3253 /*
3254 * If this entry points at a vnode, and the
3255 * protection includes VM_PROT_EXECUTE, mark
3256 * the vnode as VEXECMAP.
3257 */
3258 if (UVM_ET_ISOBJ(current)) {
3259 struct uvm_object *uobj =
3260 current->object.uvm_obj;
3261
3262 if (UVM_OBJ_IS_VNODE(uobj) &&
3263 (current->protection & VM_PROT_EXECUTE)) {
3264 vn_markexec((struct vnode *) uobj);
3265 }
3266 }
3267 }
3268
3269 /*
3270 * If the map is configured to lock any future mappings,
3271 * wire this entry now if the old protection was VM_PROT_NONE
3272 * and the new protection is not VM_PROT_NONE.
3273 */
3274
3275 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3276 VM_MAPENT_ISWIRED(current) == 0 &&
3277 old_prot == VM_PROT_NONE &&
3278 new_prot != VM_PROT_NONE) {
3279
3280 /*
3281 * We must call pmap_update() here because the
3282 * pmap_protect() call above might have removed some
3283 * pmap entries and uvm_map_pageable() might create
3284 * some new pmap entries that rely on the prior
3285 * removals being completely finished.
3286 */
3287
3288 pmap_update(map->pmap);
3289
3290 if (uvm_map_pageable(map, current->start,
3291 current->end, false,
3292 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3293
3294 /*
3295 * If locking the entry fails, remember the
3296 * error if it's the first one. Note we
3297 * still continue setting the protection in
3298 * the map, but will return the error
3299 * condition regardless.
3300 *
3301 * XXX Ignore what the actual error is,
3302 * XXX just call it a resource shortage
3303 * XXX so that it doesn't get confused
3304 * XXX what uvm_map_protect() itself would
3305 * XXX normally return.
3306 */
3307
3308 error = ENOMEM;
3309 }
3310 }
3311 current = current->next;
3312 }
3313 pmap_update(map->pmap);
3314
3315 out:
3316 vm_map_unlock(map);
3317
3318 UVMHIST_LOG(maphist, "<- done, error=%jd",error,0,0,0);
3319 return error;
3320 }
3321
3322 #undef MASK
3323
3324 /*
3325 * uvm_map_inherit: set inheritance code for range of addrs in map.
3326 *
3327 * => map must be unlocked
3328 * => note that the inherit code is used during a "fork". see fork
3329 * code for details.
3330 */
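/*
 * Reached via sys_minherit(); e.g. (illustrative) a process can arrange
 * for a region to be shared with future children instead of copied:
 *
 * minherit(addr, len, MAP_INHERIT_SHARE);
 */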
3331
3332 int
3333 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3334 vm_inherit_t new_inheritance)
3335 {
3336 struct vm_map_entry *entry, *temp_entry;
3337 UVMHIST_FUNC(__func__);
3338 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)",
3339 (uintptr_t)map, start, end, new_inheritance);
3340
3341 switch (new_inheritance) {
3342 case MAP_INHERIT_NONE:
3343 case MAP_INHERIT_COPY:
3344 case MAP_INHERIT_SHARE:
3345 case MAP_INHERIT_ZERO:
3346 break;
3347 default:
3348 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3349 return EINVAL;
3350 }
3351
3352 vm_map_lock(map);
3353 VM_MAP_RANGE_CHECK(map, start, end);
3354 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3355 entry = temp_entry;
3356 UVM_MAP_CLIP_START(map, entry, start);
3357 } else {
3358 entry = temp_entry->next;
3359 }
3360 while ((entry != &map->header) && (entry->start < end)) {
3361 UVM_MAP_CLIP_END(map, entry, end);
3362 entry->inheritance = new_inheritance;
3363 entry = entry->next;
3364 }
3365 vm_map_unlock(map);
3366 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3367 return 0;
3368 }
3369
3370 /*
3371 * uvm_map_advice: set advice code for range of addrs in map.
3372 *
3373 * => map must be unlocked
3374 */
3375
3376 int
3377 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3378 {
3379 struct vm_map_entry *entry, *temp_entry;
3380 UVMHIST_FUNC(__func__);
3381 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)",
3382 (uintptr_t)map, start, end, new_advice);
3383
3384 vm_map_lock(map);
3385 VM_MAP_RANGE_CHECK(map, start, end);
3386 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3387 entry = temp_entry;
3388 UVM_MAP_CLIP_START(map, entry, start);
3389 } else {
3390 entry = temp_entry->next;
3391 }
3392
3393 /*
3394 * XXXJRT: disallow holes?
3395 */
3396
3397 while ((entry != &map->header) && (entry->start < end)) {
3398 UVM_MAP_CLIP_END(map, entry, end);
3399
3400 switch (new_advice) {
3401 case MADV_NORMAL:
3402 case MADV_RANDOM:
3403 case MADV_SEQUENTIAL:
3404 /* nothing special here */
3405 break;
3406
3407 default:
3408 vm_map_unlock(map);
3409 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3410 return EINVAL;
3411 }
3412 entry->advice = new_advice;
3413 entry = entry->next;
3414 }
3415
3416 vm_map_unlock(map);
3417 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3418 return 0;
3419 }
3420
3421 /*
3422 * uvm_map_willneed: apply MADV_WILLNEED
3423 */
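/*
 * Reached via sys_madvise(MADV_WILLNEED). For object-backed, amap-less
 * entries this just starts read-ahead on the backing object; e.g.
 * (illustrative)
 *
 * madvise(addr, len, MADV_WILLNEED);
 */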
3424
3425 int
3426 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3427 {
3428 struct vm_map_entry *entry;
3429 UVMHIST_FUNC(__func__);
3430 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx)",
3431 (uintptr_t)map, start, end, 0);
3432
3433 vm_map_lock_read(map);
3434 VM_MAP_RANGE_CHECK(map, start, end);
3435 if (!uvm_map_lookup_entry(map, start, &entry)) {
3436 entry = entry->next;
3437 }
3438 while (entry->start < end) {
3439 struct vm_amap * const amap = entry->aref.ar_amap;
3440 struct uvm_object * const uobj = entry->object.uvm_obj;
3441
3442 KASSERT(entry != &map->header);
3443 KASSERT(start < entry->end);
3444 /*
3445 * For now, we handle only the easy but commonly-requested case,
3446 * i.e. starting prefetch of backing uobj pages.
3447 *
3448 * XXX It might be useful to pmap_enter() the already-in-core
3449 * pages by inventing a "weak" mode for uvm_fault() which would
3450 * only do the PGO_LOCKED pgo_get().
3451 */
3452 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3453 off_t offset;
3454 off_t size;
3455
3456 offset = entry->offset;
3457 if (start < entry->start) {
3458 offset += entry->start - start;
3459 }
3460 size = entry->offset + (entry->end - entry->start);
3461 if (entry->end < end) {
3462 size -= end - entry->end;
3463 }
3464 uvm_readahead(uobj, offset, size);
3465 }
3466 entry = entry->next;
3467 }
3468 vm_map_unlock_read(map);
3469 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3470 return 0;
3471 }
3472
3473 /*
3474 * uvm_map_pageable: sets the pageability of a range in a map.
3475 *
3476 * => wires map entries. should not be used for transient page locking.
3477 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3478 * => regions specified as not pageable require lock-down (wired) memory
3479 * and page tables.
3480 * => map must never be read-locked
3481 * => if islocked is true, map is already write-locked
3482 * => we always unlock the map, since we must downgrade to a read-lock
3483 * to call uvm_fault_wire()
3484 * => XXXCDC: check this and try and clean it up.
3485 */
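/*
 * Sketch of how mlock(2)/munlock(2) reach this (cf. sys_mlock(); the
 * exact argument plumbing is elided):
 *
 * // mlock(addr, len):
 * uvm_map_pageable(map, start, end, false, 0);   // wire
 * // munlock(addr, len):
 * uvm_map_pageable(map, start, end, true, 0);    // unwire
 */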
3486
3487 int
3488 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3489 bool new_pageable, int lockflags)
3490 {
3491 struct vm_map_entry *entry, *start_entry, *failed_entry;
3492 int rv;
3493 #ifdef DIAGNOSTIC
3494 u_int timestamp_save;
3495 #endif
3496 UVMHIST_FUNC(__func__);
3497 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)",
3498 (uintptr_t)map, start, end, new_pageable);
3499 KASSERT(map->flags & VM_MAP_PAGEABLE);
3500
3501 if ((lockflags & UVM_LK_ENTER) == 0)
3502 vm_map_lock(map);
3503 VM_MAP_RANGE_CHECK(map, start, end);
3504
3505 /*
3506 * only one pageability change may take place at one time, since
3507 * uvm_fault_wire assumes it will be called only once for each
3508 * wiring/unwiring. therefore, we have to make sure we're actually
3509 * changing the pageability for the entire region. we do so before
3510 * making any changes.
3511 */
3512
3513 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3514 if ((lockflags & UVM_LK_EXIT) == 0)
3515 vm_map_unlock(map);
3516
3517 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3518 return EFAULT;
3519 }
3520 entry = start_entry;
3521
3522 if (start == end) { /* nothing required */
3523 if ((lockflags & UVM_LK_EXIT) == 0)
3524 vm_map_unlock(map);
3525
3526 UVMHIST_LOG(maphist,"<- done (nothing)",0,0,0,0);
3527 return 0;
3528 }
3529
3530 /*
3531 * handle wiring and unwiring separately.
3532 */
3533
3534 if (new_pageable) { /* unwire */
3535 UVM_MAP_CLIP_START(map, entry, start);
3536
3537 /*
3538 * unwiring. first ensure that the range to be unwired is
3539 * really wired down and that there are no holes.
3540 */
3541
3542 while ((entry != &map->header) && (entry->start < end)) {
3543 if (entry->wired_count == 0 ||
3544 (entry->end < end &&
3545 (entry->next == &map->header ||
3546 entry->next->start > entry->end))) {
3547 if ((lockflags & UVM_LK_EXIT) == 0)
3548 vm_map_unlock(map);
3549 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3550 return EINVAL;
3551 }
3552 entry = entry->next;
3553 }
3554
3555 /*
3556 * POSIX 1003.1b - a single munlock call unlocks a region,
3557 * regardless of the number of mlock calls made on that
3558 * region.
3559 */
3560
3561 entry = start_entry;
3562 while ((entry != &map->header) && (entry->start < end)) {
3563 UVM_MAP_CLIP_END(map, entry, end);
3564 if (VM_MAPENT_ISWIRED(entry))
3565 uvm_map_entry_unwire(map, entry);
3566 entry = entry->next;
3567 }
3568 if ((lockflags & UVM_LK_EXIT) == 0)
3569 vm_map_unlock(map);
3570 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3571 return 0;
3572 }
3573
3574 /*
3575 * wire case: in two passes [XXXCDC: ugly block of code here]
3576 *
3577 * 1: holding the write lock, we create any anonymous maps that need
3578 * to be created. then we clip each map entry to the region to
3579 * be wired and increment its wiring count.
3580 *
3581 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3582 * in the pages for any newly wired area (wired_count == 1).
3583 *
3584 * downgrading to a read lock for uvm_fault_wire avoids a possible
3585 * deadlock with another thread that may have faulted on one of
3586 * the pages to be wired (it would mark the page busy, blocking
3587 * us, then in turn block on the map lock that we hold). because
3588 * of problems in the recursive lock package, we cannot upgrade
3589 * to a write lock in vm_map_lookup. thus, any actions that
3590 * require the write lock must be done beforehand. because we
3591 * keep the read lock on the map, the copy-on-write status of the
3592 * entries we modify here cannot change.
3593 */
3594
3595 while ((entry != &map->header) && (entry->start < end)) {
3596 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3597
3598 /*
3599 * perform actions of vm_map_lookup that need the
3600 * write lock on the map: create an anonymous map
3601 * for a copy-on-write region, or an anonymous map
3602 * for a zero-fill region. (XXXCDC: submap case
3603 * ok?)
3604 */
3605
3606 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3607 if (UVM_ET_ISNEEDSCOPY(entry) &&
3608 ((entry->max_protection & VM_PROT_WRITE) ||
3609 (entry->object.uvm_obj == NULL))) {
3610 amap_copy(map, entry, 0, start, end);
3611 /* XXXCDC: wait OK? */
3612 }
3613 }
3614 }
3615 UVM_MAP_CLIP_START(map, entry, start);
3616 UVM_MAP_CLIP_END(map, entry, end);
3617 entry->wired_count++;
3618
3619 /*
3620 * Check for holes
3621 */
3622
3623 if (entry->protection == VM_PROT_NONE ||
3624 (entry->end < end &&
3625 (entry->next == &map->header ||
3626 entry->next->start > entry->end))) {
3627
3628 /*
3629 * found one. amap creation actions do not need to
3630 * be undone, but the wired counts need to be restored.
3631 */
3632
3633 while (entry != &map->header && entry->end > start) {
3634 entry->wired_count--;
3635 entry = entry->prev;
3636 }
3637 if ((lockflags & UVM_LK_EXIT) == 0)
3638 vm_map_unlock(map);
3639 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3640 return EINVAL;
3641 }
3642 entry = entry->next;
3643 }
3644
3645 /*
3646 * Pass 2.
3647 */
3648
3649 #ifdef DIAGNOSTIC
3650 timestamp_save = map->timestamp;
3651 #endif
3652 vm_map_busy(map);
3653 vm_map_unlock(map);
3654
3655 rv = 0;
3656 entry = start_entry;
3657 while (entry != &map->header && entry->start < end) {
3658 if (entry->wired_count == 1) {
3659 rv = uvm_fault_wire(map, entry->start, entry->end,
3660 entry->max_protection, 1);
3661 if (rv) {
3662
3663 /*
3664 * wiring failed. break out of the loop.
3665 * we'll clean up the map below, once we
3666 * have a write lock again.
3667 */
3668
3669 break;
3670 }
3671 }
3672 entry = entry->next;
3673 }
3674
3675 if (rv) { /* failed? */
3676
3677 /*
3678 * Get back to an exclusive (write) lock.
3679 */
3680
3681 vm_map_lock(map);
3682 vm_map_unbusy(map);
3683
3684 #ifdef DIAGNOSTIC
3685 if (timestamp_save + 1 != map->timestamp)
3686 panic("uvm_map_pageable: stale map");
3687 #endif
3688
3689 /*
3690 * first drop the wiring count on all the entries
3691 * which haven't actually been wired yet.
3692 */
3693
3694 failed_entry = entry;
3695 while (entry != &map->header && entry->start < end) {
3696 entry->wired_count--;
3697 entry = entry->next;
3698 }
3699
3700 /*
3701 * now, unwire all the entries that were successfully
3702 * wired above.
3703 */
3704
3705 entry = start_entry;
3706 while (entry != failed_entry) {
3707 entry->wired_count--;
3708 if (VM_MAPENT_ISWIRED(entry) == 0)
3709 uvm_map_entry_unwire(map, entry);
3710 entry = entry->next;
3711 }
3712 if ((lockflags & UVM_LK_EXIT) == 0)
3713 vm_map_unlock(map);
3714 UVMHIST_LOG(maphist, "<- done (RV=%jd)", rv,0,0,0);
3715 return (rv);
3716 }
3717
3718 if ((lockflags & UVM_LK_EXIT) == 0) {
3719 vm_map_unbusy(map);
3720 } else {
3721
3722 /*
3723 * Get back to an exclusive (write) lock.
3724 */
3725
3726 vm_map_lock(map);
3727 vm_map_unbusy(map);
3728 }
3729
3730 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3731 return 0;
3732 }
3733
3734 /*
3735 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3736 * all mapped regions.
3737 *
3738 * => map must not be locked.
3739 * => if no flags are specified, all regions are unwired.
3740 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3741 */
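/*
 * Reached via mlockall(2)/munlockall(2); e.g. (illustrative)
 *
 * mlockall(MCL_CURRENT | MCL_FUTURE);  // wire everything, now and later
 * munlockall();                        // arrives here with flags == 0
 */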
3742
3743 int
3744 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3745 {
3746 struct vm_map_entry *entry, *failed_entry;
3747 vsize_t size;
3748 int rv;
3749 #ifdef DIAGNOSTIC
3750 u_int timestamp_save;
3751 #endif
3752 UVMHIST_FUNC(__func__);
3753 UVMHIST_CALLARGS(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags,
3754 0, 0);
3755
3756 KASSERT(map->flags & VM_MAP_PAGEABLE);
3757
3758 vm_map_lock(map);
3759
3760 /*
3761 * handle wiring and unwiring separately.
3762 */
3763
3764 if (flags == 0) { /* unwire */
3765
3766 /*
3767 * POSIX 1003.1b -- munlockall unlocks all regions,
3768 * regardless of how many times mlockall has been called.
3769 */
3770
3771 for (entry = map->header.next; entry != &map->header;
3772 entry = entry->next) {
3773 if (VM_MAPENT_ISWIRED(entry))
3774 uvm_map_entry_unwire(map, entry);
3775 }
3776 map->flags &= ~VM_MAP_WIREFUTURE;
3777 vm_map_unlock(map);
3778 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3779 return 0;
3780 }
3781
3782 if (flags & MCL_FUTURE) {
3783
3784 /*
3785 * must wire all future mappings; remember this.
3786 */
3787
3788 map->flags |= VM_MAP_WIREFUTURE;
3789 }
3790
3791 if ((flags & MCL_CURRENT) == 0) {
3792
3793 /*
3794 * no more work to do!
3795 */
3796
3797 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3798 vm_map_unlock(map);
3799 return 0;
3800 }
3801
3802 /*
3803 * wire case: in three passes [XXXCDC: ugly block of code here]
3804 *
3805 * 1: holding the write lock, count all pages mapped by non-wired
3806 * entries. if this would cause us to go over our limit, we fail.
3807 *
3808 * 2: still holding the write lock, we create any anonymous maps that
3809 * need to be created. then we increment each entry's wiring count.
3810 *
3811 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3812 * in the pages for any newly wired area (wired_count == 1).
3813 *
3814 * downgrading to a read lock for uvm_fault_wire avoids a possible
3815 * deadlock with another thread that may have faulted on one of
3816 * the pages to be wired (it would mark the page busy, blocking
3817 * us, then in turn block on the map lock that we hold). because
3818 * of problems in the recursive lock package, we cannot upgrade
3819 * to a write lock in vm_map_lookup. thus, any actions that
3820 * require the write lock must be done beforehand. because we
3821 * keep the read lock on the map, the copy-on-write status of the
3822 * entries we modify here cannot change.
3823 */
3824
3825 for (size = 0, entry = map->header.next; entry != &map->header;
3826 entry = entry->next) {
3827 if (entry->protection != VM_PROT_NONE &&
3828 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3829 size += entry->end - entry->start;
3830 }
3831 }
3832
3833 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3834 vm_map_unlock(map);
3835 return ENOMEM;
3836 }
3837
3838 if (limit != 0 &&
3839 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3840 vm_map_unlock(map);
3841 return ENOMEM;
3842 }
3843
3844 /*
3845 * Pass 2.
3846 */
3847
3848 for (entry = map->header.next; entry != &map->header;
3849 entry = entry->next) {
3850 if (entry->protection == VM_PROT_NONE)
3851 continue;
3852 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3853
3854 /*
3855 * perform actions of vm_map_lookup that need the
3856 * write lock on the map: create an anonymous map
3857 * for a copy-on-write region, or an anonymous map
3858 * for a zero-fill region. (XXXCDC: submap case
3859 * ok?)
3860 */
3861
3862 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3863 if (UVM_ET_ISNEEDSCOPY(entry) &&
3864 ((entry->max_protection & VM_PROT_WRITE) ||
3865 (entry->object.uvm_obj == NULL))) {
3866 amap_copy(map, entry, 0, entry->start,
3867 entry->end);
3868 /* XXXCDC: wait OK? */
3869 }
3870 }
3871 }
3872 entry->wired_count++;
3873 }
3874
3875 /*
3876 * Pass 3.
3877 */
3878
3879 #ifdef DIAGNOSTIC
3880 timestamp_save = map->timestamp;
3881 #endif
3882 vm_map_busy(map);
3883 vm_map_unlock(map);
3884
3885 rv = 0;
3886 for (entry = map->header.next; entry != &map->header;
3887 entry = entry->next) {
3888 if (entry->wired_count == 1) {
3889 rv = uvm_fault_wire(map, entry->start, entry->end,
3890 entry->max_protection, 1);
3891 if (rv) {
3892
3893 /*
3894 * wiring failed. break out of the loop.
3895 * we'll clean up the map below, once we
3896 * have a write lock again.
3897 */
3898
3899 break;
3900 }
3901 }
3902 }
3903
3904 if (rv) {
3905
3906 /*
3907 * Get back an exclusive (write) lock.
3908 */
3909
3910 vm_map_lock(map);
3911 vm_map_unbusy(map);
3912
3913 #ifdef DIAGNOSTIC
3914 if (timestamp_save + 1 != map->timestamp)
3915 panic("uvm_map_pageable_all: stale map");
3916 #endif
3917
3918 /*
3919 * first drop the wiring count on all the entries
3920 * which haven't actually been wired yet.
3921 *
3922 * Skip VM_PROT_NONE entries like we did above.
3923 */
3924
3925 failed_entry = entry;
3926 for (/* nothing */; entry != &map->header;
3927 entry = entry->next) {
3928 if (entry->protection == VM_PROT_NONE)
3929 continue;
3930 entry->wired_count--;
3931 }
3932
3933 /*
3934 * now, unwire all the entries that were successfully
3935 * wired above.
3936 *
3937 * Skip VM_PROT_NONE entries like we did above.
3938 */
3939
3940 for (entry = map->header.next; entry != failed_entry;
3941 entry = entry->next) {
3942 if (entry->protection == VM_PROT_NONE)
3943 continue;
3944 entry->wired_count--;
3945 if (VM_MAPENT_ISWIRED(entry))
3946 uvm_map_entry_unwire(map, entry);
3947 }
3948 vm_map_unlock(map);
3949 UVMHIST_LOG(maphist,"<- done (RV=%jd)", rv,0,0,0);
3950 return (rv);
3951 }
3952
3953 vm_map_unbusy(map);
3954
3955 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3956 return 0;
3957 }
3958
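/*
 * illustrative call (a sketch of the mlockall(2) path; the caller-side
 * expression is an assumption for illustration, not code from this file):
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * flags == 0 requests a full unwire (munlockall(2)); MCL_FUTURE without
 * MCL_CURRENT just sets VM_MAP_WIREFUTURE and returns.
 */
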
3959 /*
3960 * uvm_map_clean: clean out a map range
3961 *
3962 * => valid flags:
3963 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3964 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3965 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3966 * if (flags & PGO_FREE): any cached pages are freed after clean
3967 * => returns an error if any part of the specified range isn't mapped
3968 * => never a need to flush amap layer since the anonymous memory has
3969 * no permanent home, but may deactivate pages there
3970 * => called from sys_msync() and sys_madvise()
3971 * => caller must not have map locked
3972 */
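/*
 * for example, an msync(2)-style caller would compose flags roughly as
 * follows (a sketch of the semantics above, not a quote of sys_msync()):
 *
 *	MS_ASYNC       -> PGO_CLEANIT
 *	MS_SYNC        -> PGO_CLEANIT | PGO_SYNCIO
 *	MS_INVALIDATE  -> adds PGO_FREE (fails EBUSY on wired entries)
 */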
3973
3974 int
3975 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3976 {
3977 struct vm_map_entry *current, *entry;
3978 struct uvm_object *uobj;
3979 struct vm_amap *amap;
3980 struct vm_anon *anon;
3981 struct vm_page *pg;
3982 vaddr_t offset;
3983 vsize_t size;
3984 voff_t uoff;
3985 int error, refs;
3986 UVMHIST_FUNC(__func__);
3987 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)",
3988 (uintptr_t)map, start, end, flags);
3989
3990 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3991 (PGO_FREE|PGO_DEACTIVATE));
3992
3993 vm_map_lock(map);
3994 VM_MAP_RANGE_CHECK(map, start, end);
3995 if (!uvm_map_lookup_entry(map, start, &entry)) {
3996 vm_map_unlock(map);
3997 return EFAULT;
3998 }
3999
4000 /*
4001 * Make a first pass to check for holes and wiring problems.
4002 */
4003
4004 for (current = entry; current->start < end; current = current->next) {
4005 if (UVM_ET_ISSUBMAP(current)) {
4006 vm_map_unlock(map);
4007 return EINVAL;
4008 }
4009 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(current)) {
4010 vm_map_unlock(map);
4011 return EBUSY;
4012 }
4013 if (end <= current->end) {
4014 break;
4015 }
4016 if (current->end != current->next->start) {
4017 vm_map_unlock(map);
4018 return EFAULT;
4019 }
4020 }
4021
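/*
 * mark the map busy and drop the lock: vm_map_busy() holds off
 * would-be writers, so the entry list stays stable while we sleep
 * on page I/O below without holding the map lock.
 */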
4022 vm_map_busy(map);
4023 vm_map_unlock(map);
4024 error = 0;
4025 for (current = entry; start < end; current = current->next) {
4026 amap = current->aref.ar_amap; /* upper layer */
4027 uobj = current->object.uvm_obj; /* lower layer */
4028 KASSERT(start >= current->start);
4029
4030 /*
4031 * No amap cleaning necessary if:
4032 *
4033 * (1) There's no amap.
4034 *
4035 * (2) We're not deactivating or freeing pages.
4036 */
4037
4038 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
4039 goto flush_object;
4040
4041 offset = start - current->start;
4042 size = MIN(end, current->end) - start;
4043
4044 amap_lock(amap, RW_WRITER);
4045 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
4046 anon = amap_lookup(&current->aref, offset);
4047 if (anon == NULL)
4048 continue;
4049
4050 KASSERT(anon->an_lock == amap->am_lock);
4051 pg = anon->an_page;
4052 if (pg == NULL) {
4053 continue;
4054 }
4055 if (pg->flags & PG_BUSY) {
4056 continue;
4057 }
4058
4059 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
4060
4061 /*
4062 * In these first 3 cases, we just deactivate the page.
4063 */
4064
4065 case PGO_CLEANIT|PGO_FREE:
4066 case PGO_CLEANIT|PGO_DEACTIVATE:
4067 case PGO_DEACTIVATE:
4068 deactivate_it:
4069 /*
4070 * skip the page if it's loaned or wired,
4071 * since it shouldn't be on a paging queue
4072 * at all in these cases.
4073 */
4074
4075 if (pg->loan_count != 0 ||
4076 pg->wire_count != 0) {
4077 continue;
4078 }
4079 KASSERT(pg->uanon == anon);
4080 uvm_pagelock(pg);
4081 uvm_pagedeactivate(pg);
4082 uvm_pageunlock(pg);
4083 continue;
4084
4085 case PGO_FREE:
4086
4087 /*
4088 * If there are multiple references to
4089 * the amap, just deactivate the page.
4090 */
4091
4092 if (amap_refs(amap) > 1)
4093 goto deactivate_it;
4094
4095 /* skip the page if it's wired */
4096 if (pg->wire_count != 0) {
4097 continue;
4098 }
4099 amap_unadd(&current->aref, offset);
4100 refs = --anon->an_ref;
4101 if (refs == 0) {
4102 uvm_anfree(anon);
4103 }
4104 continue;
4105 }
4106 }
4107 amap_unlock(amap);
4108
4109 flush_object:
4110 /*
4111 * flush pages if we've got a valid backing object.
4112 * note that we must always clean object pages before
4113 * freeing them since otherwise we could reveal stale
4114 * data from files.
4115 */
4116
4117 uoff = current->offset + (start - current->start);
4118 size = MIN(end, current->end) - start;
4119 if (uobj != NULL) {
4120 rw_enter(uobj->vmobjlock, RW_WRITER);
4121 if (uobj->pgops->pgo_put != NULL)
4122 error = (uobj->pgops->pgo_put)(uobj, uoff,
4123 uoff + size, flags | PGO_CLEANIT);
4124 else
4125 error = 0;
4126 }
4127 start += size;
4128 }
4129 vm_map_unbusy(map);
4130 return error;
4131 }
4132
4133
4134 /*
4135 * uvm_map_checkprot: check protection in map
4136 *
4137 * => the full range must be allocated and allow the specified protection.
4138 * => map must be read or write locked by caller.
4139 */
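/*
 * for example, uvm_kernacc() (uvm_glue.c) uses this to decide whether
 * a kernel address range may be accessed with a given protection:
 *
 *	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
 */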
4140
4141 bool
4142 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4143 vm_prot_t protection)
4144 {
4145 struct vm_map_entry *entry;
4146 struct vm_map_entry *tmp_entry;
4147
4148 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4149 return (false);
4150 }
4151 entry = tmp_entry;
4152 while (start < end) {
4153 if (entry == &map->header) {
4154 return (false);
4155 }
4156
4157 /*
4158 * no holes allowed
4159 */
4160
4161 if (start < entry->start) {
4162 return (false);
4163 }
4164
4165 /*
4166 * check protection associated with entry
4167 */
4168
4169 if ((entry->protection & protection) != protection) {
4170 return (false);
4171 }
4172 start = entry->end;
4173 entry = entry->next;
4174 }
4175 return (true);
4176 }
4177
4178 /*
4179 * uvmspace_alloc: allocate a vmspace structure.
4180 *
4181 * - structure includes vm_map and pmap
4182 * - XXX: no locking on this structure
4183 * - refcnt set to 1, rest must be init'd by caller
4184 */
4185 struct vmspace *
4186 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
4187 {
4188 struct vmspace *vm;
4189 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4190
4191 vm = kmem_alloc(sizeof(*vm), KM_SLEEP);
4192 uvmspace_init(vm, NULL, vmin, vmax, topdown);
4193 UVMHIST_LOG(maphist,"<- done (vm=%#jx)", (uintptr_t)vm, 0, 0, 0);
4194 return (vm);
4195 }
4196
4197 /*
4198 * uvmspace_init: initialize a vmspace structure.
4199 *
4200 * - XXX: no locking on this structure
4201 * - refcnt set to 1, rest must be init'd by caller
4202 */
4203 void
4204 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
4205 vaddr_t vmax, bool topdown)
4206 {
4207 UVMHIST_FUNC(__func__);
4208 UVMHIST_CALLARGS(maphist, "(vm=%#jx, pmap=%#jx, vmin=%#jx, vmax=%#jx",
4209 (uintptr_t)vm, (uintptr_t)pmap, vmin, vmax);
4210 UVMHIST_LOG(maphist, " topdown=%ju)", topdown, 0, 0, 0);
4211
4212 memset(vm, 0, sizeof(*vm));
4213 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4214 | (topdown ? VM_MAP_TOPDOWN : 0)
4215 );
4216 if (pmap)
4217 pmap_reference(pmap);
4218 else
4219 pmap = pmap_create();
4220 vm->vm_map.pmap = pmap;
4221 vm->vm_refcnt = 1;
4222 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4223 }
4224
4225 /*
4226 * uvmspace_share: share a vmspace between two processes
4227 *
4228 * - used for vfork, threads(?)
4229 */
4230
4231 void
4232 uvmspace_share(struct proc *p1, struct proc *p2)
4233 {
4234
4235 uvmspace_addref(p1->p_vmspace);
4236 p2->p_vmspace = p1->p_vmspace;
4237 }
4238
4239 #if 0
4240
4241 /*
4242 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4243 *
4244 * - XXX: no locking on vmspace
4245 */
4246
4247 void
4248 uvmspace_unshare(struct lwp *l)
4249 {
4250 struct proc *p = l->l_proc;
4251 struct vmspace *nvm, *ovm = p->p_vmspace;
4252
4253 if (ovm->vm_refcnt == 1)
4254 /* nothing to do: vmspace isn't shared in the first place */
4255 return;
4256
4257 /* make a new vmspace, still holding old one */
4258 nvm = uvmspace_fork(ovm);
4259
4260 kpreempt_disable();
4261 pmap_deactivate(l); /* unbind old vmspace */
4262 p->p_vmspace = nvm;
4263 pmap_activate(l); /* switch to new vmspace */
4264 kpreempt_enable();
4265
4266 uvmspace_free(ovm); /* drop reference to old vmspace */
4267 }
4268
4269 #endif
4270
4271
4272 /*
4273 * uvmspace_spawn: a new process has been spawned and needs a vmspace
4274 */
4275
4276 void
4277 uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4278 {
4279 struct proc *p = l->l_proc;
4280 struct vmspace *nvm;
4281
4282 #ifdef __HAVE_CPU_VMSPACE_EXEC
4283 cpu_vmspace_exec(l, start, end);
4284 #endif
4285
4286 nvm = uvmspace_alloc(start, end, topdown);
4287 kpreempt_disable();
4288 p->p_vmspace = nvm;
4289 pmap_activate(l);
4290 kpreempt_enable();
4291 }
4292
4293 /*
4294 * uvmspace_exec: the process wants to exec a new program
4295 */
4296
4297 void
4298 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4299 {
4300 struct proc *p = l->l_proc;
4301 struct vmspace *nvm, *ovm = p->p_vmspace;
4302 struct vm_map *map;
4303 int flags;
4304
4305 KASSERT(ovm != NULL);
4306 #ifdef __HAVE_CPU_VMSPACE_EXEC
4307 cpu_vmspace_exec(l, start, end);
4308 #endif
4309
4310 map = &ovm->vm_map;
4311 /*
4312 * see if more than one process is using this vmspace...
4313 */
4314
4315 if (ovm->vm_refcnt == 1
4316 && topdown == ((ovm->vm_map.flags & VM_MAP_TOPDOWN) != 0)) {
4317
4318 /*
4319 * if p is the only process using its vmspace then we can safely
4320 * recycle that vmspace for the program that is being exec'd.
4321 * But only if TOPDOWN matches the requested value for the new
4322 * vm space!
4323 */
4324
4325 /*
4326 * SYSV SHM semantics require us to kill all segments on an exec
4327 */
4328 if (uvm_shmexit && ovm->vm_shm)
4329 (*uvm_shmexit)(ovm);
4330
4331 /*
4332 * POSIX 1003.1b -- "lock future mappings" is revoked
4333 * when a process execs another program image.
4334 */
4335
4336 map->flags &= ~VM_MAP_WIREFUTURE;
4337
4338 /*
4339 * now unmap the old program.
4340 *
4341 * XXX set VM_MAP_DYING for the duration, so pmap_update()
4342 * is not called until the pmap has been totally cleared out
4343 * after pmap_remove_all(), or it can confuse some pmap
4344 * implementations. it would be nice to handle this by
4345 * deferring the pmap_update() while it is known the address
4346 * space is not visible to any user LWP other than curlwp,
4347 * but there isn't an elegant way of inferring that right
4348 * now.
4349 */
4350
4351 flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4352 map->flags |= VM_MAP_DYING;
4353 uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags);
4354 map->flags &= ~VM_MAP_DYING;
4355 pmap_update(map->pmap);
4356 KASSERT(map->header.prev == &map->header);
4357 KASSERT(map->nentries == 0);
4358
4359 /*
4360 * resize the map
4361 */
4362
4363 vm_map_setmin(map, start);
4364 vm_map_setmax(map, end);
4365 } else {
4366
4367 /*
4368 * p's vmspace is being shared, so we can't reuse it for p since
4369 * it is still being used for others. allocate a new vmspace
4370 * for p
4371 */
4372
4373 nvm = uvmspace_alloc(start, end, topdown);
4374
4375 /*
4376 * install new vmspace and drop our ref to the old one.
4377 */
4378
4379 kpreempt_disable();
4380 pmap_deactivate(l);
4381 p->p_vmspace = nvm;
4382 pmap_activate(l);
4383 kpreempt_enable();
4384
4385 uvmspace_free(ovm);
4386 }
4387 }
4388
4389 /*
4390 * uvmspace_addref: add a reference to a vmspace.
4391 */
4392
4393 void
4394 uvmspace_addref(struct vmspace *vm)
4395 {
4396
4397 KASSERT((vm->vm_map.flags & VM_MAP_DYING) == 0);
4398 KASSERT(vm->vm_refcnt > 0);
4399 atomic_inc_uint(&vm->vm_refcnt);
4400 }
4401
4402 /*
4403 * uvmspace_free: free a vmspace data structure
4404 */
4405
4406 void
4407 uvmspace_free(struct vmspace *vm)
4408 {
4409 struct vm_map_entry *dead_entries;
4410 struct vm_map *map = &vm->vm_map;
4411 int flags;
4412
4413 UVMHIST_FUNC(__func__);
4414 UVMHIST_CALLARGS(maphist,"(vm=%#jx) ref=%jd", (uintptr_t)vm,
4415 vm->vm_refcnt, 0, 0);
4416
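/*
 * release/acquire pairing: membar_release() before the decrement
 * makes our prior stores visible to whichever thread drops the last
 * reference; membar_acquire() after winning that race makes all
 * other threads' stores visible to us before we tear things down.
 */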
4417 membar_release();
4418 if (atomic_dec_uint_nv(&vm->vm_refcnt) > 0)
4419 return;
4420 membar_acquire();
4421
4422 /*
4423 * at this point, there should be no other references to the map.
4424 * delete all of the mappings, then destroy the pmap.
4425 */
4426
4427 map->flags |= VM_MAP_DYING;
4428 flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4429
4430 /* Get rid of any SYSV shared memory segments. */
4431 if (uvm_shmexit && vm->vm_shm != NULL)
4432 (*uvm_shmexit)(vm);
4433
4434 if (map->nentries) {
4435 vm_map_lock(map);
4436 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4437 &dead_entries, flags);
4438 vm_map_unlock(map);
4439 if (dead_entries != NULL)
4440 uvm_unmap_detach(dead_entries, 0);
4441 }
4442 KASSERT(map->nentries == 0);
4443 KASSERT(map->size == 0);
4444
4445 mutex_destroy(&map->misc_lock);
4446 rw_destroy(&map->lock);
4447 cv_destroy(&map->cv);
4448 pmap_destroy(map->pmap);
4449 kmem_free(vm, sizeof(*vm));
4450 }
4451
4452 static struct vm_map_entry *
4453 uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
4454 int flags)
4455 {
4456 struct vm_map_entry *new_entry;
4457
4458 new_entry = uvm_mapent_alloc(new_map, 0);
4459 /* old_entry -> new_entry */
4460 uvm_mapent_copy(old_entry, new_entry);
4461
4462 /* new pmap has nothing wired in it */
4463 new_entry->wired_count = 0;
4464
4465 /*
4466 * gain reference to the objects backing the entry (can't
4467 * be a submap, already checked this case).
4468 */
4469
4470 if (new_entry->aref.ar_amap)
4471 uvm_map_reference_amap(new_entry, flags);
4472
4473 if (new_entry->object.uvm_obj &&
4474 new_entry->object.uvm_obj->pgops->pgo_reference)
4475 new_entry->object.uvm_obj->pgops->pgo_reference(
4476 new_entry->object.uvm_obj);
4477
4478 /* insert entry at end of new_map's entry list */
4479 uvm_map_entry_link(new_map, new_map->header.prev,
4480 new_entry);
4481
4482 return new_entry;
4483 }
4484
4485 /*
4486 * share the mapping: this means we want the old and
4487 * new entries to share amaps and backing objects.
4488 */
4489 static void
4490 uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
4491 struct vm_map_entry *old_entry)
4492 {
4493 /*
4494 * if the old_entry needs a new amap (due to prev fork)
4495 * then we need to allocate it now so that we have
4496 * something we own to share with the new_entry. [in
4497 * other words, we need to clear needs_copy]
4498 */
4499
4500 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4501 /* get our own amap, clears needs_copy */
4502 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4503 0, 0);
4504 /* XXXCDC: WAITOK??? */
4505 }
4506
4507 uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
4508 }
4509
4510
4511 static void
4512 uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
4513 struct vm_map_entry *old_entry)
4514 {
4515 struct vm_map_entry *new_entry;
4516
4517 /*
4518 * copy-on-write the mapping (using mmap's
4519 * MAP_PRIVATE semantics)
4520 *
4521 * allocate new_entry, adjust reference counts.
4522 * (note that new references are read-only).
4523 */
4524
4525 new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4526
4527 new_entry->etype |=
4528 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4529
4530 /*
4531 * the new entry will need an amap. it will either
4532 * need to be copied from the old entry or created
4533 * from scratch (if the old entry does not have an
4534 * amap). can we defer this process until later
4535 * (by setting "needs_copy") or do we need to copy
4536 * the amap now?
4537 *
4538 * we must copy the amap now if any of the following
4539 * conditions hold:
4540 * 1. the old entry has an amap and that amap is
4541 * being shared. this means that the old (parent)
4542 * process is sharing the amap with another
4543 * process. if we do not clear needs_copy here
4544 * we will end up in a situation where both the
4545 * parent and child process are referring to the
4546 * same amap with "needs_copy" set. if the
4547 * parent write-faults, the fault routine will
4548 * clear "needs_copy" in the parent by allocating
4549 * a new amap. this is wrong because the
4550 * parent is supposed to be sharing the old amap
4551 * and the new amap will break that.
4552 *
4553 * 2. if the old entry has an amap and a non-zero
4554 * wire count then we are going to have to call
4555 * amap_cow_now to avoid page faults in the
4556 * parent process. since amap_cow_now requires
4557 * "needs_copy" to be clear we might as well
4558 * clear it here as well.
4559 *
4560 */
4561
4562 if (old_entry->aref.ar_amap != NULL) {
4563 if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
4564 VM_MAPENT_ISWIRED(old_entry)) {
4565
4566 amap_copy(new_map, new_entry,
4567 AMAP_COPY_NOCHUNK, 0, 0);
4568 /* XXXCDC: M_WAITOK ... ok? */
4569 }
4570 }
4571
4572 /*
4573 * if the parent's entry is wired down, then the
4574 * parent process does not want page faults on
4575 * access to that memory. this means that we
4576 * cannot do copy-on-write because we can't write
4577 * protect the old entry. in this case we
4578 * resolve all copy-on-write faults now, using
4579 * amap_cow_now. note that we have already
4580 * allocated any needed amap (above).
4581 */
4582
4583 if (VM_MAPENT_ISWIRED(old_entry)) {
4584
4585 /*
4586 * resolve all copy-on-write faults now
4587 * (note that there is nothing to do if
4588 * the old mapping does not have an amap).
4589 */
4590 if (old_entry->aref.ar_amap)
4591 amap_cow_now(new_map, new_entry);
4592
4593 } else {
4594 /*
4595 * setup mappings to trigger copy-on-write faults
4596 * we must write-protect the parent if it has
4597 * an amap and it is not already "needs_copy"...
4598 * if it is already "needs_copy" then the parent
4599 * has already been write-protected by a previous
4600 * fork operation.
4601 */
4602 if (old_entry->aref.ar_amap &&
4603 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4604 if (old_entry->max_protection & VM_PROT_WRITE) {
4605 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
4606 uvm_map_lock_entry(old_entry, RW_WRITER);
4607 #else
4608 uvm_map_lock_entry(old_entry, RW_READER);
4609 #endif
4610 pmap_protect(old_map->pmap,
4611 old_entry->start, old_entry->end,
4612 old_entry->protection & ~VM_PROT_WRITE);
4613 uvm_map_unlock_entry(old_entry);
4614 }
4615 old_entry->etype |= UVM_ET_NEEDSCOPY;
4616 }
4617 }
4618 }
4619
4620 /*
4621 * zero the mapping: the new entry will be zero initialized
4622 */
4623 static void
4624 uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map,
4625 struct vm_map_entry *old_entry)
4626 {
4627 struct vm_map_entry *new_entry;
4628
4629 new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4630
4631 new_entry->etype |=
4632 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4633
4634 if (new_entry->aref.ar_amap) {
4635 uvm_map_unreference_amap(new_entry, 0);
4636 new_entry->aref.ar_pageoff = 0;
4637 new_entry->aref.ar_amap = NULL;
4638 }
4639
4640 if (UVM_ET_ISOBJ(new_entry)) {
4641 if (new_entry->object.uvm_obj->pgops->pgo_detach)
4642 new_entry->object.uvm_obj->pgops->pgo_detach(
4643 new_entry->object.uvm_obj);
4644 new_entry->object.uvm_obj = NULL;
4645 new_entry->offset = 0;
4646 new_entry->etype &= ~UVM_ET_OBJ;
4647 }
4648 }
4649
4650 /*
4651 * F O R K - m a i n e n t r y p o i n t
4652 */
4653 /*
4654 * uvmspace_fork: fork a process' main map
4655 *
4656 * => create a new vmspace for child process from parent.
4657 * => parent's map must not be locked.
4658 */
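/*
 * per-entry behaviour, keyed on the minherit(2) inheritance code
 * (a summary of the switch below):
 *
 *	MAP_INHERIT_NONE	entry is dropped from the child
 *	MAP_INHERIT_SHARE	child shares amap and backing object
 *	MAP_INHERIT_COPY	child gets a copy-on-write mapping
 *	MAP_INHERIT_ZERO	child gets a fresh zero-fill mapping
 */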
4659
4660 struct vmspace *
4661 uvmspace_fork(struct vmspace *vm1)
4662 {
4663 struct vmspace *vm2;
4664 struct vm_map *old_map = &vm1->vm_map;
4665 struct vm_map *new_map;
4666 struct vm_map_entry *old_entry;
4667 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4668
4669 vm_map_lock(old_map);
4670
4671 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4672 vm1->vm_map.flags & VM_MAP_TOPDOWN);
4673 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4674 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4675 new_map = &vm2->vm_map; /* XXX */
4676
4677 old_entry = old_map->header.next;
4678 new_map->size = old_map->size;
4679
4680 /*
4681 * go entry-by-entry
4682 */
4683
4684 while (old_entry != &old_map->header) {
4685
4686 /*
4687 * first, some sanity checks on the old entry
4688 */
4689
4690 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4691 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4692 !UVM_ET_ISNEEDSCOPY(old_entry));
4693
4694 switch (old_entry->inheritance) {
4695 case MAP_INHERIT_NONE:
4696 /*
4697 * drop the mapping, modify size
4698 */
4699 new_map->size -= old_entry->end - old_entry->start;
4700 break;
4701
4702 case MAP_INHERIT_SHARE:
4703 uvm_mapent_forkshared(new_map, old_map, old_entry);
4704 break;
4705
4706 case MAP_INHERIT_COPY:
4707 uvm_mapent_forkcopy(new_map, old_map, old_entry);
4708 break;
4709
4710 case MAP_INHERIT_ZERO:
4711 uvm_mapent_forkzero(new_map, old_map, old_entry);
4712 break;
4713 default:
4714 KASSERT(0);
4715 break;
4716 }
4717 old_entry = old_entry->next;
4718 }
4719
4720 pmap_update(old_map->pmap);
4721 vm_map_unlock(old_map);
4722
4723 if (uvm_shmfork && vm1->vm_shm)
4724 (*uvm_shmfork)(vm1, vm2);
4725
4726 #ifdef PMAP_FORK
4727 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4728 #endif
4729
4730 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4731 return (vm2);
4732 }
4733
4734
4735 /*
4736 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4737 *
4738 * => called with map locked.
4739 * => returns non-zero if successfully merged.
4740 */
4741
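/*
 * a neighbor qualifies only if, summarizing the tests below: it is
 * virtually adjacent, UVM_ET_ISCOMPATIBLE() matches its etype,
 * protections, inheritance, advice and wiring against ours, its
 * object offset is contiguous with ours, and its amap state is
 * compatible with the merge mode.
 */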
4742 int
4743 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4744 {
4745 struct uvm_object *uobj;
4746 struct vm_map_entry *next;
4747 struct vm_map_entry *prev;
4748 vsize_t size;
4749 int merged = 0;
4750 bool copying;
4751 int newetype;
4752
4753 if (entry->aref.ar_amap != NULL) {
4754 return 0;
4755 }
4756 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4757 return 0;
4758 }
4759
4760 uobj = entry->object.uvm_obj;
4761 size = entry->end - entry->start;
4762 copying = (flags & UVM_MERGE_COPYING) != 0;
4763 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4764
4765 next = entry->next;
4766 if (next != &map->header &&
4767 next->start == entry->end &&
4768 ((copying && next->aref.ar_amap != NULL &&
4769 amap_refs(next->aref.ar_amap) == 1) ||
4770 (!copying && next->aref.ar_amap == NULL)) &&
4771 UVM_ET_ISCOMPATIBLE(next, newetype,
4772 uobj, entry->flags, entry->protection,
4773 entry->max_protection, entry->inheritance, entry->advice,
4774 entry->wired_count) &&
4775 (uobj == NULL || entry->offset + size == next->offset)) {
4776 int error;
4777
4778 if (copying) {
4779 error = amap_extend(next, size,
4780 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4781 } else {
4782 error = 0;
4783 }
4784 if (error == 0) {
4785 if (uobj) {
4786 if (uobj->pgops->pgo_detach) {
4787 uobj->pgops->pgo_detach(uobj);
4788 }
4789 }
4790
4791 entry->end = next->end;
4792 clear_hints(map, next);
4793 uvm_map_entry_unlink(map, next);
4794 if (copying) {
4795 entry->aref = next->aref;
4796 entry->etype &= ~UVM_ET_NEEDSCOPY;
4797 }
4798 uvm_map_check(map, "trymerge forwardmerge");
4799 uvm_mapent_free(next);
4800 merged++;
4801 }
4802 }
4803
4804 prev = entry->prev;
4805 if (prev != &map->header &&
4806 prev->end == entry->start &&
4807 ((copying && !merged && prev->aref.ar_amap != NULL &&
4808 amap_refs(prev->aref.ar_amap) == 1) ||
4809 (!copying && prev->aref.ar_amap == NULL)) &&
4810 UVM_ET_ISCOMPATIBLE(prev, newetype,
4811 uobj, entry->flags, entry->protection,
4812 entry->max_protection, entry->inheritance, entry->advice,
4813 entry->wired_count) &&
4814 (uobj == NULL ||
4815 prev->offset + prev->end - prev->start == entry->offset)) {
4816 int error;
4817
4818 if (copying) {
4819 error = amap_extend(prev, size,
4820 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4821 } else {
4822 error = 0;
4823 }
4824 if (error == 0) {
4825 if (uobj) {
4826 if (uobj->pgops->pgo_detach) {
4827 uobj->pgops->pgo_detach(uobj);
4828 }
4829 entry->offset = prev->offset;
4830 }
4831
4832 entry->start = prev->start;
4833 clear_hints(map, prev);
4834 uvm_map_entry_unlink(map, prev);
4835 if (copying) {
4836 entry->aref = prev->aref;
4837 entry->etype &= ~UVM_ET_NEEDSCOPY;
4838 }
4839 uvm_map_check(map, "trymerge backmerge");
4840 uvm_mapent_free(prev);
4841 merged++;
4842 }
4843 }
4844
4845 return merged;
4846 }
4847
4848 /*
4849 * uvm_map_setup: init map
4850 *
4851 * => map must not be in service yet.
4852 */
4853
4854 void
4855 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4856 {
4857
4858 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
4859 map->header.next = map->header.prev = &map->header;
4860 map->nentries = 0;
4861 map->size = 0;
4862 map->ref_count = 1;
4863 vm_map_setmin(map, vmin);
4864 vm_map_setmax(map, vmax);
4865 map->flags = flags;
4866 map->first_free = &map->header;
4867 map->hint = &map->header;
4868 map->timestamp = 0;
4869 map->busy = NULL;
4870
4871 rw_init(&map->lock);
4872 cv_init(&map->cv, "vm_map");
4873 mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
4874 }
4875
4876 /*
4877 * U N M A P - m a i n e n t r y p o i n t
4878 */
4879
4880 /*
4881 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
4882 *
4883 * => caller must check alignment and size
4884 * => map must be unlocked (we will lock it)
4885 * => flags is UVM_FLAG_VAONLY or 0.
4886 */
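/*
 * most callers reach this through the uvm_unmap() wrapper, which
 * (assuming its definition in uvm_map.h) is simply uvm_unmap1(map,
 * start, end, 0).
 */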
4887
4888 void
4889 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4890 {
4891 struct vm_map_entry *dead_entries;
4892 UVMHIST_FUNC(__func__);
4893 UVMHIST_CALLARGS(maphist, " (map=%#jx, start=%#jx, end=%#jx)",
4894 (uintptr_t)map, start, end, 0);
4895
4896 KASSERTMSG(start < end,
4897 "%s: map %p: start %#jx < end %#jx", __func__, map,
4898 (uintmax_t)start, (uintmax_t)end);
4899 if (map == kernel_map) {
4900 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
4901 }
4902
4903 /*
4904 * work now done by helper functions. wipe the pmap mappings and
4905 * then detach from the dead entries...
4906 */
4907 vm_map_lock(map);
4908 uvm_unmap_remove(map, start, end, &dead_entries, flags);
4909 vm_map_unlock(map);
4910
4911 if (dead_entries != NULL)
4912 uvm_unmap_detach(dead_entries, 0);
4913
4914 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
4915 }
4916
4917
4918 /*
4919 * uvm_map_reference: add reference to a map
4920 *
4921 * => map need not be locked
4922 */
4923
4924 void
4925 uvm_map_reference(struct vm_map *map)
4926 {
4927
4928 atomic_inc_uint(&map->ref_count);
4929 }
4930
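/*
 * uvm_map_lock_entry/uvm_map_unlock_entry: lock/unlock both layers
 * backing a map entry. note the ordering: the amap lock is taken
 * before the object lock, and released after it.
 */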
4931 void
4932 uvm_map_lock_entry(struct vm_map_entry *entry, krw_t op)
4933 {
4934
4935 if (entry->aref.ar_amap != NULL) {
4936 amap_lock(entry->aref.ar_amap, op);
4937 }
4938 if (UVM_ET_ISOBJ(entry)) {
4939 rw_enter(entry->object.uvm_obj->vmobjlock, op);
4940 }
4941 }
4942
4943 void
4944 uvm_map_unlock_entry(struct vm_map_entry *entry)
4945 {
4946
4947 if (UVM_ET_ISOBJ(entry)) {
4948 rw_exit(entry->object.uvm_obj->vmobjlock);
4949 }
4950 if (entry->aref.ar_amap != NULL) {
4951 amap_unlock(entry->aref.ar_amap);
4952 }
4953 }
4954
4955 #define UVM_VOADDR_TYPE_MASK 0x3UL
4956 #define UVM_VOADDR_TYPE_UOBJ 0x1UL
4957 #define UVM_VOADDR_TYPE_ANON 0x2UL
4958 #define UVM_VOADDR_OBJECT_MASK ~UVM_VOADDR_TYPE_MASK
4959
4960 #define UVM_VOADDR_GET_TYPE(voa) \
4961 ((voa)->object & UVM_VOADDR_TYPE_MASK)
4962 #define UVM_VOADDR_GET_OBJECT(voa) \
4963 ((voa)->object & UVM_VOADDR_OBJECT_MASK)
4964 #define UVM_VOADDR_SET_OBJECT(voa, obj, type) \
4965 do { \
4966 KASSERT(((uintptr_t)(obj) & UVM_VOADDR_TYPE_MASK) == 0); \
4967 (voa)->object = ((uintptr_t)(obj)) | (type); \
4968 } while (/*CONSTCOND*/0)
4969
4970 #define UVM_VOADDR_GET_UOBJ(voa) \
4971 ((struct uvm_object *)UVM_VOADDR_GET_OBJECT(voa))
4972 #define UVM_VOADDR_SET_UOBJ(voa, uobj) \
4973 UVM_VOADDR_SET_OBJECT(voa, uobj, UVM_VOADDR_TYPE_UOBJ)
4974
4975 #define UVM_VOADDR_GET_ANON(voa) \
4976 ((struct vm_anon *)UVM_VOADDR_GET_OBJECT(voa))
4977 #define UVM_VOADDR_SET_ANON(voa, anon) \
4978 UVM_VOADDR_SET_OBJECT(voa, anon, UVM_VOADDR_TYPE_ANON)
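
/*
 * a uvm_voaddr encodes its owner as a tagged pointer: struct
 * uvm_object and struct vm_anon are at least 4-byte aligned (the
 * KASSERT in UVM_VOADDR_SET_OBJECT enforces this), so the low two
 * bits of the pointer carry the type. illustrative use:
 *
 *	UVM_VOADDR_SET_UOBJ(voa, uobj);
 *	KASSERT(UVM_VOADDR_GET_TYPE(voa) == UVM_VOADDR_TYPE_UOBJ);
 *	KASSERT(UVM_VOADDR_GET_UOBJ(voa) == uobj);
 */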
4979
4980 /*
4981 * uvm_voaddr_acquire: returns the virtual object address corresponding
4982 * to the specified virtual address.
4983 *
4984 * => resolves COW so the true page identity is tracked.
4985 *
4986 * => acquires a reference on the page's owner (uvm_object or vm_anon)
4987 */
4988 bool
4989 uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
4990 struct uvm_voaddr * const voaddr)
4991 {
4992 struct vm_map_entry *entry;
4993 struct vm_anon *anon = NULL;
4994 bool result = false;
4995 bool exclusive = false;
4996 void (*unlock_fn)(struct vm_map *);
4997
4998 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4999 UVMHIST_LOG(maphist,"(map=%#jx,va=%#jx)", (uintptr_t)map, va, 0, 0);
5000
5001 const vaddr_t start = trunc_page(va);
5002 const vaddr_t end = round_page(va+1);
5003
5004 lookup_again:
5005 if (__predict_false(exclusive)) {
5006 vm_map_lock(map);
5007 unlock_fn = vm_map_unlock;
5008 } else {
5009 vm_map_lock_read(map);
5010 unlock_fn = vm_map_unlock_read;
5011 }
5012
5013 if (__predict_false(!uvm_map_lookup_entry(map, start, &entry))) {
5014 unlock_fn(map);
5015 UVMHIST_LOG(maphist,"<- done (no entry)",0,0,0,0);
5016 return false;
5017 }
5018
5019 if (__predict_false(entry->protection == VM_PROT_NONE)) {
5020 unlock_fn(map);
5021 UVMHIST_LOG(maphist,"<- done (PROT_NONE)",0,0,0,0);
5022 return false;
5023 }
5024
5025 /*
5026 * We have a fast path for the common case of "no COW resolution
5027 * needed": we take a read lock on the map, and if we never need
5028 * to create a vm_anon, great! But if we do, we loop around
5029 * again, this time taking an exclusive lock so that we can
5030 * perform the fault.
5031 *
5032 * In the event that we have to resolve the fault, we do nearly the
5033 * same work as uvm_map_pageable() does:
5034 *
5035 * 1: holding the write lock, we create any anonymous maps that need
5036 * to be created. however, we do NOT need to clip the map entries
5037 * in this case.
5038 *
5039 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
5040 * in the page (assuming the entry is not already wired). this
5041 * is done because we need the vm_anon to be present.
5042 */
5043 if (__predict_true(!VM_MAPENT_ISWIRED(entry))) {
5044
5045 bool need_fault = false;
5046
5047 /*
5048 * perform the actions of vm_map_lookup that need the
5049 * write lock on the map: create an anonymous map for
5050 * a copy-on-write region, or an anonymous map for
5051 * a zero-fill region.
5052 */
5053 if (__predict_false(UVM_ET_ISSUBMAP(entry))) {
5054 unlock_fn(map);
5055 UVMHIST_LOG(maphist,"<- done (submap)",0,0,0,0);
5056 return false;
5057 }
5058 if (__predict_false(UVM_ET_ISNEEDSCOPY(entry) &&
5059 ((entry->max_protection & VM_PROT_WRITE) ||
5060 (entry->object.uvm_obj == NULL)))) {
5061 if (!exclusive) {
5062 /* need to take the slow path */
5063 KASSERT(unlock_fn == vm_map_unlock_read);
5064 vm_map_unlock_read(map);
5065 exclusive = true;
5066 goto lookup_again;
5067 }
5068 need_fault = true;
5069 amap_copy(map, entry, 0, start, end);
5070 /* XXXCDC: wait OK? */
5071 }
5072
5073 /*
5074 * do a quick check to see if the fault has already
5075 * been resolved to the upper layer.
5076 */
5077 if (__predict_true(entry->aref.ar_amap != NULL &&
5078 need_fault == false)) {
5079 amap_lock(entry->aref.ar_amap, RW_WRITER);
5080 anon = amap_lookup(&entry->aref, start - entry->start);
5081 if (__predict_true(anon != NULL)) {
5082 /* amap unlocked below */
5083 goto found_anon;
5084 }
5085 amap_unlock(entry->aref.ar_amap);
5086 need_fault = true;
5087 }
5088
5089 /*
5090 * we predict this test as false because if we reach
5091 * this point, then we are likely dealing with a
5092 * shared memory region backed by a uvm_object, in
5093 * which case a fault to create the vm_anon is not
5094 * necessary.
5095 */
5096 if (__predict_false(need_fault)) {
5097 if (exclusive) {
5098 vm_map_busy(map);
5099 vm_map_unlock(map);
5100 unlock_fn = vm_map_unbusy;
5101 }
5102
5103 if (uvm_fault_wire(map, start, end,
5104 entry->max_protection, 1)) {
5105 /* wiring failed */
5106 unlock_fn(map);
5107 UVMHIST_LOG(maphist,"<- done (wire failed)",
5108 0,0,0,0);
5109 return false;
5110 }
5111
5112 /*
5113 * now that we have resolved the fault, we can unwire
5114 * the page.
5115 */
5116 if (exclusive) {
5117 vm_map_lock(map);
5118 vm_map_unbusy(map);
5119 unlock_fn = vm_map_unlock;
5120 }
5121
5122 uvm_fault_unwire_locked(map, start, end);
5123 }
5124 }
5125
5126 /* check the upper layer */
5127 if (entry->aref.ar_amap) {
5128 amap_lock(entry->aref.ar_amap, RW_WRITER);
5129 anon = amap_lookup(&entry->aref, start - entry->start);
5130 if (anon) {
5131 found_anon: KASSERT(anon->an_lock == entry->aref.ar_amap->am_lock);
5132 anon->an_ref++;
5133 rw_obj_hold(anon->an_lock);
5134 KASSERT(anon->an_ref != 0);
5135 UVM_VOADDR_SET_ANON(voaddr, anon);
5136 voaddr->offset = va & PAGE_MASK;
5137 result = true;
5138 }
5139 amap_unlock(entry->aref.ar_amap);
5140 }
5141
5142 /* check the lower layer */
5143 if (!result && UVM_ET_ISOBJ(entry)) {
5144 struct uvm_object *uobj = entry->object.uvm_obj;
5145
5146 KASSERT(uobj != NULL);
5147 (*uobj->pgops->pgo_reference)(uobj);
5148 UVM_VOADDR_SET_UOBJ(voaddr, uobj);
5149 voaddr->offset = entry->offset + (va - entry->start);
5150 result = true;
5151 }
5152
5153 unlock_fn(map);
5154
5155 if (result) {
5156 UVMHIST_LOG(maphist,
5157 "<- done OK (type=%jd,owner=%#jx,offset=%#jx)",
5158 UVM_VOADDR_GET_TYPE(voaddr),
5159 UVM_VOADDR_GET_OBJECT(voaddr),
5160 voaddr->offset, 0);
5161 } else {
5162 UVMHIST_LOG(maphist,"<- done (failed)",0,0,0,0);
5163 }
5164
5165 return result;
5166 }
5167
5168 /*
5169 * uvm_voaddr_release: release the references held by the
5170 * virtual object address.
5171 */
5172 void
5173 uvm_voaddr_release(struct uvm_voaddr * const voaddr)
5174 {
5175
5176 switch (UVM_VOADDR_GET_TYPE(voaddr)) {
5177 case UVM_VOADDR_TYPE_UOBJ: {
5178 struct uvm_object * const uobj = UVM_VOADDR_GET_UOBJ(voaddr);
5179
5180 KASSERT(uobj != NULL);
5181 KASSERT(uobj->pgops->pgo_detach != NULL);
5182 (*uobj->pgops->pgo_detach)(uobj);
5183 break;
5184 }
5185 case UVM_VOADDR_TYPE_ANON: {
5186 struct vm_anon * const anon = UVM_VOADDR_GET_ANON(voaddr);
5187 krwlock_t *lock;
5188
5189 KASSERT(anon != NULL);
5190 rw_enter((lock = anon->an_lock), RW_WRITER);
5191 KASSERT(anon->an_ref > 0);
5192 if (--anon->an_ref == 0) {
5193 uvm_anfree(anon);
5194 }
5195 rw_exit(lock);
5196 rw_obj_free(lock);
5197 break;
5198 }
5199 default:
5200 panic("uvm_voaddr_release: bad type");
5201 }
5202 memset(voaddr, 0, sizeof(*voaddr));
5203 }
5204
5205 /*
5206 * uvm_voaddr_compare: compare two uvm_voaddr objects.
5207 *
5208 * => memcmp() semantics
5209 */
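/*
 * i.e. returns < 0, 0, or > 0 as voaddr1 sorts before, equal to, or
 * after voaddr2, ordering first by type, then by owner pointer, then
 * by offset -- usable as a comparison function for a tree or sort
 * keyed on virtual object addresses.
 */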
5210 int
5211 uvm_voaddr_compare(const struct uvm_voaddr * const voaddr1,
5212 const struct uvm_voaddr * const voaddr2)
5213 {
5214 const uintptr_t type1 = UVM_VOADDR_GET_TYPE(voaddr1);
5215 const uintptr_t type2 = UVM_VOADDR_GET_TYPE(voaddr2);
5216
5217 KASSERT(type1 == UVM_VOADDR_TYPE_UOBJ ||
5218 type1 == UVM_VOADDR_TYPE_ANON);
5219
5220 KASSERT(type2 == UVM_VOADDR_TYPE_UOBJ ||
5221 type2 == UVM_VOADDR_TYPE_ANON);
5222
5223 if (type1 < type2)
5224 return -1;
5225 if (type1 > type2)
5226 return 1;
5227
5228 const uintptr_t addr1 = UVM_VOADDR_GET_OBJECT(voaddr1);
5229 const uintptr_t addr2 = UVM_VOADDR_GET_OBJECT(voaddr2);
5230
5231 if (addr1 < addr2)
5232 return -1;
5233 if (addr1 > addr2)
5234 return 1;
5235
5236 if (voaddr1->offset < voaddr2->offset)
5237 return -1;
5238 if (voaddr1->offset > voaddr2->offset)
5239 return 1;
5240
5241 return 0;
5242 }
5243
5244 #if defined(DDB) || defined(DEBUGPRINT)
5245
5246 /*
5247 * uvm_map_printit: actually prints the map
5248 */
5249
5250 void
5251 uvm_map_printit(struct vm_map *map, bool full,
5252 void (*pr)(const char *, ...))
5253 {
5254 struct vm_map_entry *entry;
5255
5256 (*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map),
5257 vm_map_max(map));
5258 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n",
5259 map->nentries, map->size, map->ref_count, map->timestamp,
5260 map->flags);
5261 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5262 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5263 if (!full)
5264 return;
5265 for (entry = map->header.next; entry != &map->header;
5266 entry = entry->next) {
5267 (*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n",
5268 entry, entry->start, entry->end, entry->object.uvm_obj,
5269 (long long)entry->offset, entry->aref.ar_amap,
5270 entry->aref.ar_pageoff);
5271 (*pr)(
5272 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5273 "wc=%d, adv=%d%s\n",
5274 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5275 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5276 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5277 entry->protection, entry->max_protection,
5278 entry->inheritance, entry->wired_count, entry->advice,
5279 entry == map->first_free ? " (first_free)" : "");
5280 }
5281 }
5282
5283 void
5284 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5285 {
5286 struct vm_map *map;
5287
5288 for (map = kernel_map;;) {
5289 struct vm_map_entry *entry;
5290
5291 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5292 break;
5293 }
5294 (*pr)("%p is %p+%zu from VMMAP %p\n",
5295 (void *)addr, (void *)entry->start,
5296 (size_t)(addr - (uintptr_t)entry->start), map);
5297 if (!UVM_ET_ISSUBMAP(entry)) {
5298 break;
5299 }
5300 map = entry->object.sub_map;
5301 }
5302 }
5303
5304 #endif /* DDB || DEBUGPRINT */
5305
5306 #ifndef __USER_VA0_IS_SAFE
5307 static int
5308 sysctl_user_va0_disable(SYSCTLFN_ARGS)
5309 {
5310 struct sysctlnode node;
5311 int t, error;
5312
5313 node = *rnode;
5314 node.sysctl_data = &t;
5315 t = user_va0_disable;
5316 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5317 if (error || newp == NULL)
5318 return (error);
5319
5320 if (!t && user_va0_disable &&
5321 kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MAP_VA_ZERO, 0,
5322 NULL, NULL, NULL))
5323 return EPERM;
5324
5325 user_va0_disable = !!t;
5326 return 0;
5327 }
5328 #endif
5329
5330 static int
5331 fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve,
5332 struct vm_map *m, struct vm_map_entry *e)
5333 {
5334 #ifndef _RUMPKERNEL
5335 int error;
5336
5337 memset(kve, 0, sizeof(*kve));
5338 KASSERT(e != NULL);
5339 if (UVM_ET_ISOBJ(e)) {
5340 struct uvm_object *uobj = e->object.uvm_obj;
5341 KASSERT(uobj != NULL);
5342 kve->kve_ref_count = uobj->uo_refs;
5343 kve->kve_count = uobj->uo_npages;
5344 if (UVM_OBJ_IS_VNODE(uobj)) {
5345 struct vattr va;
5346 struct vnode *vp = (struct vnode *)uobj;
5347 vn_lock(vp, LK_SHARED | LK_RETRY);
5348 error = VOP_GETATTR(vp, &va, l->l_cred);
5349 VOP_UNLOCK(vp);
5350 kve->kve_type = KVME_TYPE_VNODE;
5351 if (error == 0) {
5352 kve->kve_vn_size = vp->v_size;
5353 kve->kve_vn_type = (int)vp->v_type;
5354 kve->kve_vn_mode = va.va_mode;
5355 kve->kve_vn_rdev = va.va_rdev;
5356 kve->kve_vn_fileid = va.va_fileid;
5357 kve->kve_vn_fsid = va.va_fsid;
5358 error = vnode_to_path(kve->kve_path,
5359 sizeof(kve->kve_path) / 2, vp, l, p);
5360 }
5361 } else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
5362 kve->kve_type = KVME_TYPE_KERN;
5363 } else if (UVM_OBJ_IS_DEVICE(uobj)) {
5364 kve->kve_type = KVME_TYPE_DEVICE;
5365 } else if (UVM_OBJ_IS_AOBJ(uobj)) {
5366 kve->kve_type = KVME_TYPE_ANON;
5367 } else {
5368 kve->kve_type = KVME_TYPE_OBJECT;
5369 }
5370 } else if (UVM_ET_ISSUBMAP(e)) {
5371 struct vm_map *map = e->object.sub_map;
5372 KASSERT(map != NULL);
5373 kve->kve_ref_count = map->ref_count;
5374 kve->kve_count = map->nentries;
5375 kve->kve_type = KVME_TYPE_SUBMAP;
5376 } else
5377 kve->kve_type = KVME_TYPE_UNKNOWN;
5378
5379 kve->kve_start = e->start;
5380 kve->kve_end = e->end;
5381 kve->kve_offset = e->offset;
5382 kve->kve_wired_count = e->wired_count;
5383 kve->kve_inheritance = e->inheritance;
5384 kve->kve_attributes = 0; /* unused */
5385 kve->kve_advice = e->advice;
5386 #define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \
5387 (((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \
5388 (((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0)
5389 kve->kve_protection = PROT(e->protection);
5390 kve->kve_max_protection = PROT(e->max_protection);
5391 kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE)
5392 ? KVME_FLAG_COW : 0;
5393 kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY)
5394 ? KVME_FLAG_NEEDS_COPY : 0;
5395 kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN)
5396 ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP;
5397 kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE)
5398 ? KVME_FLAG_PAGEABLE : 0;
5399 #endif
5400 return 0;
5401 }
5402
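/*
 * fill_vmentries: copy out one kinfo_vmentry per map entry for
 * sysctl(CTL_VM, VM_PROC, VM_PROC_MAP, pid, elem_size); see
 * sysctl_vmproc() below, which passes name[1] as the pid and
 * name[2] as the caller's element size.
 */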
5403 static int
5404 fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp,
5405 size_t *oldlenp)
5406 {
5407 int error;
5408 struct proc *p;
5409 struct kinfo_vmentry *vme;
5410 struct vmspace *vm;
5411 struct vm_map *map;
5412 struct vm_map_entry *entry;
5413 char *dp;
5414 size_t count, vmesize;
5415
5416 if (elem_size == 0 || elem_size > 2 * sizeof(*vme))
5417 return EINVAL;
5418
5419 if (oldp) {
5420 if (*oldlenp > 10UL * 1024UL * 1024UL)
5421 return E2BIG;
5422 count = *oldlenp / elem_size;
5423 if (count == 0)
5424 return ENOMEM;
5425 vmesize = count * sizeof(*vme);
5426 } else
5427 vmesize = 0;
5428
5429 if ((error = proc_find_locked(l, &p, pid)) != 0)
5430 return error;
5431
5432 vme = NULL;
5433 count = 0;
5434
5435 if ((error = proc_vmspace_getref(p, &vm)) != 0)
5436 goto out;
5437
5438 map = &vm->vm_map;
5439 vm_map_lock_read(map);
5440
5441 dp = oldp;
5442 if (oldp)
5443 vme = kmem_alloc(vmesize, KM_SLEEP);
5444 for (entry = map->header.next; entry != &map->header;
5445 entry = entry->next) {
5446 if (oldp && (dp - (char *)oldp) < vmesize) {
5447 error = fill_vmentry(l, p, &vme[count], map, entry);
5448 if (error)
5449 goto out;
5450 dp += elem_size;
5451 }
5452 count++;
5453 }
5454 vm_map_unlock_read(map);
5455 uvmspace_free(vm);
5456
5457 out:
5458 if (pid != -1)
5459 mutex_exit(p->p_lock);
5460 if (error == 0) {
5461 const u_int esize = uimin(sizeof(*vme), elem_size);
5462 dp = oldp;
5463 for (size_t i = 0; i < count; i++) {
5464 if (oldp && (dp - (char *)oldp) < vmesize) {
5465 error = sysctl_copyout(l, &vme[i], dp, esize);
5466 if (error)
5467 break;
5468 dp += elem_size;
5469 } else
5470 break;
5471 }
5472 count *= elem_size;
5473 if (oldp != NULL && *oldlenp < count)
5474 error = ENOSPC;
5475 *oldlenp = count;
5476 }
5477 if (vme)
5478 kmem_free(vme, vmesize);
5479 return error;
5480 }
5481
5482 static int
5483 sysctl_vmproc(SYSCTLFN_ARGS)
5484 {
5485 int error;
5486
5487 if (namelen == 1 && name[0] == CTL_QUERY)
5488 return (sysctl_query(SYSCTLFN_CALL(rnode)));
5489
5490 if (namelen == 0)
5491 return EINVAL;
5492
5493 switch (name[0]) {
5494 case VM_PROC_MAP:
5495 if (namelen != 3)
5496 return EINVAL;
5497 sysctl_unlock();
5498 error = fill_vmentries(l, name[1], name[2], oldp, oldlenp);
5499 sysctl_relock();
5500 return error;
5501 default:
5502 return EINVAL;
5503 }
5504 }
5505
5506 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
5507 {
5508
5509 sysctl_createv(clog, 0, NULL, NULL,
5510 CTLFLAG_PERMANENT,
5511 CTLTYPE_STRUCT, "proc",
5512 SYSCTL_DESCR("Process vm information"),
5513 sysctl_vmproc, 0, NULL, 0,
5514 CTL_VM, VM_PROC, CTL_EOL);
5515 #ifndef __USER_VA0_IS_SAFE
5516 sysctl_createv(clog, 0, NULL, NULL,
5517 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5518 CTLTYPE_INT, "user_va0_disable",
5519 SYSCTL_DESCR("Disable VA 0"),
5520 sysctl_user_va0_disable, 0, &user_va0_disable, 0,
5521 CTL_VM, CTL_CREATE, CTL_EOL);
5522 #endif
5523 }
5524