/*	$NetBSD: uvm_map.c,v 1.338 2016/06/01 00:49:44 christos Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.338 2016/06/01 00:49:44 christos Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>
#include <sys/sysctl.h>
#ifndef __USER_VA0_IS_SAFE
#include <sys/kauth.h>
#include "opt_user_va0_disable_default.h"
#endif

#include <sys/shm.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#ifdef UVMHIST
static struct kern_history_ent maphistbuf[100];
UVMHIST_DEFINE(maphist) = UVMHIST_INITIALIZER(maphist, maphistbuf);
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_list)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)
UVMMAP_EVCNT_DEFINE(mlk_listloop)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

#ifndef __USER_VA0_IS_SAFE
#ifndef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT 1
#endif
#ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
#undef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
#endif
static int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
#endif

/*
 * macros
 */

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */
extern struct vm_map *pager_map;

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))
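
/*
 * For example (illustrative): two adjacent anonymous entries created by
 * consecutive mmap(MAP_ANON) calls with the same protection, maximum
 * protection, inheritance, advice and a wired_count of zero satisfy
 * UVM_ET_ISCOMPATIBLE and may be merged into one entry, while entries
 * that differ in any of these fields, are backed by different
 * uvm_objects, or carry UVM_MAP_NOMERGE stay separate.
 */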

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, const char *,
		    int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __FILE__, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    int, struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

/*
 * These get filled in if/when SYSVSHM shared memory code is loaded
 *
 * We do this with function pointers rather than #ifdef SYSVSHM so the
 * SYSVSHM code can be loaded and unloaded.
 */
void (*uvm_shmexit)(struct vmspace *) = NULL;
void (*uvm_shmfork)(struct vmspace *, struct vmspace *) = NULL;

static int
uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
{
	const struct vm_map_entry *eparent = nparent;
	const struct vm_map_entry *ekey = nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (eparent->start < ekey->start)
		return -1;
	if (eparent->end >= ekey->start)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (eparent->start < va)
		return -1;
	if (eparent->end >= va)
		return 1;
	return 0;
}

static const rb_tree_ops_t uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_rb_gap: return the gap size between our entry and next entry.
 */
static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{

	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendants.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendants, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
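
/*
 * Worked example (illustrative addresses): with entries
 * A = [0x1000,0x2000), B = [0x3000,0x7000) and C = [0x8000,0x9000)
 * followed by free space up to 0xb000, the cached gaps are
 * A->gap = 0x1000, B->gap = 0x1000 and C->gap = 0x2000.  If B is the
 * root with children A and C, then B->maxgap = max(B->gap, A->maxgap,
 * C->maxgap) = 0x2000, so a search for a 0x2000-byte gap can descend
 * straight to C and skip any subtree whose cached maxgap is too small.
 */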

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;
		unsigned int which;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work towards the root, we know entry's maxgap
		 * value is OK, but its brothers may now be out-of-date due
		 * to rebalancing.  So refresh it.
		 */
		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *ret __diagused;

	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	ret = rb_tree_insert_node(&map->rb_tree, entry);
	KASSERTMSG(ret == entry,
	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, entry);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %#lx != %#lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, tmp->gap will be unsigned
		 * and will likely exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %#lx != %#lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("corrupt: 0x%" PRIxVADDR " >= 0x%" PRIxVADDR "\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL || map->busy == curlwp) {
			break;
		}
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL) {
			cv_wait(&map->cv, &map->misc_lock);
		}
		mutex_exit(&map->misc_lock);
	}
	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if (!rw_tryenter(&map->lock, RW_WRITER)) {
		return false;
	}
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}
	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL || map->busy == curlwp);
	rw_exit(&map->lock);
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 * want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}
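
/*
 * Sketch of the intended busy dance (not lifted verbatim from any one
 * caller): a thread that must drop the map lock, e.g. to sleep for I/O,
 * while keeping other writers out does roughly
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_unlock(map);
 *	(sleep, page in data, ...)
 *	vm_map_lock(map);	(guaranteed upgrade, see vm_map_lock())
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 */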

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	return rw_write_held(&map->lock);
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	me = pool_cache_get(&uvm_map_entry_cache, pflags);
	if (__predict_false(me == NULL)) {
		return NULL;
	}
	me->flags = 0;

	UVMHIST_LOG(maphist, "<- new entry=%p [kentry=%d]", me,
	    (map == kernel_map), 0, 0);
	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "<- freeing map entry=%p [flags=%d]",
	    me, me->flags, 0, 0);
	pool_cache_put(&uvm_map_entry_cache, me);
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */
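/*
 * (The copy stops at uvm_map_entry_stop_copy, which uvm_map.h defines to
 * be the flags member, so dst's flags word is left untouched -- that is
 * what "preserving flags" means here.)
 */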

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
#if defined(UVMHIST)
	static struct kern_history_ent pdhistbuf[100];
#endif

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_LINK_STATIC(maphist);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
}

/*
 * uvm_map_init_caches: init mapping system caches.
 */
void
uvm_map_init_caches(void)
{
	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address; if it doesn't, we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address; if it doesn't, we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	[1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	[2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	[3] <uobj,uoffset>		== normal mapping
 *	[4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *   case [4] is for kernel mappings where we don't know the offset until
 *   we've found a virtual address.  note that kernel object offsets are
 *   always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */
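/*
 * Example (sketch of case [3] above, not from a real caller):
 *
 *	vaddr_t va = 0;
 *	int error = uvm_map(map, &va, size, uobj, uoffset, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0));
 *
 * On success va holds the address that was chosen; passing
 * UVM_FLAG_FIXED in the last UVM_MAPFLAG argument would instead insist
 * on the address handed in via va.
 */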

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((size & PAGE_MASK) == 0);

#ifndef __USER_VA0_IS_SAFE
	if ((flags & UVM_FLAG_FIXED) && *startp == 0 &&
	    !VM_MAP_IS_KERNEL(map) && user_va0_disable)
		return EACCES;
#endif

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 */

	new_entry = NULL;
	if (map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}

/*
 * uvm_map_prepare:
 *
 * called with map unlocked.
 * on success, returns the map locked.
 */

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC("uvm_map_prepare");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset %p/%ld", uobj, uoffset, 0, 0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL);

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=%#x, max=%#x",
		    prot, maxprot, 0, 0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */
retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags);
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist, "waiting va timestamp=%#x",
		    timestamp, 0, 0, 0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0, 0, 0, 0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.  in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).  this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).  the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	UVMHIST_LOG(maphist, "<- done!", 0, 0, 0, 0);
	return 0;
}
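
/*
 * (The prepare/enter split exists so that a caller which cannot afford
 * to sleep with the map locked can pre-allocate its entry and run the
 * two phases itself; uvm_map() above is simply the common wrapper that
 * calls one after the other.)
 */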

/*
 * uvm_map_enter:
 *
 * called with map locked.
 * unlock the map before returning.
 */

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC("uvm_map_enter");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset %p/%ld", uobj, uoffset, 0, 0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
	KASSERT(vm_map_locked_p(map));

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.  might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay
		 * there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist, "  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk.  Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist, "<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay
		 * there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist, "  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist, "<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist, "  allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist, "<- done!", 0, 0, 0, 0);

	error = 0;
done:
	vm_map_unlock(map);

	if (new_entry) {
		uvm_mapent_free(new_entry);
	}

	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free(dead);
	}

	return error;
}

/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	while (cur) {
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			cur = RIGHT_ENTRY(cur);
		} else
			cur = LEFT_ENTRY(cur);
	}
	*entry = prev;
	return false;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	bool use_tree = false;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p,addr=%#lx,ent=%p)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	UVMMAP_EVCNT_INCR(mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		if (cur != &map->header && cur->end > address) {
			UVMMAP_EVCNT_INCR(mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist, "<- got it via hint (%p)",
			    cur, 0, 0, 0);
			uvm_mapent_check(*entry);
			return (true);
		}

		if (map->nentries > 15)
			use_tree = true;
	} else {

		/*
		 * invalid hint.  use tree.
		 */
		use_tree = true;
	}

	uvm_map_check(map, __func__);

	if (use_tree) {
		/*
		 * Simple lookup in the tree.  Happens when the hint is
		 * invalid, or nentries reach a threshold.
		 */
		UVMMAP_EVCNT_INCR(mlk_tree);
		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
			goto got;
		} else {
			goto failed;
		}
	}

	/*
	 * search linearly
	 */

	UVMMAP_EVCNT_INCR(mlk_list);
	while (cur != &map->header) {
		UVMMAP_EVCNT_INCR(mlk_listloop);
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
got:
				SAVE_HINT(map, map->hint, *entry);
				UVMHIST_LOG(maphist, "<- search got it (%p)",
				    cur, 0, 0, 0);
				KDASSERT((*entry)->start <= address);
				KDASSERT(address < (*entry)->end);
				uvm_mapent_check(*entry);
				return (true);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
failed:
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist, "<- failed!", 0, 0, 0, 0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}
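
/*
 * Illustrative contract (hypothetical entries): in a map holding
 * [A,B) and [C,D) with B < C, a lookup of va with A <= va < B returns
 * true and *entry pointing at [A,B); a lookup of va with B <= va < C
 * returns false with *entry still pointing at [A,B), the entry that
 * precedes the gap.
 */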

/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits, 0 if
 * it doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
		KASSERT(align < uvmexp.ncolors);
		if (uvmexp.ncolors > 1) {
			const u_int colormask = uvmexp.colormask;
			const u_int colorsize = colormask + 1;
			vaddr_t hint = atop(*start);
			const u_int color = hint & colormask;
			if (color != align) {
				hint -= color;	/* adjust to color boundary */
				KASSERT((hint & colormask) == 0);
				if (topdown) {
					if (align > color)
						hint -= colorsize;
				} else {
					if (align < color)
						hint += colorsize;
				}
				*start = ptoa(hint + align); /* adjust to color */
			}
		}
	} else if (align != 0) {
		if ((*start & (align - 1)) != 0) {
			if (topdown)
				*start &= ~(align - 1);
			else
				*start = roundup(*start, align);
		}
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}
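
/*
 * Worked color example (illustrative, assuming 4 KiB pages and
 * uvmexp.ncolors == 4, hence colormask == 3): for *start == 0x7000 the
 * hint page is 0x7 with color 3.  With align == 1 (the wanted color)
 * and bottom-up allocation, hint is rounded down to the color boundary
 * (0x4), advanced one full color cycle because 1 < 3 (giving 0x8), and
 * *start becomes ptoa(0x8 + 1) == 0x9000, the next address upward whose
 * page color is 1.
 */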

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set in "flags" (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if "align" is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

struct vm_map_entry *
uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
    vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
    vsize_t align, int flags)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *child, *prev, *tmp;
	vaddr_t orig_hint __diagused;
	const int topdown = map->flags & VM_MAP_TOPDOWN;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p, hint=%#lx, len=%lu, flags=%#x)",
	    map, hint, length, flags);
	KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || (align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	uvm_map_check(map, "map_findspace entry");

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < vm_map_min(map)) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist, "<- VA below map range", 0, 0, 0, 0);
			return (NULL);
		}
		hint = vm_map_min(map);
	}
	if (hint > vm_map_max(map)) {
		UVMHIST_LOG(maphist, "<- VA %#lx > range [%#lx->%#lx]",
		    hint, vm_map_min(map), vm_map_max(map), 0);
		return (NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	/*
	 * @@@: there are four, no, eight cases to consider.
	 *
	 * 0: found,     fixed,     bottom up -> fail
	 * 1: found,     fixed,     top down  -> fail
	 * 2: found,     not fixed, bottom up -> start after entry->end,
	 *                                       loop up
	 * 3: found,     not fixed, top down  -> start before entry->start,
	 *                                       loop down
	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
	 * 6: not found, not fixed, bottom up -> check entry->next->start,
	 *                                       loop up
	 * 7: not found, not fixed, top down  -> check entry->next->start,
	 *                                       loop down
	 *
	 * as you can see, it reduces to roughly five cases, and that
	 * adding top down mapping only adds one unique case (without
	 * it, there would be four cases).
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
		entry = map->first_free;
	} else {
		if (uvm_map_lookup_entry(map, hint, &entry)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist, "<- fixed & VA in use",
				    0, 0, 0, 0);
				return (NULL);
			}
			if (topdown)
				/* Start from lower gap. */
				entry = entry->prev;
		} else if (flags & UVM_FLAG_FIXED) {
			if (entry->next->start >= hint + length &&
			    hint + length > hint)
				goto found;

			/* "hint" address is gap but too small */
			UVMHIST_LOG(maphist, "<- fixed mapping failed",
			    0, 0, 0, 0);
			return (NULL); /* only one shot at it ... */
		} else {
			/*
			 * See if given hint fits in this gap.
			 */
			switch (uvm_map_space_avail(&hint, length,
			    uoffset, align, flags, topdown, entry)) {
			case 1:
				goto found;
			case -1:
				goto wraparound;
			}

			if (topdown) {
				/*
				 * Still there is a chance to fit
				 * if hint > entry->end.
				 */
			} else {
				/* Start from higher gap. */
				entry = entry->next;
				if (entry == &map->header)
					goto notfound;
				goto nextgap;
			}
		}
	}

	/*
	 * Note that the UVM_FLAG_FIXED case has already been handled.
	 */
	KDASSERT((flags & UVM_FLAG_FIXED) == 0);

	/* Try to find the space in the red-black tree */

	/* Check slot before any entry */
	hint = topdown ? entry->next->start - length : entry->end;
	switch (uvm_map_space_avail(&hint, length, uoffset, align, flags,
	    topdown, entry)) {
	case 1:
		goto found;
	case -1:
		goto wraparound;
	}

nextgap:
	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
	/* If there is not enough space in the whole tree, we fail */
	tmp = ROOT_ENTRY(map);
	if (tmp == NULL || tmp->maxgap < length)
		goto notfound;

	prev = NULL; /* previous candidate */

	/* Find an entry close to hint that has enough space */
	for (; tmp;) {
		KASSERT(tmp->next->start == tmp->end + tmp->gap);
		if (topdown) {
			if (tmp->next->start < hint + length &&
			    (prev == NULL || tmp->end > prev->end)) {
				if (tmp->gap >= length)
					prev = tmp;
				else if ((child = LEFT_ENTRY(tmp)) != NULL
				    && child->maxgap >= length)
					prev = tmp;
			}
		} else {
			if (tmp->end >= hint &&
			    (prev == NULL || tmp->end < prev->end)) {
				if (tmp->gap >= length)
					prev = tmp;
				else if ((child = RIGHT_ENTRY(tmp)) != NULL
				    && child->maxgap >= length)
					prev = tmp;
			}
		}
		if (tmp->next->start < hint + length)
			child = RIGHT_ENTRY(tmp);
		else if (tmp->end > hint)
			child = LEFT_ENTRY(tmp);
		else {
			if (tmp->gap >= length)
				break;
			if (topdown)
				child = LEFT_ENTRY(tmp);
			else
				child = RIGHT_ENTRY(tmp);
		}
		if (child == NULL || child->maxgap < length)
			break;
		tmp = child;
	}

	if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
		/*
		 * Check if the entry that we found satisfies the
		 * space requirement
		 */
1987 if (topdown) {
1988 if (hint > tmp->next->start - length)
1989 hint = tmp->next->start - length;
1990 } else {
1991 if (hint < tmp->end)
1992 hint = tmp->end;
1993 }
1994 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1995 flags, topdown, tmp)) {
1996 case 1:
1997 entry = tmp;
1998 goto found;
1999 case -1:
2000 goto wraparound;
2001 }
2002 if (tmp->gap >= length)
2003 goto listsearch;
2004 }
2005 if (prev == NULL)
2006 goto notfound;
2007
2008 if (topdown) {
2009 KASSERT(orig_hint >= prev->next->start - length ||
2010 prev->next->start - length > prev->next->start);
2011 hint = prev->next->start - length;
2012 } else {
2013 KASSERT(orig_hint <= prev->end);
2014 hint = prev->end;
2015 }
2016 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2017 flags, topdown, prev)) {
2018 case 1:
2019 entry = prev;
2020 goto found;
2021 case -1:
2022 goto wraparound;
2023 }
2024 if (prev->gap >= length)
2025 goto listsearch;
2026
2027 if (topdown)
2028 tmp = LEFT_ENTRY(prev);
2029 else
2030 tmp = RIGHT_ENTRY(prev);
2031 for (;;) {
2032 KASSERT(tmp && tmp->maxgap >= length);
2033 if (topdown)
2034 child = RIGHT_ENTRY(tmp);
2035 else
2036 child = LEFT_ENTRY(tmp);
2037 if (child && child->maxgap >= length) {
2038 tmp = child;
2039 continue;
2040 }
2041 if (tmp->gap >= length)
2042 break;
2043 if (topdown)
2044 tmp = LEFT_ENTRY(tmp);
2045 else
2046 tmp = RIGHT_ENTRY(tmp);
2047 }
2048
2049 if (topdown) {
2050 KASSERT(orig_hint >= tmp->next->start - length ||
2051 tmp->next->start - length > tmp->next->start);
2052 hint = tmp->next->start - length;
2053 } else {
2054 KASSERT(orig_hint <= tmp->end);
2055 hint = tmp->end;
2056 }
2057 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2058 flags, topdown, tmp)) {
2059 case 1:
2060 entry = tmp;
2061 goto found;
2062 case -1:
2063 goto wraparound;
2064 }
2065
2066 /*
2067 * The tree fails to find an entry because of offset or alignment
2068 * restrictions. Search the list instead.
2069 */
2070 listsearch:
2071 /*
2072 * Look through the rest of the map, trying to fit a new region in
2073 * the gap between existing regions, or after the very last region.
2074 * note: entry->end = base VA of current gap,
2075 * entry->next->start = VA of end of current gap
2076 */
2077
2078 for (;;) {
2079 /* Update hint for current gap. */
2080 hint = topdown ? entry->next->start - length : entry->end;
2081
2082 /* See if it fits. */
2083 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2084 flags, topdown, entry)) {
2085 case 1:
2086 goto found;
2087 case -1:
2088 goto wraparound;
2089 }
2090
2091 /* Advance to next/previous gap */
2092 if (topdown) {
2093 if (entry == &map->header) {
2094 UVMHIST_LOG(maphist, "<- failed (off start)",
2095 0,0,0,0);
2096 goto notfound;
2097 }
2098 entry = entry->prev;
2099 } else {
2100 entry = entry->next;
2101 if (entry == &map->header) {
2102 UVMHIST_LOG(maphist, "<- failed (off end)",
2103 0,0,0,0);
2104 goto notfound;
2105 }
2106 }
2107 }
2108
2109 found:
2110 SAVE_HINT(map, map->hint, entry);
2111 *result = hint;
2112 UVMHIST_LOG(maphist,"<- got it! (result=%#lx)", hint, 0,0,0);
2113 	KASSERT(topdown || hint >= orig_hint);
2114 KASSERT(!topdown || hint <= orig_hint);
2115 KASSERT(entry->end <= hint);
2116 KASSERT(hint + length <= entry->next->start);
2117 return (entry);
2118
2119 wraparound:
2120 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2121
2122 return (NULL);
2123
2124 notfound:
2125 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2126
2127 return (NULL);
2128 }
2129
2130 /*
2131 * U N M A P - m a i n h e l p e r f u n c t i o n s
2132 */
2133
2134 /*
2135  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
2136 *
2137 * => caller must check alignment and size
2138 * => map must be locked by caller
2139  * => we return a list of map entries that we've removed from the map
2140 * in "entry_list"
2141 */
2142
2143 void
2144 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2145 struct vm_map_entry **entry_list /* OUT */, int flags)
2146 {
2147 struct vm_map_entry *entry, *first_entry, *next;
2148 vaddr_t len;
2149 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2150
2151 UVMHIST_LOG(maphist,"(map=%p, start=%#lx, end=%#lx)",
2152 map, start, end, 0);
2153 VM_MAP_RANGE_CHECK(map, start, end);
2154
2155 uvm_map_check(map, "unmap_remove entry");
2156
2157 /*
2158 * find first entry
2159 */
2160
2161 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2162 /* clip and go... */
2163 entry = first_entry;
2164 UVM_MAP_CLIP_START(map, entry, start);
2165 /* critical! prevents stale hint */
2166 SAVE_HINT(map, entry, entry->prev);
2167 } else {
2168 entry = first_entry->next;
2169 }
2170
2171 /*
2172 * Save the free space hint
2173 */
2174
2175 if (map->first_free != &map->header && map->first_free->start >= start)
2176 map->first_free = entry->prev;
2177
2178 /*
2179 * note: we now re-use first_entry for a different task. we remove
2180 * a number of map entries from the map and save them in a linked
2181 * list headed by "first_entry". once we remove them from the map
2182 * the caller should unlock the map and drop the references to the
2183 	 * backing objects [cf. uvm_unmap_detach].  the objective is to
2184 * separate unmapping from reference dropping. why?
2185 * [1] the map has to be locked for unmapping
2186 * [2] the map need not be locked for reference dropping
2187 * [3] dropping references may trigger pager I/O, and if we hit
2188 * a pager that does synchronous I/O we may have to wait for it.
2189 * [4] we would like all waiting for I/O to occur with maps unlocked
2190 * so that we don't block other threads.
2191 */
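
	/*
	 * the usual caller pattern is therefore (roughly -- cf.
	 * uvm_unmap1(), which wraps exactly this sequence):
	 *
	 *	vm_map_lock(map);
	 *	uvm_unmap_remove(map, start, end, &dead_entries, 0);
	 *	vm_map_unlock(map);
	 *	if (dead_entries != NULL)
	 *		uvm_unmap_detach(dead_entries, 0);
	 */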
2192
2193 first_entry = NULL;
2194 *entry_list = NULL;
2195
2196 /*
2197 * break up the area into map entry sized regions and unmap. note
2198 * that all mappings have to be removed before we can even consider
2199 * dropping references to amaps or VM objects (otherwise we could end
2200 * up with a mapping to a page on the free list which would be very bad)
2201 */
2202
2203 while ((entry != &map->header) && (entry->start < end)) {
2204 KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
2205
2206 UVM_MAP_CLIP_END(map, entry, end);
2207 next = entry->next;
2208 len = entry->end - entry->start;
2209
2210 /*
2211 * unwire before removing addresses from the pmap; otherwise
2212 * unwiring will put the entries back into the pmap (XXX).
2213 */
2214
2215 if (VM_MAPENT_ISWIRED(entry)) {
2216 uvm_map_entry_unwire(map, entry);
2217 }
2218 if (flags & UVM_FLAG_VAONLY) {
2219
2220 /* nothing */
2221
2222 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2223
2224 /*
2225 * if the map is non-pageable, any pages mapped there
2226 * must be wired and entered with pmap_kenter_pa(),
2227 * and we should free any such pages immediately.
2228 * this is mostly used for kmem_map.
2229 */
2230 KASSERT(vm_map_pmap(map) == pmap_kernel());
2231
2232 uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
2233 } else if (UVM_ET_ISOBJ(entry) &&
2234 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2235 panic("%s: kernel object %p %p\n",
2236 __func__, map, entry);
2237 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2238 /*
2239 * remove mappings the standard way. lock object
2240 * and/or amap to ensure vm_page state does not
2241 * change while in pmap_remove().
2242 */
2243
2244 uvm_map_lock_entry(entry);
2245 pmap_remove(map->pmap, entry->start, entry->end);
2246 uvm_map_unlock_entry(entry);
2247 }
2248
2249 #if defined(UVMDEBUG)
2250 /*
2251 	 * check whether any mapping remains; if one does,
2252 	 * it is a bug in the caller.
2253 */
2254
2255 vaddr_t va;
2256 for (va = entry->start; va < entry->end;
2257 va += PAGE_SIZE) {
2258 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2259 panic("%s: %#"PRIxVADDR" has mapping",
2260 __func__, va);
2261 }
2262 }
2263
2264 if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
2265 uvm_km_check_empty(map, entry->start,
2266 entry->end);
2267 }
2268 #endif /* defined(UVMDEBUG) */
2269
2270 /*
2271 * remove entry from map and put it on our list of entries
2272 * that we've nuked. then go to next entry.
2273 */
2274
2275 UVMHIST_LOG(maphist, " removed map entry %p", entry, 0, 0,0);
2276
2277 /* critical! prevents stale hint */
2278 SAVE_HINT(map, entry, entry->prev);
2279
2280 uvm_map_entry_unlink(map, entry);
2281 KASSERT(map->size >= len);
2282 map->size -= len;
2283 entry->prev = NULL;
2284 entry->next = first_entry;
2285 first_entry = entry;
2286 entry = next;
2287 }
2288
2289 /*
2290 * Note: if map is dying, leave pmap_update() for pmap_destroy(),
2291 * which will be called later.
2292 */
2293 if ((map->flags & VM_MAP_DYING) == 0) {
2294 pmap_update(vm_map_pmap(map));
2295 } else {
2296 KASSERT(vm_map_pmap(map) != pmap_kernel());
2297 }
2298
2299 uvm_map_check(map, "unmap_remove leave");
2300
2301 /*
2302 * now we've cleaned up the map and are ready for the caller to drop
2303 * references to the mapped objects.
2304 */
2305
2306 *entry_list = first_entry;
2307 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2308
2309 if (map->flags & VM_MAP_WANTVA) {
2310 mutex_enter(&map->misc_lock);
2311 map->flags &= ~VM_MAP_WANTVA;
2312 cv_broadcast(&map->cv);
2313 mutex_exit(&map->misc_lock);
2314 }
2315 }
2316
2317 /*
2318 * uvm_unmap_detach: drop references in a chain of map entries
2319 *
2320 * => we will free the map entries as we traverse the list.
2321 */
2322
2323 void
2324 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2325 {
2326 struct vm_map_entry *next_entry;
2327 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2328
2329 while (first_entry) {
2330 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2331 UVMHIST_LOG(maphist,
2332 " detach %p: amap=%p, obj=%p, submap?=%d",
2333 first_entry, first_entry->aref.ar_amap,
2334 first_entry->object.uvm_obj,
2335 UVM_ET_ISSUBMAP(first_entry));
2336
2337 /*
2338 * drop reference to amap, if we've got one
2339 */
2340
2341 if (first_entry->aref.ar_amap)
2342 uvm_map_unreference_amap(first_entry, flags);
2343
2344 /*
2345 * drop reference to our backing object, if we've got one
2346 */
2347
2348 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2349 if (UVM_ET_ISOBJ(first_entry) &&
2350 first_entry->object.uvm_obj->pgops->pgo_detach) {
2351 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2352 (first_entry->object.uvm_obj);
2353 }
2354 next_entry = first_entry->next;
2355 uvm_mapent_free(first_entry);
2356 first_entry = next_entry;
2357 }
2358 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2359 }
2360
2361 /*
2362 * E X T R A C T I O N F U N C T I O N S
2363 */
2364
2365 /*
2366 * uvm_map_reserve: reserve space in a vm_map for future use.
2367 *
2368 * => we reserve space in a map by putting a dummy map entry in the
2369 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2370 * => map should be unlocked (we will write lock it)
2371 * => we return true if we were able to reserve space
2372 * => XXXCDC: should be inline?
2373 */
2374
2375 int
2376 uvm_map_reserve(struct vm_map *map, vsize_t size,
2377 vaddr_t offset /* hint for pmap_prefer */,
2378 vsize_t align /* alignment */,
2379 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2380 uvm_flag_t flags /* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */)
2381 {
2382 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2383
2384 UVMHIST_LOG(maphist, "(map=%p, size=%#lx, offset=%#lx, addr=%p)",
2385 map,size,offset,raddr);
2386
2387 size = round_page(size);
2388
2389 /*
2390 * reserve some virtual space.
2391 */
2392
2393 if (uvm_map(map, raddr, size, NULL, offset, align,
2394 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2395 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2396 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2397 return (false);
2398 }
2399
2400 UVMHIST_LOG(maphist, "<- done (*raddr=%#lx)", *raddr,0,0,0);
2401 return (true);
2402 }
2403
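/*
 * a reservation is normally consumed later by swapping the blank
 * entry for real mappings with uvm_map_replace(); uvm_map_extract()
 * below is the canonical user (a sketch, arguments simplified):
 *
 *	if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
 *		return ENOMEM;
 *	... build a chain of new entries covering [dstaddr, dstaddr+len) ...
 *	uvm_map_replace(dstmap, dstaddr, dstaddr + len, chain,
 *	    nchain, nsize, &oldentry);
 */
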
2404 /*
2405 * uvm_map_replace: replace a reserved (blank) area of memory with
2406 * real mappings.
2407 *
2408 * => caller must WRITE-LOCK the map
2409 * => we return true if replacement was a success
2410  * => we expect the newents chain to have nnewents entries on it and
2411 * we expect newents->prev to point to the last entry on the list
2412 * => note newents is allowed to be NULL
2413 */
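
/*
 * expected chain format, illustrated for nnewents == 3 (this is what
 * the DIAGNOSTIC block below verifies):
 *
 *	newents --next--> e2 --next--> e3 --next--> NULL
 *	newents->prev == e3			(the head's prev points at the tail)
 *	e2->prev == newents, e3->prev == e2	(ordinary back links)
 */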
2414
2415 static int
2416 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2417 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2418 struct vm_map_entry **oldentryp)
2419 {
2420 struct vm_map_entry *oldent, *last;
2421
2422 uvm_map_check(map, "map_replace entry");
2423
2424 /*
2425 * first find the blank map entry at the specified address
2426 */
2427
2428 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2429 return (false);
2430 }
2431
2432 /*
2433 * check to make sure we have a proper blank entry
2434 */
2435
2436 if (end < oldent->end) {
2437 UVM_MAP_CLIP_END(map, oldent, end);
2438 }
2439 if (oldent->start != start || oldent->end != end ||
2440 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2441 return (false);
2442 }
2443
2444 #ifdef DIAGNOSTIC
2445
2446 /*
2447 * sanity check the newents chain
2448 */
2449
2450 {
2451 struct vm_map_entry *tmpent = newents;
2452 int nent = 0;
2453 vsize_t sz = 0;
2454 vaddr_t cur = start;
2455
2456 while (tmpent) {
2457 nent++;
2458 sz += tmpent->end - tmpent->start;
2459 if (tmpent->start < cur)
2460 panic("uvm_map_replace1");
2461 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2462 panic("uvm_map_replace2: "
2463 "tmpent->start=%#"PRIxVADDR
2464 ", tmpent->end=%#"PRIxVADDR
2465 ", end=%#"PRIxVADDR,
2466 tmpent->start, tmpent->end, end);
2467 }
2468 cur = tmpent->end;
2469 if (tmpent->next) {
2470 if (tmpent->next->prev != tmpent)
2471 panic("uvm_map_replace3");
2472 } else {
2473 if (newents->prev != tmpent)
2474 panic("uvm_map_replace4");
2475 }
2476 tmpent = tmpent->next;
2477 }
2478 if (nent != nnewents)
2479 panic("uvm_map_replace5");
2480 if (sz != nsize)
2481 panic("uvm_map_replace6");
2482 }
2483 #endif
2484
2485 /*
2486 * map entry is a valid blank! replace it. (this does all the
2487 * work of map entry link/unlink...).
2488 */
2489
2490 if (newents) {
2491 last = newents->prev;
2492
2493 /* critical: flush stale hints out of map */
2494 SAVE_HINT(map, map->hint, newents);
2495 if (map->first_free == oldent)
2496 map->first_free = last;
2497
2498 last->next = oldent->next;
2499 last->next->prev = last;
2500
2501 /* Fix RB tree */
2502 uvm_rb_remove(map, oldent);
2503
2504 newents->prev = oldent->prev;
2505 newents->prev->next = newents;
2506 map->nentries = map->nentries + (nnewents - 1);
2507
2508 /* Fixup the RB tree */
2509 {
2510 int i;
2511 struct vm_map_entry *tmp;
2512
2513 tmp = newents;
2514 for (i = 0; i < nnewents && tmp; i++) {
2515 uvm_rb_insert(map, tmp);
2516 tmp = tmp->next;
2517 }
2518 }
2519 } else {
2520 /* NULL list of new entries: just remove the old one */
2521 clear_hints(map, oldent);
2522 uvm_map_entry_unlink(map, oldent);
2523 }
2524 map->size -= end - start - nsize;
2525
2526 uvm_map_check(map, "map_replace leave");
2527
2528 /*
2529 * now we can free the old blank entry and return.
2530 */
2531
2532 *oldentryp = oldent;
2533 return (true);
2534 }
2535
2536 /*
2537 * uvm_map_extract: extract a mapping from a map and put it somewhere
2538 * (maybe removing the old mapping)
2539 *
2540 * => maps should be unlocked (we will write lock them)
2541 * => returns 0 on success, error code otherwise
2542 * => start must be page aligned
2543 * => len must be page sized
2544 * => flags:
2545 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2546 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2547 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2548 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2549 * UVM_EXTRACT_PROT_ALL: set prot to UVM_PROT_ALL as we go
2550 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2551 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2552 * be used from within the kernel in a kernel level map <<<
2553 */
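
/*
 * overview of the numbered steps in the code below:
 *	0: sanity check the arguments
 *	1: reserve blank space in dstmap (skipped if UVM_EXTRACT_RESERVED)
 *	2: lock srcmap, find the first entry of interest
 *	3: walk srcmap, building a chain of new entries for dstmap
 *	4: close off the chain in the format uvm_map_replace() expects
 *	5: try-lock dstmap; if that works, replace the blank entry now
 *	6: walk srcmap again to pmap_copy() and/or remove the entries
 *	7: unlock srcmap; if step 5 failed, do the deferred replace now
 */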
2554
2555 int
2556 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2557 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2558 {
2559 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2560 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2561 *deadentry, *oldentry;
2562 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2563 vsize_t elen __unused;
2564 int nchain, error, copy_ok;
2565 vsize_t nsize;
2566 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2567
2568 UVMHIST_LOG(maphist,"(srcmap=%p,start=%#lx, len=%#lx", srcmap, start,
2569 len,0);
2570 UVMHIST_LOG(maphist," ...,dstmap=%p, flags=%#x)", dstmap,flags,0,0);
2571
2572 /*
2573 * step 0: sanity check: start must be on a page boundary, length
2574 * must be page sized. can't ask for CONTIG/QREF if you asked for
2575 * REMOVE.
2576 */
2577
2578 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2579 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2580 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2581
2582 /*
2583 * step 1: reserve space in the target map for the extracted area
2584 */
2585
2586 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2587 dstaddr = vm_map_min(dstmap);
2588 if (!uvm_map_reserve(dstmap, len, start,
2589 atop(start) & uvmexp.colormask, &dstaddr,
2590 UVM_FLAG_COLORMATCH))
2591 return (ENOMEM);
2592 KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0);
2593 *dstaddrp = dstaddr; /* pass address back to caller */
2594 UVMHIST_LOG(maphist, " dstaddr=%#lx", dstaddr,0,0,0);
2595 } else {
2596 dstaddr = *dstaddrp;
2597 }
2598
2599 /*
2600 * step 2: setup for the extraction process loop by init'ing the
2601 * map entry chain, locking src map, and looking up the first useful
2602 * entry in the map.
2603 */
2604
2605 end = start + len;
2606 newend = dstaddr + len;
2607 chain = endchain = NULL;
2608 nchain = 0;
2609 nsize = 0;
2610 vm_map_lock(srcmap);
2611
2612 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2613
2614 /* "start" is within an entry */
2615 if (flags & UVM_EXTRACT_QREF) {
2616
2617 /*
2618 * for quick references we don't clip the entry, so
2619 * the entry may map space "before" the starting
2620 * virtual address... this is the "fudge" factor
2621 * (which can be non-zero only the first time
2622 * through the "while" loop in step 3).
2623 */
2624
2625 fudge = start - entry->start;
2626 } else {
2627
2628 /*
2629 			 * normal reference: we clip the entry to fit (thus
2630 * fudge is zero)
2631 */
2632
2633 UVM_MAP_CLIP_START(srcmap, entry, start);
2634 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2635 fudge = 0;
2636 }
2637 } else {
2638
2639 /* "start" is not within an entry ... skip to next entry */
2640 if (flags & UVM_EXTRACT_CONTIG) {
2641 error = EINVAL;
2642 goto bad; /* definite hole here ... */
2643 }
2644
2645 entry = entry->next;
2646 fudge = 0;
2647 }
2648
2649 /* save values from srcmap for step 6 */
2650 orig_entry = entry;
2651 orig_fudge = fudge;
2652
2653 /*
2654 * step 3: now start looping through the map entries, extracting
2655 * as we go.
2656 */
2657
2658 while (entry->start < end && entry != &srcmap->header) {
2659
2660 /* if we are not doing a quick reference, clip it */
2661 if ((flags & UVM_EXTRACT_QREF) == 0)
2662 UVM_MAP_CLIP_END(srcmap, entry, end);
2663
2664 /* clear needs_copy (allow chunking) */
2665 if (UVM_ET_ISNEEDSCOPY(entry)) {
2666 amap_copy(srcmap, entry,
2667 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2668 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2669 error = ENOMEM;
2670 goto bad;
2671 }
2672
2673 /* amap_copy could clip (during chunk)! update fudge */
2674 if (fudge) {
2675 fudge = start - entry->start;
2676 orig_fudge = fudge;
2677 }
2678 }
2679
2680 /* calculate the offset of this from "start" */
2681 oldoffset = (entry->start + fudge) - start;
2682
2683 /* allocate a new map entry */
2684 newentry = uvm_mapent_alloc(dstmap, 0);
2685 if (newentry == NULL) {
2686 error = ENOMEM;
2687 goto bad;
2688 }
2689
2690 /* set up new map entry */
2691 newentry->next = NULL;
2692 newentry->prev = endchain;
2693 newentry->start = dstaddr + oldoffset;
2694 newentry->end =
2695 newentry->start + (entry->end - (entry->start + fudge));
2696 if (newentry->end > newend || newentry->end < newentry->start)
2697 newentry->end = newend;
2698 newentry->object.uvm_obj = entry->object.uvm_obj;
2699 if (newentry->object.uvm_obj) {
2700 if (newentry->object.uvm_obj->pgops->pgo_reference)
2701 newentry->object.uvm_obj->pgops->
2702 pgo_reference(newentry->object.uvm_obj);
2703 newentry->offset = entry->offset + fudge;
2704 } else {
2705 newentry->offset = 0;
2706 }
2707 newentry->etype = entry->etype;
2708 if (flags & UVM_EXTRACT_PROT_ALL) {
2709 newentry->protection = newentry->max_protection =
2710 UVM_PROT_ALL;
2711 } else {
2712 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2713 entry->max_protection : entry->protection;
2714 newentry->max_protection = entry->max_protection;
2715 }
2716 newentry->inheritance = entry->inheritance;
2717 newentry->wired_count = 0;
2718 newentry->aref.ar_amap = entry->aref.ar_amap;
2719 if (newentry->aref.ar_amap) {
2720 newentry->aref.ar_pageoff =
2721 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2722 uvm_map_reference_amap(newentry, AMAP_SHARED |
2723 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2724 } else {
2725 newentry->aref.ar_pageoff = 0;
2726 }
2727 newentry->advice = entry->advice;
2728 if ((flags & UVM_EXTRACT_QREF) != 0) {
2729 newentry->flags |= UVM_MAP_NOMERGE;
2730 }
2731
2732 /* now link it on the chain */
2733 nchain++;
2734 nsize += newentry->end - newentry->start;
2735 if (endchain == NULL) {
2736 chain = endchain = newentry;
2737 } else {
2738 endchain->next = newentry;
2739 endchain = newentry;
2740 }
2741
2742 /* end of 'while' loop! */
2743 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2744 (entry->next == &srcmap->header ||
2745 entry->next->start != entry->end)) {
2746 error = EINVAL;
2747 goto bad;
2748 }
2749 entry = entry->next;
2750 fudge = 0;
2751 }
2752
2753 /*
2754 * step 4: close off chain (in format expected by uvm_map_replace)
2755 */
2756
2757 if (chain)
2758 chain->prev = endchain;
2759
2760 /*
2761 * step 5: attempt to lock the dest map so we can pmap_copy.
2762 * note usage of copy_ok:
2763 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2764 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2765 */
2766
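	/*
	 * vm_map_lock_try() is used instead of vm_map_lock() because we
	 * already hold the srcmap lock: sleeping for the dstmap lock
	 * here could deadlock against a thread that holds dstmap and
	 * wants srcmap.  if the try fails, we simply defer the
	 * replacement until srcmap has been unlocked (step 7).
	 */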
2767 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2768 copy_ok = 1;
2769 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2770 nchain, nsize, &resentry)) {
2771 if (srcmap != dstmap)
2772 vm_map_unlock(dstmap);
2773 error = EIO;
2774 goto bad;
2775 }
2776 } else {
2777 copy_ok = 0;
2778 		/* replace deferred until step 7 */
2779 }
2780
2781 /*
2782 * step 6: traverse the srcmap a second time to do the following:
2783 * - if we got a lock on the dstmap do pmap_copy
2784 * - if UVM_EXTRACT_REMOVE remove the entries
2785 * we make use of orig_entry and orig_fudge (saved in step 2)
2786 */
2787
2788 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2789
2790 /* purge possible stale hints from srcmap */
2791 if (flags & UVM_EXTRACT_REMOVE) {
2792 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2793 if (srcmap->first_free != &srcmap->header &&
2794 srcmap->first_free->start >= start)
2795 srcmap->first_free = orig_entry->prev;
2796 }
2797
2798 entry = orig_entry;
2799 fudge = orig_fudge;
2800 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2801
2802 while (entry->start < end && entry != &srcmap->header) {
2803 if (copy_ok) {
2804 oldoffset = (entry->start + fudge) - start;
2805 elen = MIN(end, entry->end) -
2806 (entry->start + fudge);
2807 pmap_copy(dstmap->pmap, srcmap->pmap,
2808 dstaddr + oldoffset, elen,
2809 entry->start + fudge);
2810 }
2811
2812 /* we advance "entry" in the following if statement */
2813 if (flags & UVM_EXTRACT_REMOVE) {
2814 uvm_map_lock_entry(entry);
2815 pmap_remove(srcmap->pmap, entry->start,
2816 entry->end);
2817 uvm_map_unlock_entry(entry);
2818 oldentry = entry; /* save entry */
2819 entry = entry->next; /* advance */
2820 uvm_map_entry_unlink(srcmap, oldentry);
2821 /* add to dead list */
2822 oldentry->next = deadentry;
2823 deadentry = oldentry;
2824 } else {
2825 entry = entry->next; /* advance */
2826 }
2827
2828 /* end of 'while' loop */
2829 fudge = 0;
2830 }
2831 pmap_update(srcmap->pmap);
2832
2833 /*
2834 * unlock dstmap. we will dispose of deadentry in
2835 * step 7 if needed
2836 */
2837
2838 if (copy_ok && srcmap != dstmap)
2839 vm_map_unlock(dstmap);
2840
2841 } else {
2842 deadentry = NULL;
2843 }
2844
2845 /*
2846 * step 7: we are done with the source map, unlock. if copy_ok
2847 * is 0 then we have not replaced the dummy mapping in dstmap yet
2848 * and we need to do so now.
2849 */
2850
2851 vm_map_unlock(srcmap);
2852 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2853 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2854
2855 /* now do the replacement if we didn't do it in step 5 */
2856 if (copy_ok == 0) {
2857 vm_map_lock(dstmap);
2858 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2859 nchain, nsize, &resentry);
2860 vm_map_unlock(dstmap);
2861
2862 if (error == false) {
2863 error = EIO;
2864 goto bad2;
2865 }
2866 }
2867
2868 if (resentry != NULL)
2869 uvm_mapent_free(resentry);
2870
2871 return (0);
2872
2873 /*
2874 * bad: failure recovery
2875 */
2876 bad:
2877 vm_map_unlock(srcmap);
2878 bad2: /* src already unlocked */
2879 if (chain)
2880 uvm_unmap_detach(chain,
2881 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2882
2883 if (resentry != NULL)
2884 uvm_mapent_free(resentry);
2885
2886 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2887 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2888 }
2889 return (error);
2890 }
2891
2892 /* end of extraction functions */
2893
2894 /*
2895 * uvm_map_submap: punch down part of a map into a submap
2896 *
2897 * => only the kernel_map is allowed to be submapped
2898 * => the purpose of submapping is to break up the locking granularity
2899 * of a larger map
2900 * => the range specified must have been mapped previously with a uvm_map()
2901 * call [with uobj==NULL] to create a blank map entry in the main map.
2902 * [And it had better still be blank!]
2903 * => maps which contain submaps should never be copied or forked.
2904 * => to remove a submap, use uvm_unmap() on the main map
2905 * and then uvm_map_deallocate() the submap.
2906 * => main map must be unlocked.
2907 * => submap must have been init'd and have a zero reference count.
2908 * [need not be locked as we don't actually reference it]
2909 */
2910
2911 int
2912 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2913 struct vm_map *submap)
2914 {
2915 struct vm_map_entry *entry;
2916 int error;
2917
2918 vm_map_lock(map);
2919 VM_MAP_RANGE_CHECK(map, start, end);
2920
2921 if (uvm_map_lookup_entry(map, start, &entry)) {
2922 UVM_MAP_CLIP_START(map, entry, start);
2923 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
2924 } else {
2925 entry = NULL;
2926 }
2927
2928 if (entry != NULL &&
2929 entry->start == start && entry->end == end &&
2930 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2931 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2932 entry->etype |= UVM_ET_SUBMAP;
2933 entry->object.sub_map = submap;
2934 entry->offset = 0;
2935 uvm_map_reference(submap);
2936 error = 0;
2937 } else {
2938 error = EINVAL;
2939 }
2940 vm_map_unlock(map);
2941
2942 return error;
2943 }
2944
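/*
 * typical setup, as performed by uvm_km_suballoc() for kernel submaps
 * (a sketch; arguments simplified):
 *
 *	uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0));		/- creates the blank entry -/
 *	uvm_map_setup(submap, va, va + size, VM_MAP_PAGEABLE);
 *	pmap_reference(vm_map_pmap(kernel_map));
 *	submap->pmap = vm_map_pmap(kernel_map);
 *	uvm_map_submap(kernel_map, va, va + size, submap);
 */
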
2945 /*
2946 * uvm_map_protect: change map protection
2947 *
2948 * => set_max means set max_protection.
2949 * => map must be unlocked.
2950 */
2951
2952 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
2953 ~VM_PROT_WRITE : VM_PROT_ALL)
2954
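/*
 * MASK() strips VM_PROT_WRITE from copy-on-write entries before the
 * pmap_protect() call below: a COW mapping must not become writable
 * in the pmap until uvm_fault() has actually copied the page, so
 * write access is only ever established by the fault path.
 */
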
2955 int
2956 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2957 vm_prot_t new_prot, bool set_max)
2958 {
2959 struct vm_map_entry *current, *entry;
2960 int error = 0;
2961 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2962 UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_prot=%#x)",
2963 map, start, end, new_prot);
2964
2965 vm_map_lock(map);
2966 VM_MAP_RANGE_CHECK(map, start, end);
2967 if (uvm_map_lookup_entry(map, start, &entry)) {
2968 UVM_MAP_CLIP_START(map, entry, start);
2969 } else {
2970 entry = entry->next;
2971 }
2972
2973 /*
2974 * make a first pass to check for protection violations.
2975 */
2976
2977 current = entry;
2978 while ((current != &map->header) && (current->start < end)) {
2979 if (UVM_ET_ISSUBMAP(current)) {
2980 error = EINVAL;
2981 goto out;
2982 }
2983 if ((new_prot & current->max_protection) != new_prot) {
2984 error = EACCES;
2985 goto out;
2986 }
2987 /*
2988 * Don't allow VM_PROT_EXECUTE to be set on entries that
2989 * point to vnodes that are associated with a NOEXEC file
2990 * system.
2991 */
2992 if (UVM_ET_ISOBJ(current) &&
2993 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
2994 struct vnode *vp =
2995 (struct vnode *) current->object.uvm_obj;
2996
2997 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
2998 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
2999 error = EACCES;
3000 goto out;
3001 }
3002 }
3003
3004 current = current->next;
3005 }
3006
3007 /* go back and fix up protections (no need to clip this time). */
3008
3009 current = entry;
3010 while ((current != &map->header) && (current->start < end)) {
3011 vm_prot_t old_prot;
3012
3013 UVM_MAP_CLIP_END(map, current, end);
3014 old_prot = current->protection;
3015 if (set_max)
3016 current->protection =
3017 (current->max_protection = new_prot) & old_prot;
3018 else
3019 current->protection = new_prot;
3020
3021 /*
3022 * update physical map if necessary. worry about copy-on-write
3023 * here -- CHECK THIS XXX
3024 */
3025
3026 if (current->protection != old_prot) {
3027 /* update pmap! */
3028 uvm_map_lock_entry(current);
3029 pmap_protect(map->pmap, current->start, current->end,
3030 current->protection & MASK(entry));
3031 uvm_map_unlock_entry(current);
3032
3033 /*
3034 * If this entry points at a vnode, and the
3035 * protection includes VM_PROT_EXECUTE, mark
3036 * the vnode as VEXECMAP.
3037 */
3038 if (UVM_ET_ISOBJ(current)) {
3039 struct uvm_object *uobj =
3040 current->object.uvm_obj;
3041
3042 if (UVM_OBJ_IS_VNODE(uobj) &&
3043 (current->protection & VM_PROT_EXECUTE)) {
3044 vn_markexec((struct vnode *) uobj);
3045 }
3046 }
3047 }
3048
3049 /*
3050 * If the map is configured to lock any future mappings,
3051 * wire this entry now if the old protection was VM_PROT_NONE
3052 * and the new protection is not VM_PROT_NONE.
3053 */
3054
3055 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3056 VM_MAPENT_ISWIRED(entry) == 0 &&
3057 old_prot == VM_PROT_NONE &&
3058 new_prot != VM_PROT_NONE) {
3059 if (uvm_map_pageable(map, entry->start,
3060 entry->end, false,
3061 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3062
3063 /*
3064 * If locking the entry fails, remember the
3065 * error if it's the first one. Note we
3066 * still continue setting the protection in
3067 * the map, but will return the error
3068 * condition regardless.
3069 *
3070 * XXX Ignore what the actual error is,
3071 * XXX just call it a resource shortage
3072 * XXX so that it doesn't get confused
3073 * XXX what uvm_map_protect() itself would
3074 * XXX normally return.
3075 */
3076
3077 error = ENOMEM;
3078 }
3079 }
3080 current = current->next;
3081 }
3082 pmap_update(map->pmap);
3083
3084 out:
3085 vm_map_unlock(map);
3086
3087 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3088 return error;
3089 }
3090
3091 #undef MASK
3092
3093 /*
3094 * uvm_map_inherit: set inheritance code for range of addrs in map.
3095 *
3096 * => map must be unlocked
3097 * => note that the inherit code is used during a "fork". see fork
3098 * code for details.
3099 */
3100
3101 int
3102 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3103 vm_inherit_t new_inheritance)
3104 {
3105 struct vm_map_entry *entry, *temp_entry;
3106 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3107 UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_inh=%#x)",
3108 map, start, end, new_inheritance);
3109
3110 switch (new_inheritance) {
3111 case MAP_INHERIT_NONE:
3112 case MAP_INHERIT_COPY:
3113 case MAP_INHERIT_SHARE:
3114 case MAP_INHERIT_ZERO:
3115 break;
3116 default:
3117 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3118 return EINVAL;
3119 }
3120
3121 vm_map_lock(map);
3122 VM_MAP_RANGE_CHECK(map, start, end);
3123 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3124 entry = temp_entry;
3125 UVM_MAP_CLIP_START(map, entry, start);
3126 } else {
3127 entry = temp_entry->next;
3128 }
3129 while ((entry != &map->header) && (entry->start < end)) {
3130 UVM_MAP_CLIP_END(map, entry, end);
3131 entry->inheritance = new_inheritance;
3132 entry = entry->next;
3133 }
3134 vm_map_unlock(map);
3135 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3136 return 0;
3137 }
3138
3139 /*
3140 * uvm_map_advice: set advice code for range of addrs in map.
3141 *
3142 * => map must be unlocked
3143 */
3144
3145 int
3146 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3147 {
3148 struct vm_map_entry *entry, *temp_entry;
3149 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3150 UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_adv=%#x)",
3151 map, start, end, new_advice);
3152
3153 vm_map_lock(map);
3154 VM_MAP_RANGE_CHECK(map, start, end);
3155 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3156 entry = temp_entry;
3157 UVM_MAP_CLIP_START(map, entry, start);
3158 } else {
3159 entry = temp_entry->next;
3160 }
3161
3162 /*
3163 * XXXJRT: disallow holes?
3164 */
3165
3166 while ((entry != &map->header) && (entry->start < end)) {
3167 UVM_MAP_CLIP_END(map, entry, end);
3168
3169 switch (new_advice) {
3170 case MADV_NORMAL:
3171 case MADV_RANDOM:
3172 case MADV_SEQUENTIAL:
3173 /* nothing special here */
3174 break;
3175
3176 default:
3177 vm_map_unlock(map);
3178 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3179 return EINVAL;
3180 }
3181 entry->advice = new_advice;
3182 entry = entry->next;
3183 }
3184
3185 vm_map_unlock(map);
3186 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3187 return 0;
3188 }
3189
3190 /*
3191 * uvm_map_willneed: apply MADV_WILLNEED
3192 */
3193
3194 int
3195 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3196 {
3197 struct vm_map_entry *entry;
3198 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3199 UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx)",
3200 map, start, end, 0);
3201
3202 vm_map_lock_read(map);
3203 VM_MAP_RANGE_CHECK(map, start, end);
3204 if (!uvm_map_lookup_entry(map, start, &entry)) {
3205 entry = entry->next;
3206 }
3207 while (entry->start < end) {
3208 struct vm_amap * const amap = entry->aref.ar_amap;
3209 struct uvm_object * const uobj = entry->object.uvm_obj;
3210
3211 KASSERT(entry != &map->header);
3212 KASSERT(start < entry->end);
3213 /*
3214 * For now, we handle only the easy but commonly-requested case.
3215 		 * i.e. start prefetching of backing uobj pages.
3216 *
3217 * XXX It might be useful to pmap_enter() the already-in-core
3218 * pages by inventing a "weak" mode for uvm_fault() which would
3219 * only do the PGO_LOCKED pgo_get().
3220 */
3221 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3222 off_t offset;
3223 off_t size;
3224
3225 offset = entry->offset;
3226 if (start < entry->start) {
3227 offset += entry->start - start;
3228 }
3229 size = entry->offset + (entry->end - entry->start);
3230 if (entry->end < end) {
3231 size -= end - entry->end;
3232 }
3233 uvm_readahead(uobj, offset, size);
3234 }
3235 entry = entry->next;
3236 }
3237 vm_map_unlock_read(map);
3238 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3239 return 0;
3240 }
3241
3242 /*
3243 * uvm_map_pageable: sets the pageability of a range in a map.
3244 *
3245 * => wires map entries. should not be used for transient page locking.
3246 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3247 * => regions specified as not pageable require lock-down (wired) memory
3248 * and page tables.
3249 * => map must never be read-locked
3250 * => if islocked is true, map is already write-locked
3251 * => we always unlock the map, since we must downgrade to a read-lock
3252 * to call uvm_fault_wire()
3253 * => XXXCDC: check this and try and clean it up.
3254 */
3255
3256 int
3257 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3258 bool new_pageable, int lockflags)
3259 {
3260 struct vm_map_entry *entry, *start_entry, *failed_entry;
3261 int rv;
3262 #ifdef DIAGNOSTIC
3263 u_int timestamp_save;
3264 #endif
3265 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3266 UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_pageable=%u)",
3267 map, start, end, new_pageable);
3268 KASSERT(map->flags & VM_MAP_PAGEABLE);
3269
3270 if ((lockflags & UVM_LK_ENTER) == 0)
3271 vm_map_lock(map);
3272 VM_MAP_RANGE_CHECK(map, start, end);
3273
3274 /*
3275 * only one pageability change may take place at one time, since
3276 * uvm_fault_wire assumes it will be called only once for each
3277 * wiring/unwiring. therefore, we have to make sure we're actually
3278 * changing the pageability for the entire region. we do so before
3279 * making any changes.
3280 */
3281
3282 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3283 if ((lockflags & UVM_LK_EXIT) == 0)
3284 vm_map_unlock(map);
3285
3286 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3287 return EFAULT;
3288 }
3289 entry = start_entry;
3290
3291 /*
3292 * handle wiring and unwiring separately.
3293 */
3294
3295 if (new_pageable) { /* unwire */
3296 UVM_MAP_CLIP_START(map, entry, start);
3297
3298 /*
3299 * unwiring. first ensure that the range to be unwired is
3300 * really wired down and that there are no holes.
3301 */
3302
3303 while ((entry != &map->header) && (entry->start < end)) {
3304 if (entry->wired_count == 0 ||
3305 (entry->end < end &&
3306 (entry->next == &map->header ||
3307 entry->next->start > entry->end))) {
3308 if ((lockflags & UVM_LK_EXIT) == 0)
3309 vm_map_unlock(map);
3310 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3311 return EINVAL;
3312 }
3313 entry = entry->next;
3314 }
3315
3316 /*
3317 * POSIX 1003.1b - a single munlock call unlocks a region,
3318 * regardless of the number of mlock calls made on that
3319 * region.
3320 */
3321
3322 entry = start_entry;
3323 while ((entry != &map->header) && (entry->start < end)) {
3324 UVM_MAP_CLIP_END(map, entry, end);
3325 if (VM_MAPENT_ISWIRED(entry))
3326 uvm_map_entry_unwire(map, entry);
3327 entry = entry->next;
3328 }
3329 if ((lockflags & UVM_LK_EXIT) == 0)
3330 vm_map_unlock(map);
3331 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3332 return 0;
3333 }
3334
3335 /*
3336 * wire case: in two passes [XXXCDC: ugly block of code here]
3337 *
3338 * 1: holding the write lock, we create any anonymous maps that need
3339 * to be created. then we clip each map entry to the region to
3340 * be wired and increment its wiring count.
3341 *
3342 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3343 * in the pages for any newly wired area (wired_count == 1).
3344 *
3345 * downgrading to a read lock for uvm_fault_wire avoids a possible
3346 * deadlock with another thread that may have faulted on one of
3347 * the pages to be wired (it would mark the page busy, blocking
3348 * us, then in turn block on the map lock that we hold). because
3349 * of problems in the recursive lock package, we cannot upgrade
3350 * to a write lock in vm_map_lookup. thus, any actions that
3351 * require the write lock must be done beforehand. because we
3352 * keep the read lock on the map, the copy-on-write status of the
3353 * entries we modify here cannot change.
3354 */
3355
3356 while ((entry != &map->header) && (entry->start < end)) {
3357 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3358
3359 /*
3360 * perform actions of vm_map_lookup that need the
3361 * write lock on the map: create an anonymous map
3362 * for a copy-on-write region, or an anonymous map
3363 * for a zero-fill region. (XXXCDC: submap case
3364 * ok?)
3365 */
3366
3367 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3368 if (UVM_ET_ISNEEDSCOPY(entry) &&
3369 ((entry->max_protection & VM_PROT_WRITE) ||
3370 (entry->object.uvm_obj == NULL))) {
3371 amap_copy(map, entry, 0, start, end);
3372 /* XXXCDC: wait OK? */
3373 }
3374 }
3375 }
3376 UVM_MAP_CLIP_START(map, entry, start);
3377 UVM_MAP_CLIP_END(map, entry, end);
3378 entry->wired_count++;
3379
3380 /*
3381 * Check for holes
3382 */
3383
3384 if (entry->protection == VM_PROT_NONE ||
3385 (entry->end < end &&
3386 (entry->next == &map->header ||
3387 entry->next->start > entry->end))) {
3388
3389 /*
3390 * found one. amap creation actions do not need to
3391 * be undone, but the wired counts need to be restored.
3392 */
3393
3394 while (entry != &map->header && entry->end > start) {
3395 entry->wired_count--;
3396 entry = entry->prev;
3397 }
3398 if ((lockflags & UVM_LK_EXIT) == 0)
3399 vm_map_unlock(map);
3400 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3401 return EINVAL;
3402 }
3403 entry = entry->next;
3404 }
3405
3406 /*
3407 * Pass 2.
3408 */
3409
3410 #ifdef DIAGNOSTIC
3411 timestamp_save = map->timestamp;
3412 #endif
3413 vm_map_busy(map);
3414 vm_map_unlock(map);
3415
3416 rv = 0;
3417 entry = start_entry;
3418 while (entry != &map->header && entry->start < end) {
3419 if (entry->wired_count == 1) {
3420 rv = uvm_fault_wire(map, entry->start, entry->end,
3421 entry->max_protection, 1);
3422 if (rv) {
3423
3424 /*
3425 * wiring failed. break out of the loop.
3426 * we'll clean up the map below, once we
3427 * have a write lock again.
3428 */
3429
3430 break;
3431 }
3432 }
3433 entry = entry->next;
3434 }
3435
3436 if (rv) { /* failed? */
3437
3438 /*
3439 * Get back to an exclusive (write) lock.
3440 */
3441
3442 vm_map_lock(map);
3443 vm_map_unbusy(map);
3444
3445 #ifdef DIAGNOSTIC
3446 if (timestamp_save + 1 != map->timestamp)
3447 panic("uvm_map_pageable: stale map");
3448 #endif
3449
3450 /*
3451 * first drop the wiring count on all the entries
3452 * which haven't actually been wired yet.
3453 */
3454
3455 failed_entry = entry;
3456 while (entry != &map->header && entry->start < end) {
3457 entry->wired_count--;
3458 entry = entry->next;
3459 }
3460
3461 /*
3462 * now, unwire all the entries that were successfully
3463 * wired above.
3464 */
3465
3466 entry = start_entry;
3467 while (entry != failed_entry) {
3468 entry->wired_count--;
3469 if (VM_MAPENT_ISWIRED(entry) == 0)
3470 uvm_map_entry_unwire(map, entry);
3471 entry = entry->next;
3472 }
3473 if ((lockflags & UVM_LK_EXIT) == 0)
3474 vm_map_unlock(map);
3475 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3476 return (rv);
3477 }
3478
3479 if ((lockflags & UVM_LK_EXIT) == 0) {
3480 vm_map_unbusy(map);
3481 } else {
3482
3483 /*
3484 * Get back to an exclusive (write) lock.
3485 */
3486
3487 vm_map_lock(map);
3488 vm_map_unbusy(map);
3489 }
3490
3491 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3492 return 0;
3493 }
3494
3495 /*
3496 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3497 * all mapped regions.
3498 *
3499 * => map must not be locked.
3500 * => if no flags are specified, all regions are unwired.
3501 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3502 */
3503
3504 int
3505 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3506 {
3507 struct vm_map_entry *entry, *failed_entry;
3508 vsize_t size;
3509 int rv;
3510 #ifdef DIAGNOSTIC
3511 u_int timestamp_save;
3512 #endif
3513 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3514 UVMHIST_LOG(maphist,"(map=%p,flags=%#x)", map, flags, 0, 0);
3515
3516 KASSERT(map->flags & VM_MAP_PAGEABLE);
3517
3518 vm_map_lock(map);
3519
3520 /*
3521 * handle wiring and unwiring separately.
3522 */
3523
3524 if (flags == 0) { /* unwire */
3525
3526 /*
3527 * POSIX 1003.1b -- munlockall unlocks all regions,
3528 * regardless of how many times mlockall has been called.
3529 */
3530
3531 for (entry = map->header.next; entry != &map->header;
3532 entry = entry->next) {
3533 if (VM_MAPENT_ISWIRED(entry))
3534 uvm_map_entry_unwire(map, entry);
3535 }
3536 map->flags &= ~VM_MAP_WIREFUTURE;
3537 vm_map_unlock(map);
3538 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3539 return 0;
3540 }
3541
3542 if (flags & MCL_FUTURE) {
3543
3544 /*
3545 * must wire all future mappings; remember this.
3546 */
3547
3548 map->flags |= VM_MAP_WIREFUTURE;
3549 }
3550
3551 if ((flags & MCL_CURRENT) == 0) {
3552
3553 /*
3554 * no more work to do!
3555 */
3556
3557 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3558 vm_map_unlock(map);
3559 return 0;
3560 }
3561
3562 /*
3563 * wire case: in three passes [XXXCDC: ugly block of code here]
3564 *
3565 * 1: holding the write lock, count all pages mapped by non-wired
3566 * entries. if this would cause us to go over our limit, we fail.
3567 *
3568 * 2: still holding the write lock, we create any anonymous maps that
3569 	 * need to be created.  then we increment each entry's wiring count.
3570 *
3571 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3572 * in the pages for any newly wired area (wired_count == 1).
3573 *
3574 * downgrading to a read lock for uvm_fault_wire avoids a possible
3575 * deadlock with another thread that may have faulted on one of
3576 * the pages to be wired (it would mark the page busy, blocking
3577 * us, then in turn block on the map lock that we hold). because
3578 * of problems in the recursive lock package, we cannot upgrade
3579 * to a write lock in vm_map_lookup. thus, any actions that
3580 * require the write lock must be done beforehand. because we
3581 * keep the read lock on the map, the copy-on-write status of the
3582 * entries we modify here cannot change.
3583 */
3584
3585 for (size = 0, entry = map->header.next; entry != &map->header;
3586 entry = entry->next) {
3587 if (entry->protection != VM_PROT_NONE &&
3588 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3589 size += entry->end - entry->start;
3590 }
3591 }
3592
3593 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3594 vm_map_unlock(map);
3595 return ENOMEM;
3596 }
3597
3598 if (limit != 0 &&
3599 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3600 vm_map_unlock(map);
3601 return ENOMEM;
3602 }
3603
3604 /*
3605 * Pass 2.
3606 */
3607
3608 for (entry = map->header.next; entry != &map->header;
3609 entry = entry->next) {
3610 if (entry->protection == VM_PROT_NONE)
3611 continue;
3612 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3613
3614 /*
3615 * perform actions of vm_map_lookup that need the
3616 * write lock on the map: create an anonymous map
3617 * for a copy-on-write region, or an anonymous map
3618 * for a zero-fill region. (XXXCDC: submap case
3619 * ok?)
3620 */
3621
3622 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3623 if (UVM_ET_ISNEEDSCOPY(entry) &&
3624 ((entry->max_protection & VM_PROT_WRITE) ||
3625 (entry->object.uvm_obj == NULL))) {
3626 amap_copy(map, entry, 0, entry->start,
3627 entry->end);
3628 /* XXXCDC: wait OK? */
3629 }
3630 }
3631 }
3632 entry->wired_count++;
3633 }
3634
3635 /*
3636 * Pass 3.
3637 */
3638
3639 #ifdef DIAGNOSTIC
3640 timestamp_save = map->timestamp;
3641 #endif
3642 vm_map_busy(map);
3643 vm_map_unlock(map);
3644
3645 rv = 0;
3646 for (entry = map->header.next; entry != &map->header;
3647 entry = entry->next) {
3648 if (entry->wired_count == 1) {
3649 rv = uvm_fault_wire(map, entry->start, entry->end,
3650 entry->max_protection, 1);
3651 if (rv) {
3652
3653 /*
3654 * wiring failed. break out of the loop.
3655 * we'll clean up the map below, once we
3656 * have a write lock again.
3657 */
3658
3659 break;
3660 }
3661 }
3662 }
3663
3664 if (rv) {
3665
3666 /*
3667 * Get back an exclusive (write) lock.
3668 */
3669
3670 vm_map_lock(map);
3671 vm_map_unbusy(map);
3672
3673 #ifdef DIAGNOSTIC
3674 if (timestamp_save + 1 != map->timestamp)
3675 panic("uvm_map_pageable_all: stale map");
3676 #endif
3677
3678 /*
3679 * first drop the wiring count on all the entries
3680 * which haven't actually been wired yet.
3681 *
3682 * Skip VM_PROT_NONE entries like we did above.
3683 */
3684
3685 failed_entry = entry;
3686 for (/* nothing */; entry != &map->header;
3687 entry = entry->next) {
3688 if (entry->protection == VM_PROT_NONE)
3689 continue;
3690 entry->wired_count--;
3691 }
3692
3693 /*
3694 * now, unwire all the entries that were successfully
3695 * wired above.
3696 *
3697 * Skip VM_PROT_NONE entries like we did above.
3698 */
3699
3700 for (entry = map->header.next; entry != failed_entry;
3701 entry = entry->next) {
3702 if (entry->protection == VM_PROT_NONE)
3703 continue;
3704 entry->wired_count--;
3705 if (VM_MAPENT_ISWIRED(entry))
3706 uvm_map_entry_unwire(map, entry);
3707 }
3708 vm_map_unlock(map);
3709 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3710 return (rv);
3711 }
3712
3713 vm_map_unbusy(map);
3714
3715 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3716 return 0;
3717 }
3718
3719 /*
3720 * uvm_map_clean: clean out a map range
3721 *
3722 * => valid flags:
3723 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3724 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3725 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3726 * if (flags & PGO_FREE): any cached pages are freed after clean
3727 * => returns an error if any part of the specified range isn't mapped
3728 * => never a need to flush amap layer since the anonymous memory has
3729 * no permanent home, but may deactivate pages there
3730 * => called from sys_msync() and sys_madvise()
3731 * => caller must not write-lock map (read OK).
3732 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3733 */
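
/*
 * a rough picture of how callers map their arguments onto the flags
 * above (the authoritative translation lives in sys_msync() and
 * sys_madvise(), not here):
 *
 *	msync(MS_SYNC)		-> PGO_CLEANIT | PGO_SYNCIO
 *	msync(MS_ASYNC)		-> PGO_CLEANIT
 *	msync(MS_INVALIDATE)	-> adds PGO_FREE
 *	madvise(MADV_DONTNEED)	-> PGO_DEACTIVATE
 *	madvise(MADV_FREE)	-> PGO_FREE
 */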
3734
3735 int
3736 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3737 {
3738 struct vm_map_entry *current, *entry;
3739 struct uvm_object *uobj;
3740 struct vm_amap *amap;
3741 struct vm_anon *anon, *anon_tofree;
3742 struct vm_page *pg;
3743 vaddr_t offset;
3744 vsize_t size;
3745 voff_t uoff;
3746 int error, refs;
3747 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3748
3749 UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,flags=%#x)",
3750 map, start, end, flags);
3751 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3752 (PGO_FREE|PGO_DEACTIVATE));
3753
3754 vm_map_lock_read(map);
3755 VM_MAP_RANGE_CHECK(map, start, end);
3756 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3757 vm_map_unlock_read(map);
3758 return EFAULT;
3759 }
3760
3761 /*
3762 * Make a first pass to check for holes and wiring problems.
3763 */
3764
3765 for (current = entry; current->start < end; current = current->next) {
3766 if (UVM_ET_ISSUBMAP(current)) {
3767 vm_map_unlock_read(map);
3768 return EINVAL;
3769 }
3770 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3771 vm_map_unlock_read(map);
3772 return EBUSY;
3773 }
3774 if (end <= current->end) {
3775 break;
3776 }
3777 if (current->end != current->next->start) {
3778 vm_map_unlock_read(map);
3779 return EFAULT;
3780 }
3781 }
3782
3783 error = 0;
3784 for (current = entry; start < end; current = current->next) {
3785 amap = current->aref.ar_amap; /* upper layer */
3786 uobj = current->object.uvm_obj; /* lower layer */
3787 KASSERT(start >= current->start);
3788
3789 /*
3790 * No amap cleaning necessary if:
3791 *
3792 * (1) There's no amap.
3793 *
3794 * (2) We're not deactivating or freeing pages.
3795 */
3796
3797 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3798 goto flush_object;
3799
3800 offset = start - current->start;
3801 size = MIN(end, current->end) - start;
3802 anon_tofree = NULL;
3803
3804 amap_lock(amap);
3805 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3806 			anon = amap_lookup(&current->aref, offset);
3807 if (anon == NULL)
3808 continue;
3809
3810 KASSERT(anon->an_lock == amap->am_lock);
3811 pg = anon->an_page;
3812 if (pg == NULL) {
3813 continue;
3814 }
3815 if (pg->flags & PG_BUSY) {
3816 continue;
3817 }
3818
3819 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3820
3821 /*
3822 * In these first 3 cases, we just deactivate the page.
3823 */
3824
3825 case PGO_CLEANIT|PGO_FREE:
3826 case PGO_CLEANIT|PGO_DEACTIVATE:
3827 case PGO_DEACTIVATE:
3828 deactivate_it:
3829 /*
3830 * skip the page if it's loaned or wired,
3831 * since it shouldn't be on a paging queue
3832 * at all in these cases.
3833 */
3834
3835 mutex_enter(&uvm_pageqlock);
3836 if (pg->loan_count != 0 ||
3837 pg->wire_count != 0) {
3838 mutex_exit(&uvm_pageqlock);
3839 continue;
3840 }
3841 KASSERT(pg->uanon == anon);
3842 uvm_pagedeactivate(pg);
3843 mutex_exit(&uvm_pageqlock);
3844 continue;
3845
3846 case PGO_FREE:
3847
3848 /*
3849 * If there are multiple references to
3850 * the amap, just deactivate the page.
3851 */
3852
3853 if (amap_refs(amap) > 1)
3854 goto deactivate_it;
3855
3856 /* skip the page if it's wired */
3857 if (pg->wire_count != 0) {
3858 continue;
3859 }
3860 				amap_unadd(&current->aref, offset);
3861 refs = --anon->an_ref;
3862 if (refs == 0) {
3863 anon->an_link = anon_tofree;
3864 anon_tofree = anon;
3865 }
3866 continue;
3867 }
3868 }
3869 uvm_anon_freelst(amap, anon_tofree);
3870
3871 flush_object:
3872 /*
3873 * flush pages if we've got a valid backing object.
3874 * note that we must always clean object pages before
3875 * freeing them since otherwise we could reveal stale
3876 * data from files.
3877 */
3878
3879 uoff = current->offset + (start - current->start);
3880 size = MIN(end, current->end) - start;
3881 if (uobj != NULL) {
3882 mutex_enter(uobj->vmobjlock);
3883 if (uobj->pgops->pgo_put != NULL)
3884 error = (uobj->pgops->pgo_put)(uobj, uoff,
3885 uoff + size, flags | PGO_CLEANIT);
3886 else
3887 error = 0;
3888 }
3889 start += size;
3890 }
3891 vm_map_unlock_read(map);
3892 return (error);
3893 }
3894
3895
3896 /*
3897 * uvm_map_checkprot: check protection in map
3898 *
3899  * => the region must be fully allocated and must allow the specified
3900  *    protection.
3900 * => map must be read or write locked by caller.
3901 */
3902
3903 bool
3904 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3905 vm_prot_t protection)
3906 {
3907 struct vm_map_entry *entry;
3908 struct vm_map_entry *tmp_entry;
3909
3910 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3911 return (false);
3912 }
3913 entry = tmp_entry;
3914 while (start < end) {
3915 if (entry == &map->header) {
3916 return (false);
3917 }
3918
3919 /*
3920 * no holes allowed
3921 */
3922
3923 if (start < entry->start) {
3924 return (false);
3925 }
3926
3927 /*
3928 * check protection associated with entry
3929 */
3930
3931 if ((entry->protection & protection) != protection) {
3932 return (false);
3933 }
3934 start = entry->end;
3935 entry = entry->next;
3936 }
3937 return (true);
3938 }
3939
3940 /*
3941 * uvmspace_alloc: allocate a vmspace structure.
3942 *
3943 * - structure includes vm_map and pmap
3944 * - XXX: no locking on this structure
3945 * - refcnt set to 1, rest must be init'd by caller
3946 */
3947 struct vmspace *
3948 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
3949 {
3950 struct vmspace *vm;
3951 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3952
3953 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
3954 uvmspace_init(vm, NULL, vmin, vmax, topdown);
3955 UVMHIST_LOG(maphist,"<- done (vm=%p)", vm,0,0,0);
3956 return (vm);
3957 }
3958
3959 /*
3960 * uvmspace_init: initialize a vmspace structure.
3961 *
3962 * - XXX: no locking on this structure
3963 * - refcnt set to 1, rest must be init'd by caller
3964 */
3965 void
3966 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
3967 vaddr_t vmax, bool topdown)
3968 {
3969 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3970
3971 UVMHIST_LOG(maphist, "(vm=%p, pmap=%p, vmin=%#lx, vmax=%#lx",
3972 vm, pmap, vmin, vmax);
3973 UVMHIST_LOG(maphist, " topdown=%u)", topdown, 0, 0, 0);
3974
3975 memset(vm, 0, sizeof(*vm));
3976 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
3977 | (topdown ? VM_MAP_TOPDOWN : 0)
3978 );
3979 if (pmap)
3980 pmap_reference(pmap);
3981 else
3982 pmap = pmap_create();
3983 vm->vm_map.pmap = pmap;
3984 vm->vm_refcnt = 1;
3985 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3986 }
3987
3988 /*
3989 * uvmspace_share: share a vmspace between two processes
3990 *
3991 * - used for vfork, threads(?)
3992 */
3993
3994 void
3995 uvmspace_share(struct proc *p1, struct proc *p2)
3996 {
3997
3998 uvmspace_addref(p1->p_vmspace);
3999 p2->p_vmspace = p1->p_vmspace;
4000 }
4001
4002 #if 0
4003
4004 /*
4005 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4006 *
4007 * - XXX: no locking on vmspace
4008 */
4009
4010 void
4011 uvmspace_unshare(struct lwp *l)
4012 {
4013 struct proc *p = l->l_proc;
4014 struct vmspace *nvm, *ovm = p->p_vmspace;
4015
4016 if (ovm->vm_refcnt == 1)
4017 /* nothing to do: vmspace isn't shared in the first place */
4018 return;
4019
4020 /* make a new vmspace, still holding old one */
4021 nvm = uvmspace_fork(ovm);
4022
4023 kpreempt_disable();
4024 pmap_deactivate(l); /* unbind old vmspace */
4025 p->p_vmspace = nvm;
4026 pmap_activate(l); /* switch to new vmspace */
4027 kpreempt_enable();
4028
4029 uvmspace_free(ovm); /* drop reference to old vmspace */
4030 }
4031
4032 #endif
4033
4034
4035 /*
4036 * uvmspace_spawn: a new process has been spawned and needs a vmspace
4037 */
4038
4039 void
4040 uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4041 {
4042 struct proc *p = l->l_proc;
4043 struct vmspace *nvm;
4044
4045 #ifdef __HAVE_CPU_VMSPACE_EXEC
4046 cpu_vmspace_exec(l, start, end);
4047 #endif
4048
4049 nvm = uvmspace_alloc(start, end, topdown);
4050 kpreempt_disable();
4051 p->p_vmspace = nvm;
4052 pmap_activate(l);
4053 kpreempt_enable();
4054 }
4055
4056 /*
4057 * uvmspace_exec: the process wants to exec a new program
4058 */
4059
4060 void
4061 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4062 {
4063 struct proc *p = l->l_proc;
4064 struct vmspace *nvm, *ovm = p->p_vmspace;
4065 struct vm_map *map;
4066
4067 KASSERT(ovm != NULL);
4068 #ifdef __HAVE_CPU_VMSPACE_EXEC
4069 cpu_vmspace_exec(l, start, end);
4070 #endif
4071
4072 map = &ovm->vm_map;
4073 /*
4074 * see if more than one process is using this vmspace...
4075 */
4076
4077 if (ovm->vm_refcnt == 1
4078 && topdown == ((ovm->vm_map.flags & VM_MAP_TOPDOWN) != 0)) {
4079
4080 /*
4081 * if p is the only process using its vmspace then we can safely
4082 * recycle that vmspace for the program that is being exec'd.
4083 * But only if TOPDOWN matches the requested value for the new
4084 * vm space!
4085 */
4086
4087 /*
4088 * SYSV SHM semantics require us to kill all segments on an exec
4089 */
4090 if (uvm_shmexit && ovm->vm_shm)
4091 (*uvm_shmexit)(ovm);
4092
4093 /*
4094 * POSIX 1003.1b -- "lock future mappings" is revoked
4095 * when a process execs another program image.
4096 */
4097
4098 map->flags &= ~VM_MAP_WIREFUTURE;
4099
4100 /*
4101 * now unmap the old program
4102 */
4103
4104 pmap_remove_all(map->pmap);
4105 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4106 KASSERT(map->header.prev == &map->header);
4107 KASSERT(map->nentries == 0);
4108
4109 /*
4110 * resize the map
4111 */
4112
4113 vm_map_setmin(map, start);
4114 vm_map_setmax(map, end);
4115 } else {
4116
4117 /*
4118 * p's vmspace is being shared, so we can't reuse it for p since
4119 * it is still being used for others. allocate a new vmspace
4120 * for p
4121 */
4122
4123 nvm = uvmspace_alloc(start, end, topdown);
4124
4125 /*
4126 * install new vmspace and drop our ref to the old one.
4127 */
4128
4129 kpreempt_disable();
4130 pmap_deactivate(l);
4131 p->p_vmspace = nvm;
4132 pmap_activate(l);
4133 kpreempt_enable();
4134
4135 uvmspace_free(ovm);
4136 }
4137 }
4138
4139 /*
4140  * uvmspace_addref: add a reference to a vmspace.
4141 */
4142
4143 void
4144 uvmspace_addref(struct vmspace *vm)
4145 {
4146 struct vm_map *map = &vm->vm_map;
4147
4148 KASSERT((map->flags & VM_MAP_DYING) == 0);
4149
4150 mutex_enter(&map->misc_lock);
4151 KASSERT(vm->vm_refcnt > 0);
4152 vm->vm_refcnt++;
4153 mutex_exit(&map->misc_lock);
4154 }
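
/*
 * Example (sketch): every uvmspace_addref() must be balanced with a
 * uvmspace_free(); a consumer keeping the vmspace alive across a
 * blocking operation would do:
 *
 *	uvmspace_addref(vm);
 *	...sleep / copy / inspect the map...
 *	uvmspace_free(vm);
 */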
4155
4156 /*
4157 * uvmspace_free: free a vmspace data structure
4158 */
4159
4160 void
4161 uvmspace_free(struct vmspace *vm)
4162 {
4163 struct vm_map_entry *dead_entries;
4164 struct vm_map *map = &vm->vm_map;
4165 int n;
4166
4167 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4168
4169 UVMHIST_LOG(maphist,"(vm=%p) ref=%d", vm, vm->vm_refcnt,0,0);
4170 mutex_enter(&map->misc_lock);
4171 n = --vm->vm_refcnt;
4172 mutex_exit(&map->misc_lock);
4173 if (n > 0)
4174 return;
4175
4176 /*
4177 * at this point, there should be no other references to the map.
4178 * delete all of the mappings, then destroy the pmap.
4179 */
4180
4181 map->flags |= VM_MAP_DYING;
4182 pmap_remove_all(map->pmap);
4183
4184 /* Get rid of any SYSV shared memory segments. */
4185 if (uvm_shmexit && vm->vm_shm != NULL)
4186 (*uvm_shmexit)(vm);
4187
4188 if (map->nentries) {
4189 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4190 &dead_entries, 0);
4191 if (dead_entries != NULL)
4192 uvm_unmap_detach(dead_entries, 0);
4193 }
4194 KASSERT(map->nentries == 0);
4195 KASSERT(map->size == 0);
4196
4197 mutex_destroy(&map->misc_lock);
4198 rw_destroy(&map->lock);
4199 cv_destroy(&map->cv);
4200 pmap_destroy(map->pmap);
4201 pool_cache_put(&uvm_vmspace_cache, vm);
4202 }
4203
4204 static struct vm_map_entry *
4205 uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
4206 int flags)
4207 {
4208 struct vm_map_entry *new_entry;
4209
4210 new_entry = uvm_mapent_alloc(new_map, 0);
4211 /* old_entry -> new_entry */
4212 uvm_mapent_copy(old_entry, new_entry);
4213
4214 /* new pmap has nothing wired in it */
4215 new_entry->wired_count = 0;
4216
4217 /*
4218 	 * gain a reference to the object backing the entry (it can't
4219 	 * be a submap: the caller has already checked that case).
4220 */
4221
4222 if (new_entry->aref.ar_amap)
4223 uvm_map_reference_amap(new_entry, flags);
4224
4225 if (new_entry->object.uvm_obj &&
4226 new_entry->object.uvm_obj->pgops->pgo_reference)
4227 new_entry->object.uvm_obj->pgops->pgo_reference(
4228 new_entry->object.uvm_obj);
4229
4230 /* insert entry at end of new_map's entry list */
4231 uvm_map_entry_link(new_map, new_map->header.prev,
4232 new_entry);
4233
4234 return new_entry;
4235 }
4236
4237 /*
4238 * share the mapping: this means we want the old and
4239 * new entries to share amaps and backing objects.
4240 */
4241 static void
4242 uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
4243 struct vm_map_entry *old_entry)
4244 {
4245 /*
4246 * if the old_entry needs a new amap (due to prev fork)
4247 * then we need to allocate it now so that we have
4248 * something we own to share with the new_entry. [in
4249 * other words, we need to clear needs_copy]
4250 */
4251
4252 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4253 /* get our own amap, clears needs_copy */
4254 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4255 0, 0);
4256 /* XXXCDC: WAITOK??? */
4257 }
4258
4259 uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
4260 }
4261
4262
4263 static void
4264 uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
4265 struct vm_map_entry *old_entry)
4266 {
4267 struct vm_map_entry *new_entry;
4268
4269 /*
4270 * copy-on-write the mapping (using mmap's
4271 * MAP_PRIVATE semantics)
4272 *
4273 * allocate new_entry, adjust reference counts.
4274 * (note that new references are read-only).
4275 */
4276
4277 new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4278
4279 new_entry->etype |=
4280 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4281
4282 /*
4283 * the new entry will need an amap. it will either
4284 * need to be copied from the old entry or created
4285 * from scratch (if the old entry does not have an
4286 * amap). can we defer this process until later
4287 * (by setting "needs_copy") or do we need to copy
4288 * the amap now?
4289 *
4290 * we must copy the amap now if any of the following
4291 * conditions hold:
4292 * 1. the old entry has an amap and that amap is
4293 * being shared. this means that the old (parent)
4294 * process is sharing the amap with another
4295 * process. if we do not clear needs_copy here
4296 * we will end up in a situation where both the
4297 	 *    parent and child process are referring to the
4298 * same amap with "needs_copy" set. if the
4299 * parent write-faults, the fault routine will
4300 * clear "needs_copy" in the parent by allocating
4301 * a new amap. this is wrong because the
4302 * parent is supposed to be sharing the old amap
4303 * and the new amap will break that.
4304 *
4305 * 2. if the old entry has an amap and a non-zero
4306 * wire count then we are going to have to call
4307 * amap_cow_now to avoid page faults in the
4308 * parent process. since amap_cow_now requires
4309 * "needs_copy" to be clear we might as well
4310 * clear it here as well.
4311 *
4312 */
4313
4314 if (old_entry->aref.ar_amap != NULL) {
4315 if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
4316 VM_MAPENT_ISWIRED(old_entry)) {
4317
4318 amap_copy(new_map, new_entry,
4319 AMAP_COPY_NOCHUNK, 0, 0);
4320 /* XXXCDC: M_WAITOK ... ok? */
4321 }
4322 }
4323
4324 /*
4325 * if the parent's entry is wired down, then the
4326 * parent process does not want page faults on
4327 * access to that memory. this means that we
4328 * cannot do copy-on-write because we can't write
4329 * protect the old entry. in this case we
4330 * resolve all copy-on-write faults now, using
4331 * amap_cow_now. note that we have already
4332 * allocated any needed amap (above).
4333 */
4334
4335 if (VM_MAPENT_ISWIRED(old_entry)) {
4336
4337 /*
4338 * resolve all copy-on-write faults now
4339 * (note that there is nothing to do if
4340 * the old mapping does not have an amap).
4341 */
4342 if (old_entry->aref.ar_amap)
4343 amap_cow_now(new_map, new_entry);
4344
4345 } else {
4346 /*
4347 * setup mappings to trigger copy-on-write faults
4348 * we must write-protect the parent if it has
4349 * an amap and it is not already "needs_copy"...
4350 * if it is already "needs_copy" then the parent
4351 * has already been write-protected by a previous
4352 * fork operation.
4353 */
4354 if (old_entry->aref.ar_amap &&
4355 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4356 if (old_entry->max_protection & VM_PROT_WRITE) {
4357 pmap_protect(old_map->pmap,
4358 old_entry->start, old_entry->end,
4359 old_entry->protection & ~VM_PROT_WRITE);
4360 }
4361 old_entry->etype |= UVM_ET_NEEDSCOPY;
4362 }
4363 }
4364 }
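
/*
 * Example (illustrative): for an unwired MAP_PRIVATE mapping whose
 * amap exists and is not yet "needs_copy", the pmap_protect() above
 * downgrades the parent's mappings to read-only, so the next write by
 * parent or child faults into uvm_fault(), which sees needs_copy and
 * builds the private amap at that point; a wired (e.g. mlock(2)ed)
 * parent instead has the copy resolved eagerly by amap_cow_now(),
 * since it must never take those faults.
 */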
4365
4366 /*
4367 * zero the mapping: the new entry will be zero initialized
4368 */
4369 static void
4370 uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map,
4371 struct vm_map_entry *old_entry)
4372 {
4373 struct vm_map_entry *new_entry;
4374
4375 new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4376
4377 new_entry->etype |=
4378 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4379
4380 if (new_entry->aref.ar_amap) {
4381 uvm_map_unreference_amap(new_entry, 0);
4382 new_entry->aref.ar_pageoff = 0;
4383 new_entry->aref.ar_amap = NULL;
4384 }
4385
4386 if (UVM_ET_ISOBJ(new_entry)) {
4387 if (new_entry->object.uvm_obj->pgops->pgo_detach)
4388 new_entry->object.uvm_obj->pgops->pgo_detach(
4389 new_entry->object.uvm_obj);
4390 new_entry->object.uvm_obj = NULL;
4391 new_entry->etype &= ~UVM_ET_OBJ;
4392 }
4393 }
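
/*
 * Example: a region marked MAP_INHERIT_ZERO with minherit(2) thus
 * shows up in the child at the same address, size and protection,
 * but reads back as zero-fill: both the amap reference and the
 * backing-object reference were dropped above, and the entry is left
 * copy-on-write/needs-copy so a fresh amap is built on first touch.
 */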
4394
4395 /*
4396 * F O R K - m a i n e n t r y p o i n t
4397 */
4398 /*
4399 * uvmspace_fork: fork a process' main map
4400 *
4401 * => create a new vmspace for child process from parent.
4402 * => parent's map must not be locked.
4403 */
4404
4405 struct vmspace *
4406 uvmspace_fork(struct vmspace *vm1)
4407 {
4408 struct vmspace *vm2;
4409 struct vm_map *old_map = &vm1->vm_map;
4410 struct vm_map *new_map;
4411 struct vm_map_entry *old_entry;
4412 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4413
4414 vm_map_lock(old_map);
4415
4416 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4417 vm1->vm_map.flags & VM_MAP_TOPDOWN);
4418 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4419 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4420 new_map = &vm2->vm_map; /* XXX */
4421
4422 old_entry = old_map->header.next;
4423 new_map->size = old_map->size;
4424
4425 /*
4426 * go entry-by-entry
4427 */
4428
4429 while (old_entry != &old_map->header) {
4430
4431 /*
4432 * first, some sanity checks on the old entry
4433 */
4434
4435 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4436 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4437 !UVM_ET_ISNEEDSCOPY(old_entry));
4438
4439 switch (old_entry->inheritance) {
4440 case MAP_INHERIT_NONE:
4441 /*
4442 * drop the mapping, modify size
4443 */
4444 new_map->size -= old_entry->end - old_entry->start;
4445 break;
4446
4447 case MAP_INHERIT_SHARE:
4448 uvm_mapent_forkshared(new_map, old_map, old_entry);
4449 break;
4450
4451 case MAP_INHERIT_COPY:
4452 uvm_mapent_forkcopy(new_map, old_map, old_entry);
4453 break;
4454
4455 case MAP_INHERIT_ZERO:
4456 uvm_mapent_forkzero(new_map, old_map, old_entry);
4457 break;
4458 default:
4459 KASSERT(0);
4460 break;
4461 }
4462 old_entry = old_entry->next;
4463 }
4464
4465 pmap_update(old_map->pmap);
4466 vm_map_unlock(old_map);
4467
4468 if (uvm_shmfork && vm1->vm_shm)
4469 (*uvm_shmfork)(vm1, vm2);
4470
4471 #ifdef PMAP_FORK
4472 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4473 #endif
4474
4475 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4476 return (vm2);
4477 }
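
/*
 * Example (sketch): the fork path (cf. uvm_proc_fork()) uses this
 * roughly as
 *
 *	p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
 *
 * inheritance is decided per entry, so a region marked with
 * minherit(2) MAP_INHERIT_SHARE stays shared, while the default
 * MAP_INHERIT_COPY regions become copy-on-write in the child.
 */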
4478
4479
4480 /*
4481 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4482 *
4483 * => called with map locked.
4484  * => returns non-zero if successfully merged.
4485 */
4486
4487 int
4488 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4489 {
4490 struct uvm_object *uobj;
4491 struct vm_map_entry *next;
4492 struct vm_map_entry *prev;
4493 vsize_t size;
4494 int merged = 0;
4495 bool copying;
4496 int newetype;
4497
4498 if (entry->aref.ar_amap != NULL) {
4499 return 0;
4500 }
4501 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4502 return 0;
4503 }
4504
4505 uobj = entry->object.uvm_obj;
4506 size = entry->end - entry->start;
4507 copying = (flags & UVM_MERGE_COPYING) != 0;
4508 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4509
4510 next = entry->next;
4511 if (next != &map->header &&
4512 next->start == entry->end &&
4513 ((copying && next->aref.ar_amap != NULL &&
4514 amap_refs(next->aref.ar_amap) == 1) ||
4515 (!copying && next->aref.ar_amap == NULL)) &&
4516 UVM_ET_ISCOMPATIBLE(next, newetype,
4517 uobj, entry->flags, entry->protection,
4518 entry->max_protection, entry->inheritance, entry->advice,
4519 entry->wired_count) &&
4520 (uobj == NULL || entry->offset + size == next->offset)) {
4521 int error;
4522
4523 if (copying) {
4524 error = amap_extend(next, size,
4525 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4526 } else {
4527 error = 0;
4528 }
4529 if (error == 0) {
4530 if (uobj) {
4531 if (uobj->pgops->pgo_detach) {
4532 uobj->pgops->pgo_detach(uobj);
4533 }
4534 }
4535
4536 entry->end = next->end;
4537 clear_hints(map, next);
4538 uvm_map_entry_unlink(map, next);
4539 if (copying) {
4540 entry->aref = next->aref;
4541 entry->etype &= ~UVM_ET_NEEDSCOPY;
4542 }
4543 uvm_map_check(map, "trymerge forwardmerge");
4544 uvm_mapent_free(next);
4545 merged++;
4546 }
4547 }
4548
4549 prev = entry->prev;
4550 if (prev != &map->header &&
4551 prev->end == entry->start &&
4552 ((copying && !merged && prev->aref.ar_amap != NULL &&
4553 amap_refs(prev->aref.ar_amap) == 1) ||
4554 (!copying && prev->aref.ar_amap == NULL)) &&
4555 UVM_ET_ISCOMPATIBLE(prev, newetype,
4556 uobj, entry->flags, entry->protection,
4557 entry->max_protection, entry->inheritance, entry->advice,
4558 entry->wired_count) &&
4559 (uobj == NULL ||
4560 prev->offset + prev->end - prev->start == entry->offset)) {
4561 int error;
4562
4563 if (copying) {
4564 error = amap_extend(prev, size,
4565 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4566 } else {
4567 error = 0;
4568 }
4569 if (error == 0) {
4570 if (uobj) {
4571 if (uobj->pgops->pgo_detach) {
4572 uobj->pgops->pgo_detach(uobj);
4573 }
4574 entry->offset = prev->offset;
4575 }
4576
4577 entry->start = prev->start;
4578 clear_hints(map, prev);
4579 uvm_map_entry_unlink(map, prev);
4580 if (copying) {
4581 entry->aref = prev->aref;
4582 entry->etype &= ~UVM_ET_NEEDSCOPY;
4583 }
4584 uvm_map_check(map, "trymerge backmerge");
4585 uvm_mapent_free(prev);
4586 merged++;
4587 }
4588 }
4589
4590 return merged;
4591 }
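
/*
 * Example (illustrative): two object mappings are forward-mergeable
 * only when they are both virtually and object-contiguous, e.g.
 *
 *	entry:	[0x1000, 0x3000)  offset 0x0    in uobj
 *	next:	[0x3000, 0x4000)  offset 0x2000 in uobj
 *
 * entry->offset + size == next->offset holds (0x0 + 0x2000 == 0x2000),
 * so the pair collapses into [0x1000, 0x4000) at offset 0x0, and the
 * now-surplus reference on uobj is dropped via pgo_detach.
 */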
4592
4593 /*
4594 * uvm_map_setup: init map
4595 *
4596 * => map must not be in service yet.
4597 */
4598
4599 void
4600 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4601 {
4602
4603 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
4604 map->header.next = map->header.prev = &map->header;
4605 map->nentries = 0;
4606 map->size = 0;
4607 map->ref_count = 1;
4608 vm_map_setmin(map, vmin);
4609 vm_map_setmax(map, vmax);
4610 map->flags = flags;
4611 map->first_free = &map->header;
4612 map->hint = &map->header;
4613 map->timestamp = 0;
4614 map->busy = NULL;
4615
4616 rw_init(&map->lock);
4617 cv_init(&map->cv, "vm_map");
4618 mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
4619 }
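
/*
 * Example (sketch): kernel_map is brought up this way at boot; note
 * that uvm_map_setup() does not touch map->pmap, so the caller must
 * fill it in afterwards:
 *
 *	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
 *	kernel_map_store.pmap = pmap_kernel();
 *
 * "kernel_map_store", "base" and "end" follow uvm_km.c; treat the
 * exact names as illustrative.
 */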
4620
4621 /*
4622 * U N M A P - m a i n e n t r y p o i n t
4623 */
4624
4625 /*
4626 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "stop")
4627 *
4628 * => caller must check alignment and size
4629 * => map must be unlocked (we will lock it)
4630 * => flags is UVM_FLAG_QUANTUM or 0.
4631 */
4632
4633 void
4634 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4635 {
4636 struct vm_map_entry *dead_entries;
4637 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
4638
4639 UVMHIST_LOG(maphist, " (map=%p, start=%#lx, end=%#lx)",
4640 map, start, end, 0);
4641 if (map == kernel_map) {
4642 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
4643 }
4644 /*
4645 	 * the work is done by helper functions: wipe the pmap mappings,
4646 	 * then detach from the resulting dead map entries...
4647 */
4648 vm_map_lock(map);
4649 uvm_unmap_remove(map, start, end, &dead_entries, flags);
4650 vm_map_unlock(map);
4651
4652 if (dead_entries != NULL)
4653 uvm_unmap_detach(dead_entries, 0);
4654
4655 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
4656 }
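
/*
 * Example: most callers use the uvm_unmap() wrapper, which (per
 * uvm_extern.h) is simply this function with no flags:
 *
 *	uvm_unmap(map, start, end);	== uvm_unmap1(map, start, end, 0)
 */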
4657
4658
4659 /*
4660 * uvm_map_reference: add reference to a map
4661 *
4662 * => map need not be locked (we use misc_lock).
4663 */
4664
4665 void
4666 uvm_map_reference(struct vm_map *map)
4667 {
4668 mutex_enter(&map->misc_lock);
4669 map->ref_count++;
4670 mutex_exit(&map->misc_lock);
4671 }
4672
4673 bool
4674 vm_map_starved_p(struct vm_map *map)
4675 {
4676
4677 if ((map->flags & VM_MAP_WANTVA) != 0) {
4678 return true;
4679 }
4680 /* XXX */
4681 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
4682 return true;
4683 }
4684 return false;
4685 }
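
/*
 * Note on the second test above: the map counts as starved once more
 * than 15/16 of its virtual range is in use; e.g. a 256MB map is
 * flagged once map->size exceeds 240MB (256 / 16 * 15).
 */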
4686
4687 void
4688 uvm_map_lock_entry(struct vm_map_entry *entry)
4689 {
4690
4691 if (entry->aref.ar_amap != NULL) {
4692 amap_lock(entry->aref.ar_amap);
4693 }
4694 if (UVM_ET_ISOBJ(entry)) {
4695 mutex_enter(entry->object.uvm_obj->vmobjlock);
4696 }
4697 }
4698
4699 void
4700 uvm_map_unlock_entry(struct vm_map_entry *entry)
4701 {
4702
4703 if (UVM_ET_ISOBJ(entry)) {
4704 mutex_exit(entry->object.uvm_obj->vmobjlock);
4705 }
4706 if (entry->aref.ar_amap != NULL) {
4707 amap_unlock(entry->aref.ar_amap);
4708 }
4709 }
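
/*
 * Note the lock ordering implied by the pair above: the amap lock is
 * taken before the object lock and released after it, e.g.
 *
 *	uvm_map_lock_entry(entry);	amap, then object
 *	...update both layers...
 *	uvm_map_unlock_entry(entry);	object, then amap
 */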
4710
4711 #if defined(DDB) || defined(DEBUGPRINT)
4712
4713 /*
4714 * uvm_map_printit: actually prints the map
4715 */
4716
4717 void
4718 uvm_map_printit(struct vm_map *map, bool full,
4719 void (*pr)(const char *, ...))
4720 {
4721 struct vm_map_entry *entry;
4722
4723 (*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map),
4724 vm_map_max(map));
4725 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n",
4726 map->nentries, map->size, map->ref_count, map->timestamp,
4727 map->flags);
4728 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4729 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4730 if (!full)
4731 return;
4732 for (entry = map->header.next; entry != &map->header;
4733 entry = entry->next) {
4734 (*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n",
4735 entry, entry->start, entry->end, entry->object.uvm_obj,
4736 (long long)entry->offset, entry->aref.ar_amap,
4737 entry->aref.ar_pageoff);
4738 (*pr)(
4739 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4740 "wc=%d, adv=%d\n",
4741 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4742 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4743 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4744 entry->protection, entry->max_protection,
4745 entry->inheritance, entry->wired_count, entry->advice);
4746 }
4747 }
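
/*
 * From ddb(4) this is reached via "show map <addr>"; adding the
 * "full" modifier ("show map/f <addr>") also walks and prints each
 * entry, as the loop above does. (Syntax noted for illustration.)
 */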
4748
4749 void
4750 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
4751 {
4752 struct vm_map *map;
4753
4754 for (map = kernel_map;;) {
4755 struct vm_map_entry *entry;
4756
4757 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
4758 break;
4759 }
4760 (*pr)("%p is %p+%zu from VMMAP %p\n",
4761 (void *)addr, (void *)entry->start,
4762 (size_t)(addr - (uintptr_t)entry->start), map);
4763 if (!UVM_ET_ISSUBMAP(entry)) {
4764 break;
4765 }
4766 map = entry->object.sub_map;
4767 }
4768 }
4769
4770 #endif /* DDB || DEBUGPRINT */
4771
4772 #ifndef __USER_VA0_IS_SAFE
4773 static int
4774 sysctl_user_va0_disable(SYSCTLFN_ARGS)
4775 {
4776 struct sysctlnode node;
4777 int t, error;
4778
4779 node = *rnode;
4780 node.sysctl_data = &t;
4781 t = user_va0_disable;
4782 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4783 if (error || newp == NULL)
4784 return (error);
4785
4786 if (!t && user_va0_disable &&
4787 kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MAP_VA_ZERO, 0,
4788 NULL, NULL, NULL))
4789 return EPERM;
4790
4791 user_va0_disable = !!t;
4792 return 0;
4793 }
4794 #endif
4795
4796 static int
4797 fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve,
4798 struct vm_map *m, struct vm_map_entry *e)
4799 {
4800 #ifndef _RUMPKERNEL
4801 int error;
4802
4803 memset(kve, 0, sizeof(*kve));
4804 KASSERT(e != NULL);
4805 if (UVM_ET_ISOBJ(e)) {
4806 struct uvm_object *uobj = e->object.uvm_obj;
4807 KASSERT(uobj != NULL);
4808 kve->kve_ref_count = uobj->uo_refs;
4809 kve->kve_count = uobj->uo_npages;
4810 if (UVM_OBJ_IS_VNODE(uobj)) {
4811 struct vattr va;
4812 struct vnode *vp = (struct vnode *)uobj;
4813 vn_lock(vp, LK_SHARED | LK_RETRY);
4814 error = VOP_GETATTR(vp, &va, l->l_cred);
4815 VOP_UNLOCK(vp);
4816 kve->kve_type = KVME_TYPE_VNODE;
4817 if (error == 0) {
4818 kve->kve_vn_size = vp->v_size;
4819 kve->kve_vn_type = (int)vp->v_type;
4820 kve->kve_vn_mode = va.va_mode;
4821 kve->kve_vn_rdev = va.va_rdev;
4822 kve->kve_vn_fileid = va.va_fileid;
4823 kve->kve_vn_fsid = va.va_fsid;
4824 error = vnode_to_path(kve->kve_path,
4825 sizeof(kve->kve_path) / 2, vp, l, p);
4826 #ifdef DIAGNOSTIC
4827 if (error)
4828 printf("%s: vp %p error %d\n", __func__,
4829 vp, error);
4830 #endif
4831 }
4832 } else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
4833 kve->kve_type = KVME_TYPE_KERN;
4834 } else if (UVM_OBJ_IS_DEVICE(uobj)) {
4835 kve->kve_type = KVME_TYPE_DEVICE;
4836 } else if (UVM_OBJ_IS_AOBJ(uobj)) {
4837 kve->kve_type = KVME_TYPE_ANON;
4838 } else {
4839 kve->kve_type = KVME_TYPE_OBJECT;
4840 }
4841 } else if (UVM_ET_ISSUBMAP(e)) {
4842 struct vm_map *map = e->object.sub_map;
4843 KASSERT(map != NULL);
4844 kve->kve_ref_count = map->ref_count;
4845 kve->kve_count = map->nentries;
4846 kve->kve_type = KVME_TYPE_SUBMAP;
4847 } else
4848 kve->kve_type = KVME_TYPE_UNKNOWN;
4849
4850 kve->kve_start = e->start;
4851 kve->kve_end = e->end;
4852 kve->kve_offset = e->offset;
4853 kve->kve_wired_count = e->wired_count;
4854 kve->kve_inheritance = e->inheritance;
4855 kve->kve_attributes = e->map_attrib;
4856 kve->kve_advice = e->advice;
4857 #define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \
4858 (((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \
4859 (((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0)
4860 kve->kve_protection = PROT(e->protection);
4861 kve->kve_max_protection = PROT(e->max_protection);
4862 kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE)
4863 ? KVME_FLAG_COW : 0;
4864 kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY)
4865 ? KVME_FLAG_NEEDS_COPY : 0;
4866 kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN)
4867 ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP;
4868 kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE)
4869 ? KVME_FLAG_PAGEABLE : 0;
4870 #endif
4871 return 0;
4872 }
4873
4874 static int
4875 fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp,
4876 size_t *oldlenp)
4877 {
4878 int error;
4879 struct proc *p;
4880 struct kinfo_vmentry *vme;
4881 struct vmspace *vm;
4882 struct vm_map *map;
4883 struct vm_map_entry *entry;
4884 char *dp;
4885 size_t count, vmesize;
4886
4887 vme = NULL;
4888 vmesize = *oldlenp;
4889 count = 0;
4890 if (oldp && *oldlenp > 1024 * 1024)
4891 return E2BIG;
4892
4893 if ((error = proc_find_locked(l, &p, pid)) != 0)
4894 return error;
4895
4896 if ((error = proc_vmspace_getref(p, &vm)) != 0)
4897 goto out;
4898
4899 map = &vm->vm_map;
4900 vm_map_lock_read(map);
4901
4902 dp = oldp;
4903 if (oldp)
4904 vme = kmem_alloc(vmesize, KM_SLEEP);
4905 for (entry = map->header.next; entry != &map->header;
4906 entry = entry->next) {
4907 if (oldp && (dp - (char *)oldp) < *oldlenp + elem_size) {
4908 error = fill_vmentry(l, p, &vme[count], map, entry);
4909 if (error)
4910 goto out;
4911 dp += elem_size;
4912 }
4913 count++;
4914 }
4915 vm_map_unlock_read(map);
4916 uvmspace_free(vm);
4917
4918 out:
4919 if (pid != -1)
4920 mutex_exit(p->p_lock);
4921 if (error == 0) {
4922 const u_int esize = min(sizeof(*vme), elem_size);
4923 dp = oldp;
4924 for (size_t i = 0; i < count; i++) {
4925 if (oldp && (dp - (char *)oldp) < *oldlenp + elem_size)
4926 {
4927 error = sysctl_copyout(l, &vme[i], dp, esize);
4928 if (error)
4929 break;
4930 dp += elem_size;
4931 } else
4932 break;
4933 }
4934 count *= elem_size;
4935 if (oldp != NULL && *oldlenp < count)
4936 error = ENOSPC;
4937 *oldlenp = count;
4938 }
4939 if (vme)
4940 kmem_free(vme, vmesize);
4941 return error;
4942 }
4943
4944 static int
4945 sysctl_vmproc(SYSCTLFN_ARGS)
4946 {
4947 int error;
4948
4949 if (namelen == 1 && name[0] == CTL_QUERY)
4950 return (sysctl_query(SYSCTLFN_CALL(rnode)));
4951
4952 if (namelen == 0)
4953 return EINVAL;
4954
4955 switch (name[0]) {
4956 case VM_PROC_MAP:
4957 if (namelen != 3)
4958 return EINVAL;
4959 sysctl_unlock();
4960 error = fill_vmentries(l, name[1], name[2],
4961 oldp, oldlenp);
4962 sysctl_relock();
4963 return error;
4964 default:
4965 return EINVAL;
4966 }
4967 }
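
/*
 * Example (sketch, from userland): the VM_PROC_MAP node takes a
 * 5-element MIB whose last two names carry the target pid and the
 * entry size the caller was compiled against:
 *
 *	int mib[5] = { CTL_VM, VM_PROC, VM_PROC_MAP, pid,
 *	    sizeof(struct kinfo_vmentry) };
 *	size_t len = 0;
 *
 *	sysctl(mib, 5, NULL, &len, NULL, 0);	   size the buffer
 *	void *buf = malloc(len);
 *	sysctl(mib, 5, buf, &len, NULL, 0);	   fetch the entries
 */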
4968
4969 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
4970 {
4971
4972 sysctl_createv(clog, 0, NULL, NULL,
4973 CTLFLAG_PERMANENT,
4974 CTLTYPE_STRUCT, "proc",
4975 SYSCTL_DESCR("Process vm information"),
4976 sysctl_vmproc, 0, NULL, 0,
4977 CTL_VM, VM_PROC, CTL_EOL);
4978 #ifndef __USER_VA0_IS_SAFE
4979 sysctl_createv(clog, 0, NULL, NULL,
4980 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4981 CTLTYPE_INT, "user_va0_disable",
4982 SYSCTL_DESCR("Disable VA 0"),
4983 sysctl_user_va0_disable, 0, &user_va0_disable, 0,
4984 CTL_VM, CTL_CREATE, CTL_EOL);
4985 #endif
4986 }
4987