/*	$NetBSD: uvm_map.c,v 1.312 2012/01/28 00:00:06 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.312 2012/01/28 00:00:06 rmind Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>
#ifndef __USER_VA0_IS_SAFE
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include "opt_user_va0_disable_default.h"
#endif

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_list)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)
UVMMAP_EVCNT_DEFINE(mlk_listloop)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space. If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

#ifndef __USER_VA0_IS_SAFE
#ifndef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT 1
#endif
#ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
#undef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
#endif
static int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
#endif

/*
 * macros
 */

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */
extern struct vm_map *pager_map;

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, const char *,
		    int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __FILE__, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    int, struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

static int
uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
{
	const struct vm_map_entry *eparent = nparent;
	const struct vm_map_entry *ekey = nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (eparent->start < ekey->start)
		return -1;
	if (eparent->end >= ekey->start)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (eparent->start < va)
		return -1;
	if (eparent->end >= va)
		return 1;
	return 0;
}

static const rb_tree_ops_t uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_rb_gap: return the gap size between our entry and next entry.
 */
static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{

	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendents. Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendents, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
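
/*
 * Illustrative example (editor's note, not from the original source):
 * suppose an entry's own gap is 2 pages, its left child caches
 * maxgap = 5 pages, and its right child caches maxgap = 1 page.  Then
 * this entry's maxgap is max(2, 5, 1) = 5 pages, computed from the two
 * cached child values alone, with no descent below the children.
 */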

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;
		unsigned int which;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work towards the root, we know entry's maxgap
		 * value is OK, but its brothers may now be out-of-date due
		 * to rebalancing. So refresh it.
		 */
		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *ret;

	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	ret = rb_tree_insert_node(&map->rb_tree, entry);
	KASSERTMSG(ret == entry,
	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root. We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree. Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node. So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, entry);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %lx != %lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, tmp->gap will be unsigned
		 * and will likely exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %lx != %lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
567 printf("corrupt: 0x%"PRIxVADDR"x >= 0x%"PRIxVADDR"x\n",
568 trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => Note that "intrsafe" maps use only exclusive, spin locks.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		mutex_spin_enter(&map->mutex);
		return;
	}

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL)
			break;
		if (map->busy == curlwp)
			break;
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL)
			cv_wait(&map->cv, &map->misc_lock);
		mutex_exit(&map->misc_lock);
	}

	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		return mutex_tryenter(&map->mutex);
	if (!rw_tryenter(&map->lock, RW_WRITER))
		return false;
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}

	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		mutex_spin_exit(&map->mutex);
	else {
		KASSERT(rw_write_held(&map->lock));
		KASSERT(map->busy == NULL || map->busy == curlwp);
		rw_exit(&map->lock);
	}
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 * want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}
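
/*
 * Usage sketch (editor's note, not from the original source; see real
 * callers such as uvm_map_pageable for the authoritative pattern): a
 * thread that must sleep while keeping other writers out of the map
 * brackets the work with vm_map_busy()/vm_map_unbusy():
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);	now vm_map_lock() callers wait for us
 *	vm_map_unlock(map);
 *	... potentially sleeping work, e.g. faulting in pages ...
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);	wake threads blocked in vm_map_lock()
 *	vm_map_unlock(map);
 */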

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		return mutex_owned(&map->mutex);
	} else {
		return rw_write_held(&map->lock);
	}
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	me = pool_cache_get(&uvm_map_entry_cache, pflags);
	if (__predict_false(me == NULL))
		return NULL;
	me->flags = 0;

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return (me);
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	pool_cache_put(&uvm_map_entry_cache, me);
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
#if defined(UVMHIST)
	static struct kern_history_ent maphistbuf[100];
	static struct kern_history_ent pdhistbuf[100];
#endif

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
}

/*
 * uvm_map_init_caches: init mapping system caches.
 */
void
uvm_map_init_caches(void)
{
	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 * the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion. note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}
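
/*
 * Usage sketch (editor's note, not from the original source): an
 * operation on the address range [start, end) typically looks up the
 * first affected entry and clips it on both sides, so that subsequent
 * changes apply to exactly the requested range:
 *
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		UVM_MAP_CLIP_END(map, entry, end);
 *	}
 */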

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	[1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	[2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	[3] <uobj,uoffset>		== normal mapping
 *	[4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address. note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */
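
/*
 * Usage sketch (editor's note, not from the original source): a typical
 * anonymous, copy-on-write user mapping request looks like
 *
 *	vaddr_t va = hint;
 *	error = uvm_map(&vm->vm_map, &va, size, NULL, UVM_UNKNOWN_OFFSET,
 *	    0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
 *
 * which is case [2] above: no uobj, and no PMAP_PREFER hint.
 */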

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((size & PAGE_MASK) == 0);

#ifndef __USER_VA0_IS_SAFE
	if ((flags & UVM_FLAG_FIXED) && *startp == 0 &&
	    !VM_MAP_IS_KERNEL(map) && user_va0_disable)
		return EACCES;
#endif

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 */

	new_entry = NULL;
	if (map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map)) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}

/*
 * uvm_map_prepare:
 *
 * called with map unlocked.
 * on success, returns the map locked.
 */

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC("uvm_map_prepare");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL ||
	    (map->flags & VM_MAP_INTRSAFE));

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */

retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
		    (map->flags & VM_MAP_INTRSAFE) == 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags);
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map). this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace). the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
	return 0;
}

/*
 * uvm_map_enter:
 *
 * called with map locked.
 * unlock the map before returning.
 */

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC("uvm_map_enter");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
	KASSERT(vm_map_locked_p(map));

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry. might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap. note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk. Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap. note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)? -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well. If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap). If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 * (if merged, we've already detached)
		 */
		if (uobj && uobj->pgops->pgo_detach && !merged)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;
done:
	vm_map_unlock(map);

	if (new_entry) {
		uvm_mapent_free(new_entry);
	}

	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free(dead);
	}

	return error;
}

/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	while (cur) {
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			cur = RIGHT_ENTRY(cur);
		} else
			cur = LEFT_ENTRY(cur);
	}
	*entry = prev;
	return false;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */
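
/*
 * Usage sketch (editor's note, not from the original source):
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		va lies within [entry->start, entry->end)
 *	} else {
 *		entry is the nearest entry before va, or &map->header
 *	}
 *	vm_map_unlock_read(map);
 */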

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	bool use_tree = false;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	UVMMAP_EVCNT_INCR(mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		if (cur != &map->header && cur->end > address) {
			UVMMAP_EVCNT_INCR(mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			uvm_mapent_check(*entry);
			return (true);
		}

		if (map->nentries > 15)
			use_tree = true;
	} else {

		/*
		 * invalid hint. use tree.
		 */
		use_tree = true;
	}

	uvm_map_check(map, __func__);

	if (use_tree) {
		/*
		 * Simple lookup in the tree. Happens when the hint is
		 * invalid, or nentries reach a threshold.
		 */
		UVMMAP_EVCNT_INCR(mlk_tree);
		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
			goto got;
		} else {
			goto failed;
		}
	}

	/*
	 * search linearly
	 */

	UVMMAP_EVCNT_INCR(mlk_list);
	while (cur != &map->header) {
		UVMMAP_EVCNT_INCR(mlk_listloop);
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
got:
				SAVE_HINT(map, map->hint, *entry);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				KDASSERT((*entry)->start <= address);
				KDASSERT(address < (*entry)->end);
				uvm_mapent_check(*entry);
				return (true);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
failed:
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}

/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits,
 * 0 if it doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
		KASSERT(align < uvmexp.ncolors);
		if (uvmexp.ncolors > 1) {
			const u_int colormask = uvmexp.colormask;
			const u_int colorsize = colormask + 1;
			vaddr_t hint = atop(*start);
			const u_int color = hint & colormask;
			if (color != align) {
				hint -= color;	/* adjust to color boundary */
				KASSERT((hint & colormask) == 0);
				if (topdown) {
					if (align > color)
						hint -= colorsize;
				} else {
					if (align < color)
						hint += colorsize;
				}
				*start = ptoa(hint + align); /* adjust to color */
			}
		}
	} else if (align != 0) {
		if ((*start & (align - 1)) != 0) {
			if (topdown)
				*start &= ~(align - 1);
			else
				*start = roundup(*start, align);
		}
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region. Be sure we didn't
	 * wrap around the address; if so, we lose. Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}
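
/*
 * Worked example for the UVM_FLAG_COLORMATCH arithmetic above (editor's
 * note, purely illustrative): with 4 colors (colormask = 3), *start at
 * page 10 and a requested color ("align") of 1, color = 10 & 3 = 2.
 * Bottom-up, align < color, so hint becomes (10 - 2) + 4 = 12 and
 * *start = page 13, the first page of color 1 at or above page 10.
 * Top-down, align > color is false, so hint stays 8 and *start =
 * page 9, the nearest page of color 1 below the original hint.
 */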

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set in "flags" (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if "align" is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

struct vm_map_entry *
uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
    vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
    vsize_t align, int flags)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *child, *prev, *tmp;
	vaddr_t orig_hint;
	const int topdown = map->flags & VM_MAP_TOPDOWN;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
	    map, hint, length, flags);
	KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || (align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	uvm_map_check(map, "map_findspace entry");

	/*
	 * remember the original hint. if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < vm_map_min(map)) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return (NULL);
		}
		hint = vm_map_min(map);
	}
	if (hint > vm_map_max(map)) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, vm_map_min(map), vm_map_max(map), 0);
		return (NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	/*
	 * @@@: there are four, no, eight cases to consider.
	 *
	 * 0: found,     fixed,     bottom up -> fail
	 * 1: found,     fixed,     top down  -> fail
	 * 2: found,     not fixed, bottom up -> start after entry->end,
	 *                                       loop up
	 * 3: found,     not fixed, top down  -> start before entry->start,
	 *                                       loop down
	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
	 * 6: not found, not fixed, bottom up -> check entry->next->start,
	 *                                       loop up
	 * 7: not found, not fixed, top down  -> check entry->next->start,
	 *                                       loop down
	 *
	 * as you can see, it reduces to roughly five cases, and that
	 * adding top down mapping only adds one unique case (without
	 * it, there would be four cases).
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
		entry = map->first_free;
	} else {
		if (uvm_map_lookup_entry(map, hint, &entry)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist, "<- fixed & VA in use",
				    0, 0, 0, 0);
				return (NULL);
			}
			if (topdown)
				/* Start from lower gap. */
				entry = entry->prev;
		} else if (flags & UVM_FLAG_FIXED) {
			if (entry->next->start >= hint + length &&
			    hint + length > hint)
				goto found;

			/* "hint" address is gap but too small */
			UVMHIST_LOG(maphist, "<- fixed mapping failed",
			    0, 0, 0, 0);
			return (NULL); /* only one shot at it ... */
		} else {
			/*
			 * See if given hint fits in this gap.
			 */
			switch (uvm_map_space_avail(&hint, length,
			    uoffset, align, flags, topdown, entry)) {
			case 1:
				goto found;
			case -1:
				goto wraparound;
			}

			if (topdown) {
				/*
				 * Still there is a chance to fit
				 * if hint > entry->end.
				 */
			} else {
				/* Start from higher gap. */
				entry = entry->next;
				if (entry == &map->header)
					goto notfound;
				goto nextgap;
			}
		}
	}

	/*
	 * Note that the UVM_FLAG_FIXED case has already been handled above.
	 */
	KDASSERT((flags & UVM_FLAG_FIXED) == 0);

	/* Try to find the space in the red-black tree */

	/* Check slot before any entry */
	hint = topdown ? entry->next->start - length : entry->end;
	switch (uvm_map_space_avail(&hint, length, uoffset, align, flags,
	    topdown, entry)) {
	case 1:
		goto found;
	case -1:
		goto wraparound;
	}

nextgap:
	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
	/* If there is not enough space in the whole tree, we fail */
	tmp = ROOT_ENTRY(map);
	if (tmp == NULL || tmp->maxgap < length)
		goto notfound;

	prev = NULL; /* previous candidate */

	/* Find an entry close to hint that has enough space */
	for (; tmp;) {
		KASSERT(tmp->next->start == tmp->end + tmp->gap);
		if (topdown) {
			if (tmp->next->start < hint + length &&
			    (prev == NULL || tmp->end > prev->end)) {
				if (tmp->gap >= length)
					prev = tmp;
				else if ((child = LEFT_ENTRY(tmp)) != NULL
				    && child->maxgap >= length)
					prev = tmp;
			}
		} else {
			if (tmp->end >= hint &&
			    (prev == NULL || tmp->end < prev->end)) {
				if (tmp->gap >= length)
					prev = tmp;
				else if ((child = RIGHT_ENTRY(tmp)) != NULL
				    && child->maxgap >= length)
					prev = tmp;
			}
		}
		if (tmp->next->start < hint + length)
			child = RIGHT_ENTRY(tmp);
		else if (tmp->end > hint)
			child = LEFT_ENTRY(tmp);
		else {
			if (tmp->gap >= length)
				break;
			if (topdown)
				child = LEFT_ENTRY(tmp);
			else
				child = RIGHT_ENTRY(tmp);
		}
		if (child == NULL || child->maxgap < length)
			break;
		tmp = child;
	}

	if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1997 /*
1998 		 * Check if the entry that we found satisfies the
1999 * space requirement
2000 */
2001 if (topdown) {
2002 if (hint > tmp->next->start - length)
2003 hint = tmp->next->start - length;
2004 } else {
2005 if (hint < tmp->end)
2006 hint = tmp->end;
2007 }
2008 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2009 flags, topdown, tmp)) {
2010 case 1:
2011 entry = tmp;
2012 goto found;
2013 case -1:
2014 goto wraparound;
2015 }
2016 if (tmp->gap >= length)
2017 goto listsearch;
2018 }
2019 if (prev == NULL)
2020 goto notfound;
2021
2022 if (topdown) {
2023 KASSERT(orig_hint >= prev->next->start - length ||
2024 prev->next->start - length > prev->next->start);
2025 hint = prev->next->start - length;
2026 } else {
2027 KASSERT(orig_hint <= prev->end);
2028 hint = prev->end;
2029 }
2030 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2031 flags, topdown, prev)) {
2032 case 1:
2033 entry = prev;
2034 goto found;
2035 case -1:
2036 goto wraparound;
2037 }
2038 if (prev->gap >= length)
2039 goto listsearch;
2040
2041 if (topdown)
2042 tmp = LEFT_ENTRY(prev);
2043 else
2044 tmp = RIGHT_ENTRY(prev);
2045 for (;;) {
2046 KASSERT(tmp && tmp->maxgap >= length);
2047 if (topdown)
2048 child = RIGHT_ENTRY(tmp);
2049 else
2050 child = LEFT_ENTRY(tmp);
2051 if (child && child->maxgap >= length) {
2052 tmp = child;
2053 continue;
2054 }
2055 if (tmp->gap >= length)
2056 break;
2057 if (topdown)
2058 tmp = LEFT_ENTRY(tmp);
2059 else
2060 tmp = RIGHT_ENTRY(tmp);
2061 }
2062
2063 if (topdown) {
2064 KASSERT(orig_hint >= tmp->next->start - length ||
2065 tmp->next->start - length > tmp->next->start);
2066 hint = tmp->next->start - length;
2067 } else {
2068 KASSERT(orig_hint <= tmp->end);
2069 hint = tmp->end;
2070 }
2071 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2072 flags, topdown, tmp)) {
2073 case 1:
2074 entry = tmp;
2075 goto found;
2076 case -1:
2077 goto wraparound;
2078 }
2079
2080 /*
2081 * The tree fails to find an entry because of offset or alignment
2082 * restrictions. Search the list instead.
2083 */
2084 listsearch:
2085 /*
2086 * Look through the rest of the map, trying to fit a new region in
2087 * the gap between existing regions, or after the very last region.
2088 * note: entry->end = base VA of current gap,
2089 * entry->next->start = VA of end of current gap
2090 */
2091
2092 for (;;) {
2093 /* Update hint for current gap. */
2094 hint = topdown ? entry->next->start - length : entry->end;
2095
2096 /* See if it fits. */
2097 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2098 flags, topdown, entry)) {
2099 case 1:
2100 goto found;
2101 case -1:
2102 goto wraparound;
2103 }
2104
2105 /* Advance to next/previous gap */
2106 if (topdown) {
2107 if (entry == &map->header) {
2108 UVMHIST_LOG(maphist, "<- failed (off start)",
2109 0,0,0,0);
2110 goto notfound;
2111 }
2112 entry = entry->prev;
2113 } else {
2114 entry = entry->next;
2115 if (entry == &map->header) {
2116 UVMHIST_LOG(maphist, "<- failed (off end)",
2117 0,0,0,0);
2118 goto notfound;
2119 }
2120 }
2121 }
2122
2123 found:
2124 SAVE_HINT(map, map->hint, entry);
2125 *result = hint;
2126 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2127 KASSERT( topdown || hint >= orig_hint);
2128 KASSERT(!topdown || hint <= orig_hint);
2129 KASSERT(entry->end <= hint);
2130 KASSERT(hint + length <= entry->next->start);
2131 return (entry);
2132
2133 wraparound:
2134 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2135
2136 return (NULL);
2137
2138 notfound:
2139 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2140
2141 return (NULL);
2142 }
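
/*
 * Illustrative note (editorial sketch, not part of the original source):
 * the tree search above relies on two per-entry fields that the map code
 * keeps up to date, as asserted in the search loop:
 *
 *	gap    = entry->next->start - entry->end   (free VA after entry)
 *	maxgap = MAX(gap, maxgap of left and right subtrees)
 *
 * so an entire subtree can be skipped in O(1) whenever its maxgap is
 * smaller than the requested length; e.g. for a request of length 0x3000
 * only subtrees with maxgap >= 0x3000 are ever descended.
 */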
2143
2144 /*
2145 * U N M A P - m a i n h e l p e r f u n c t i o n s
2146 */
2147
2148 /*
2149  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
2150  *
2151  * => caller must check alignment and size
2152  * => map must be locked by caller
2153  * => we return a list of map entries that we've removed from the map
2154 * in "entry_list"
2155 */
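
/*
 * Illustrative sketch (editorial, not part of the original source):
 * callers typically pair this function with uvm_unmap_detach() so that
 * reference dropping happens with the map unlocked:
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */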
2156
2157 void
2158 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2159 struct vm_map_entry **entry_list /* OUT */, int flags)
2160 {
2161 struct vm_map_entry *entry, *first_entry, *next;
2162 vaddr_t len;
2163 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2164
2165 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2166 map, start, end, 0);
2167 VM_MAP_RANGE_CHECK(map, start, end);
2168
2169 uvm_map_check(map, "unmap_remove entry");
2170
2171 /*
2172 * find first entry
2173 */
2174
2175 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2176 /* clip and go... */
2177 entry = first_entry;
2178 UVM_MAP_CLIP_START(map, entry, start);
2179 /* critical! prevents stale hint */
2180 SAVE_HINT(map, entry, entry->prev);
2181 } else {
2182 entry = first_entry->next;
2183 }
2184
2185 /*
2186 * Save the free space hint
2187 */
2188
2189 if (map->first_free != &map->header && map->first_free->start >= start)
2190 map->first_free = entry->prev;
2191
2192 /*
2193 * note: we now re-use first_entry for a different task. we remove
2194 * a number of map entries from the map and save them in a linked
2195 * list headed by "first_entry". once we remove them from the map
2196 * the caller should unlock the map and drop the references to the
2197  * backing objects [c.f. uvm_unmap_detach].  the goal is to
2198  * separate unmapping from reference dropping.  why?
2199 * [1] the map has to be locked for unmapping
2200 * [2] the map need not be locked for reference dropping
2201 * [3] dropping references may trigger pager I/O, and if we hit
2202 * a pager that does synchronous I/O we may have to wait for it.
2203 * [4] we would like all waiting for I/O to occur with maps unlocked
2204 * so that we don't block other threads.
2205 */
2206
2207 first_entry = NULL;
2208 *entry_list = NULL;
2209
2210 /*
2211 * break up the area into map entry sized regions and unmap. note
2212 * that all mappings have to be removed before we can even consider
2213 * dropping references to amaps or VM objects (otherwise we could end
2214 * up with a mapping to a page on the free list which would be very bad)
2215 */
2216
2217 while ((entry != &map->header) && (entry->start < end)) {
2218 KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
2219
2220 UVM_MAP_CLIP_END(map, entry, end);
2221 next = entry->next;
2222 len = entry->end - entry->start;
2223
2224 /*
2225 * unwire before removing addresses from the pmap; otherwise
2226 * unwiring will put the entries back into the pmap (XXX).
2227 */
2228
2229 if (VM_MAPENT_ISWIRED(entry)) {
2230 uvm_map_entry_unwire(map, entry);
2231 }
2232 if (flags & UVM_FLAG_VAONLY) {
2233
2234 /* nothing */
2235
2236 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2237
2238 /*
2239 * if the map is non-pageable, any pages mapped there
2240 * must be wired and entered with pmap_kenter_pa(),
2241 * and we should free any such pages immediately.
2242 * this is mostly used for kmem_map.
2243 */
2244 KASSERT(vm_map_pmap(map) == pmap_kernel());
2245
2246 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2247 uvm_km_pgremove_intrsafe(map, entry->start,
2248 entry->end);
2249 pmap_kremove(entry->start, len);
2250 }
2251 } else if (UVM_ET_ISOBJ(entry) &&
2252 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2253 panic("%s: kernel object %p %p\n",
2254 __func__, map, entry);
2255 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2256 /*
2257 * remove mappings the standard way. lock object
2258 * and/or amap to ensure vm_page state does not
2259 * change while in pmap_remove().
2260 */
2261
2262 uvm_map_lock_entry(entry);
2263 pmap_remove(map->pmap, entry->start, entry->end);
2264 uvm_map_unlock_entry(entry);
2265 }
2266
2267 #if defined(DEBUG)
2268 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2269
2270 /*
2271 * check if there's remaining mapping,
2272 * which is a bug in caller.
2273 */
2274
2275 vaddr_t va;
2276 for (va = entry->start; va < entry->end;
2277 va += PAGE_SIZE) {
2278 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2279 panic("%s: %#"PRIxVADDR" has mapping",
2280 __func__, va);
2281 }
2282 }
2283
2284 if (VM_MAP_IS_KERNEL(map)) {
2285 uvm_km_check_empty(map, entry->start,
2286 entry->end);
2287 }
2288 }
2289 #endif /* defined(DEBUG) */
2290
2291 /*
2292 * remove entry from map and put it on our list of entries
2293 * that we've nuked. then go to next entry.
2294 */
2295
2296 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2297
2298 /* critical! prevents stale hint */
2299 SAVE_HINT(map, entry, entry->prev);
2300
2301 uvm_map_entry_unlink(map, entry);
2302 KASSERT(map->size >= len);
2303 map->size -= len;
2304 entry->prev = NULL;
2305 entry->next = first_entry;
2306 first_entry = entry;
2307 entry = next;
2308 }
2309
2310 /*
2311 * Note: if map is dying, leave pmap_update() for pmap_destroy(),
2312 * which will be called later.
2313 */
2314 if ((map->flags & VM_MAP_DYING) == 0) {
2315 pmap_update(vm_map_pmap(map));
2316 } else {
2317 KASSERT(vm_map_pmap(map) != pmap_kernel());
2318 }
2319
2320 uvm_map_check(map, "unmap_remove leave");
2321
2322 /*
2323 * now we've cleaned up the map and are ready for the caller to drop
2324 * references to the mapped objects.
2325 */
2326
2327 *entry_list = first_entry;
2328 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2329
2330 if (map->flags & VM_MAP_WANTVA) {
2331 mutex_enter(&map->misc_lock);
2332 map->flags &= ~VM_MAP_WANTVA;
2333 cv_broadcast(&map->cv);
2334 mutex_exit(&map->misc_lock);
2335 }
2336 }
2337
2338 /*
2339 * uvm_unmap_detach: drop references in a chain of map entries
2340 *
2341 * => we will free the map entries as we traverse the list.
2342 */
2343
2344 void
2345 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2346 {
2347 struct vm_map_entry *next_entry;
2348 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2349
2350 while (first_entry) {
2351 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2352 UVMHIST_LOG(maphist,
2353 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2354 first_entry, first_entry->aref.ar_amap,
2355 first_entry->object.uvm_obj,
2356 UVM_ET_ISSUBMAP(first_entry));
2357
2358 /*
2359 * drop reference to amap, if we've got one
2360 */
2361
2362 if (first_entry->aref.ar_amap)
2363 uvm_map_unreference_amap(first_entry, flags);
2364
2365 /*
2366 * drop reference to our backing object, if we've got one
2367 */
2368
2369 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2370 if (UVM_ET_ISOBJ(first_entry) &&
2371 first_entry->object.uvm_obj->pgops->pgo_detach) {
2372 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2373 (first_entry->object.uvm_obj);
2374 }
2375 next_entry = first_entry->next;
2376 uvm_mapent_free(first_entry);
2377 first_entry = next_entry;
2378 }
2379 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2380 }
2381
2382 /*
2383 * E X T R A C T I O N F U N C T I O N S
2384 */
2385
2386 /*
2387 * uvm_map_reserve: reserve space in a vm_map for future use.
2388 *
2389 * => we reserve space in a map by putting a dummy map entry in the
2390 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2391 * => map should be unlocked (we will write lock it)
2392 * => we return true if we were able to reserve space
2393 * => XXXCDC: should be inline?
2394 */
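
/*
 * Illustrative sketch (editorial, hypothetical caller): this mirrors the
 * way uvm_map_extract() below reserves its destination space:
 *
 *	vaddr_t dstaddr = vm_map_min(dstmap);
 *
 *	if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
 *		return ENOMEM;
 *
 * on success dstaddr names a blank, VM_PROT_NONE reservation of len
 * bytes that can later be filled in via uvm_map_replace().
 */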
2395
2396 int
2397 uvm_map_reserve(struct vm_map *map, vsize_t size,
2398 vaddr_t offset /* hint for pmap_prefer */,
2399 vsize_t align /* alignment */,
2400 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2401 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2402 {
2403 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2404
2405 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2406 map,size,offset,raddr);
2407
2408 size = round_page(size);
2409
2410 /*
2411 * reserve some virtual space.
2412 */
2413
2414 if (uvm_map(map, raddr, size, NULL, offset, align,
2415 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2416 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2417 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2418 return (false);
2419 }
2420
2421 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2422 return (true);
2423 }
2424
2425 /*
2426 * uvm_map_replace: replace a reserved (blank) area of memory with
2427 * real mappings.
2428 *
2429 * => caller must WRITE-LOCK the map
2430 * => we return true if replacement was a success
2431  * => we expect the newents chain to have nnewents entries on it and
2432 * we expect newents->prev to point to the last entry on the list
2433 * => note newents is allowed to be NULL
2434 */
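
/*
 * Illustrative note (editorial, not part of the original source): for
 * nnewents == 3 the expected chain shape is
 *
 *	newents -> e2 -> e3 -> NULL
 *	newents->prev == e3	(the head's prev is a shortcut to the tail)
 *
 * i.e. a NULL-terminated forward list whose head's prev points at the
 * last entry; step 4 of uvm_map_extract() builds exactly this shape.
 */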
2435
2436 static int
2437 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2438 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2439 struct vm_map_entry **oldentryp)
2440 {
2441 struct vm_map_entry *oldent, *last;
2442
2443 uvm_map_check(map, "map_replace entry");
2444
2445 /*
2446 * first find the blank map entry at the specified address
2447 */
2448
2449 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2450 return (false);
2451 }
2452
2453 /*
2454 * check to make sure we have a proper blank entry
2455 */
2456
2457 if (end < oldent->end) {
2458 UVM_MAP_CLIP_END(map, oldent, end);
2459 }
2460 if (oldent->start != start || oldent->end != end ||
2461 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2462 return (false);
2463 }
2464
2465 #ifdef DIAGNOSTIC
2466
2467 /*
2468 * sanity check the newents chain
2469 */
2470
2471 {
2472 struct vm_map_entry *tmpent = newents;
2473 int nent = 0;
2474 vsize_t sz = 0;
2475 vaddr_t cur = start;
2476
2477 while (tmpent) {
2478 nent++;
2479 sz += tmpent->end - tmpent->start;
2480 if (tmpent->start < cur)
2481 panic("uvm_map_replace1");
2482 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2483 panic("uvm_map_replace2: "
2484 "tmpent->start=0x%"PRIxVADDR
2485 ", tmpent->end=0x%"PRIxVADDR
2486 ", end=0x%"PRIxVADDR,
2487 tmpent->start, tmpent->end, end);
2488 }
2489 cur = tmpent->end;
2490 if (tmpent->next) {
2491 if (tmpent->next->prev != tmpent)
2492 panic("uvm_map_replace3");
2493 } else {
2494 if (newents->prev != tmpent)
2495 panic("uvm_map_replace4");
2496 }
2497 tmpent = tmpent->next;
2498 }
2499 if (nent != nnewents)
2500 panic("uvm_map_replace5");
2501 if (sz != nsize)
2502 panic("uvm_map_replace6");
2503 }
2504 #endif
2505
2506 /*
2507 * map entry is a valid blank! replace it. (this does all the
2508 * work of map entry link/unlink...).
2509 */
2510
2511 if (newents) {
2512 last = newents->prev;
2513
2514 /* critical: flush stale hints out of map */
2515 SAVE_HINT(map, map->hint, newents);
2516 if (map->first_free == oldent)
2517 map->first_free = last;
2518
2519 last->next = oldent->next;
2520 last->next->prev = last;
2521
2522 /* Fix RB tree */
2523 uvm_rb_remove(map, oldent);
2524
2525 newents->prev = oldent->prev;
2526 newents->prev->next = newents;
2527 map->nentries = map->nentries + (nnewents - 1);
2528
2529 /* Fixup the RB tree */
2530 {
2531 int i;
2532 struct vm_map_entry *tmp;
2533
2534 tmp = newents;
2535 for (i = 0; i < nnewents && tmp; i++) {
2536 uvm_rb_insert(map, tmp);
2537 tmp = tmp->next;
2538 }
2539 }
2540 } else {
2541 /* NULL list of new entries: just remove the old one */
2542 clear_hints(map, oldent);
2543 uvm_map_entry_unlink(map, oldent);
2544 }
2545 map->size -= end - start - nsize;
2546
2547 uvm_map_check(map, "map_replace leave");
2548
2549 /*
2550 * now we can free the old blank entry and return.
2551 */
2552
2553 *oldentryp = oldent;
2554 return (true);
2555 }
2556
2557 /*
2558 * uvm_map_extract: extract a mapping from a map and put it somewhere
2559 * (maybe removing the old mapping)
2560 *
2561 * => maps should be unlocked (we will write lock them)
2562 * => returns 0 on success, error code otherwise
2563 * => start must be page aligned
2564 * => len must be page sized
2565 * => flags:
2566 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2567 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2568 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2569 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2570 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2571 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2572 * be used from within the kernel in a kernel level map <<<
2573 */
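
/*
 * Illustrative sketch (editorial, hypothetical caller): move a
 * page-aligned region between maps, removing the source mapping:
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE);
 *	if (error)
 *		return error;
 *
 * note that per the rules above REMOVE may not be combined with CONTIG
 * or QREF.
 */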
2574
2575 int
2576 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2577 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2578 {
2579 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2580 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2581 *deadentry, *oldentry;
2582 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2583 vsize_t elen;
2584 int nchain, error, copy_ok;
2585 vsize_t nsize;
2586 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2587
2588 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2589 len,0);
2590 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2591
2592 /*
2593 * step 0: sanity check: start must be on a page boundary, length
2594 * must be page sized. can't ask for CONTIG/QREF if you asked for
2595 * REMOVE.
2596 */
2597
2598 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2599 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2600 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2601
2602 /*
2603 * step 1: reserve space in the target map for the extracted area
2604 */
2605
2606 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2607 dstaddr = vm_map_min(dstmap);
2608 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2609 return (ENOMEM);
2610 *dstaddrp = dstaddr; /* pass address back to caller */
2611 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2612 } else {
2613 dstaddr = *dstaddrp;
2614 }
2615
2616 /*
2617 * step 2: setup for the extraction process loop by init'ing the
2618 * map entry chain, locking src map, and looking up the first useful
2619 * entry in the map.
2620 */
2621
2622 end = start + len;
2623 newend = dstaddr + len;
2624 chain = endchain = NULL;
2625 nchain = 0;
2626 nsize = 0;
2627 vm_map_lock(srcmap);
2628
2629 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2630
2631 /* "start" is within an entry */
2632 if (flags & UVM_EXTRACT_QREF) {
2633
2634 /*
2635 * for quick references we don't clip the entry, so
2636 * the entry may map space "before" the starting
2637 * virtual address... this is the "fudge" factor
2638 * (which can be non-zero only the first time
2639 * through the "while" loop in step 3).
2640 */
2641
2642 fudge = start - entry->start;
2643 } else {
2644
2645 /*
2646 * normal reference: we clip the map to fit (thus
2647 * fudge is zero)
2648 */
2649
2650 UVM_MAP_CLIP_START(srcmap, entry, start);
2651 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2652 fudge = 0;
2653 }
2654 } else {
2655
2656 /* "start" is not within an entry ... skip to next entry */
2657 if (flags & UVM_EXTRACT_CONTIG) {
2658 error = EINVAL;
2659 goto bad; /* definite hole here ... */
2660 }
2661
2662 entry = entry->next;
2663 fudge = 0;
2664 }
2665
2666 /* save values from srcmap for step 6 */
2667 orig_entry = entry;
2668 orig_fudge = fudge;
2669
2670 /*
2671 * step 3: now start looping through the map entries, extracting
2672 * as we go.
2673 */
2674
2675 while (entry->start < end && entry != &srcmap->header) {
2676
2677 /* if we are not doing a quick reference, clip it */
2678 if ((flags & UVM_EXTRACT_QREF) == 0)
2679 UVM_MAP_CLIP_END(srcmap, entry, end);
2680
2681 /* clear needs_copy (allow chunking) */
2682 if (UVM_ET_ISNEEDSCOPY(entry)) {
2683 amap_copy(srcmap, entry,
2684 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2685 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2686 error = ENOMEM;
2687 goto bad;
2688 }
2689
2690 /* amap_copy could clip (during chunk)! update fudge */
2691 if (fudge) {
2692 fudge = start - entry->start;
2693 orig_fudge = fudge;
2694 }
2695 }
2696
2697 /* calculate the offset of this from "start" */
2698 oldoffset = (entry->start + fudge) - start;
2699
2700 /* allocate a new map entry */
2701 newentry = uvm_mapent_alloc(dstmap, 0);
2702 if (newentry == NULL) {
2703 error = ENOMEM;
2704 goto bad;
2705 }
2706
2707 /* set up new map entry */
2708 newentry->next = NULL;
2709 newentry->prev = endchain;
2710 newentry->start = dstaddr + oldoffset;
2711 newentry->end =
2712 newentry->start + (entry->end - (entry->start + fudge));
2713 if (newentry->end > newend || newentry->end < newentry->start)
2714 newentry->end = newend;
2715 newentry->object.uvm_obj = entry->object.uvm_obj;
2716 if (newentry->object.uvm_obj) {
2717 if (newentry->object.uvm_obj->pgops->pgo_reference)
2718 newentry->object.uvm_obj->pgops->
2719 pgo_reference(newentry->object.uvm_obj);
2720 newentry->offset = entry->offset + fudge;
2721 } else {
2722 newentry->offset = 0;
2723 }
2724 newentry->etype = entry->etype;
2725 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2726 entry->max_protection : entry->protection;
2727 newentry->max_protection = entry->max_protection;
2728 newentry->inheritance = entry->inheritance;
2729 newentry->wired_count = 0;
2730 newentry->aref.ar_amap = entry->aref.ar_amap;
2731 if (newentry->aref.ar_amap) {
2732 newentry->aref.ar_pageoff =
2733 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2734 uvm_map_reference_amap(newentry, AMAP_SHARED |
2735 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2736 } else {
2737 newentry->aref.ar_pageoff = 0;
2738 }
2739 newentry->advice = entry->advice;
2740 if ((flags & UVM_EXTRACT_QREF) != 0) {
2741 newentry->flags |= UVM_MAP_NOMERGE;
2742 }
2743
2744 /* now link it on the chain */
2745 nchain++;
2746 nsize += newentry->end - newentry->start;
2747 if (endchain == NULL) {
2748 chain = endchain = newentry;
2749 } else {
2750 endchain->next = newentry;
2751 endchain = newentry;
2752 }
2753
2754 /* end of 'while' loop! */
2755 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2756 (entry->next == &srcmap->header ||
2757 entry->next->start != entry->end)) {
2758 error = EINVAL;
2759 goto bad;
2760 }
2761 entry = entry->next;
2762 fudge = 0;
2763 }
2764
2765 /*
2766 * step 4: close off chain (in format expected by uvm_map_replace)
2767 */
2768
2769 if (chain)
2770 chain->prev = endchain;
2771
2772 /*
2773 * step 5: attempt to lock the dest map so we can pmap_copy.
2774 * note usage of copy_ok:
2775 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2776 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2777 */
2778
2779 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2780 copy_ok = 1;
2781 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2782 nchain, nsize, &resentry)) {
2783 if (srcmap != dstmap)
2784 vm_map_unlock(dstmap);
2785 error = EIO;
2786 goto bad;
2787 }
2788 } else {
2789 copy_ok = 0;
2790 		/* replace deferred until step 7 */
2791 }
2792
2793 /*
2794 * step 6: traverse the srcmap a second time to do the following:
2795 * - if we got a lock on the dstmap do pmap_copy
2796 * - if UVM_EXTRACT_REMOVE remove the entries
2797 * we make use of orig_entry and orig_fudge (saved in step 2)
2798 */
2799
2800 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2801
2802 /* purge possible stale hints from srcmap */
2803 if (flags & UVM_EXTRACT_REMOVE) {
2804 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2805 if (srcmap->first_free != &srcmap->header &&
2806 srcmap->first_free->start >= start)
2807 srcmap->first_free = orig_entry->prev;
2808 }
2809
2810 entry = orig_entry;
2811 fudge = orig_fudge;
2812 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2813
2814 while (entry->start < end && entry != &srcmap->header) {
2815 if (copy_ok) {
2816 oldoffset = (entry->start + fudge) - start;
2817 elen = MIN(end, entry->end) -
2818 (entry->start + fudge);
2819 pmap_copy(dstmap->pmap, srcmap->pmap,
2820 dstaddr + oldoffset, elen,
2821 entry->start + fudge);
2822 }
2823
2824 /* we advance "entry" in the following if statement */
2825 if (flags & UVM_EXTRACT_REMOVE) {
2826 uvm_map_lock_entry(entry);
2827 pmap_remove(srcmap->pmap, entry->start,
2828 entry->end);
2829 uvm_map_unlock_entry(entry);
2830 oldentry = entry; /* save entry */
2831 entry = entry->next; /* advance */
2832 uvm_map_entry_unlink(srcmap, oldentry);
2833 /* add to dead list */
2834 oldentry->next = deadentry;
2835 deadentry = oldentry;
2836 } else {
2837 entry = entry->next; /* advance */
2838 }
2839
2840 /* end of 'while' loop */
2841 fudge = 0;
2842 }
2843 pmap_update(srcmap->pmap);
2844
2845 /*
2846 * unlock dstmap. we will dispose of deadentry in
2847 * step 7 if needed
2848 */
2849
2850 if (copy_ok && srcmap != dstmap)
2851 vm_map_unlock(dstmap);
2852
2853 } else {
2854 deadentry = NULL;
2855 }
2856
2857 /*
2858 * step 7: we are done with the source map, unlock. if copy_ok
2859 * is 0 then we have not replaced the dummy mapping in dstmap yet
2860 * and we need to do so now.
2861 */
2862
2863 vm_map_unlock(srcmap);
2864 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2865 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2866
2867 /* now do the replacement if we didn't do it in step 5 */
2868 if (copy_ok == 0) {
2869 vm_map_lock(dstmap);
2870 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2871 nchain, nsize, &resentry);
2872 vm_map_unlock(dstmap);
2873
2874 if (error == false) {
2875 error = EIO;
2876 goto bad2;
2877 }
2878 }
2879
2880 if (resentry != NULL)
2881 uvm_mapent_free(resentry);
2882
2883 return (0);
2884
2885 /*
2886 * bad: failure recovery
2887 */
2888 bad:
2889 vm_map_unlock(srcmap);
2890 bad2: /* src already unlocked */
2891 if (chain)
2892 uvm_unmap_detach(chain,
2893 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2894
2895 if (resentry != NULL)
2896 uvm_mapent_free(resentry);
2897
2898 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2899 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2900 }
2901 return (error);
2902 }
2903
2904 /* end of extraction functions */
2905
2906 /*
2907 * uvm_map_submap: punch down part of a map into a submap
2908 *
2909 * => only the kernel_map is allowed to be submapped
2910 * => the purpose of submapping is to break up the locking granularity
2911 * of a larger map
2912 * => the range specified must have been mapped previously with a uvm_map()
2913 * call [with uobj==NULL] to create a blank map entry in the main map.
2914 * [And it had better still be blank!]
2915 * => maps which contain submaps should never be copied or forked.
2916 * => to remove a submap, use uvm_unmap() on the main map
2917 * and then uvm_map_deallocate() the submap.
2918 * => main map must be unlocked.
2919 * => submap must have been init'd and have a zero reference count.
2920 * [need not be locked as we don't actually reference it]
2921 */
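
/*
 * Illustrative sketch (editorial, not part of the original source):
 * creating a submap is a two-step operation per the requirements above;
 * first make a blank entry in the main map, then punch the submap down
 * into it:
 *
 *	if (uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0)) == 0)
 *		error = uvm_map_submap(kernel_map, va, va + size, submap);
 */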
2922
2923 int
2924 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2925 struct vm_map *submap)
2926 {
2927 struct vm_map_entry *entry;
2928 int error;
2929
2930 vm_map_lock(map);
2931 VM_MAP_RANGE_CHECK(map, start, end);
2932
2933 if (uvm_map_lookup_entry(map, start, &entry)) {
2934 UVM_MAP_CLIP_START(map, entry, start);
2935 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
2936 } else {
2937 entry = NULL;
2938 }
2939
2940 if (entry != NULL &&
2941 entry->start == start && entry->end == end &&
2942 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2943 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2944 entry->etype |= UVM_ET_SUBMAP;
2945 entry->object.sub_map = submap;
2946 entry->offset = 0;
2947 uvm_map_reference(submap);
2948 error = 0;
2949 } else {
2950 error = EINVAL;
2951 }
2952 vm_map_unlock(map);
2953
2954 return error;
2955 }
2956
2957 /*
2958 * uvm_map_protect: change map protection
2959 *
2960 * => set_max means set max_protection.
2961 * => map must be unlocked.
2962 */
2963
2964 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
2965 ~VM_PROT_WRITE : VM_PROT_ALL)
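
/*
 * Illustrative note (editorial, not part of the original source):
 * MASK() keeps copy-on-write entries write-protected in the pmap.
 * e.g. a COW entry with protection VM_PROT_READ|VM_PROT_WRITE is
 * entered as VM_PROT_READ only, so the first write faults and triggers
 * the copy.
 */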
2966
2967 int
2968 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2969 vm_prot_t new_prot, bool set_max)
2970 {
2971 struct vm_map_entry *current, *entry;
2972 int error = 0;
2973 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2974 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
2975 map, start, end, new_prot);
2976
2977 vm_map_lock(map);
2978 VM_MAP_RANGE_CHECK(map, start, end);
2979 if (uvm_map_lookup_entry(map, start, &entry)) {
2980 UVM_MAP_CLIP_START(map, entry, start);
2981 } else {
2982 entry = entry->next;
2983 }
2984
2985 /*
2986 * make a first pass to check for protection violations.
2987 */
2988
2989 current = entry;
2990 while ((current != &map->header) && (current->start < end)) {
2991 if (UVM_ET_ISSUBMAP(current)) {
2992 error = EINVAL;
2993 goto out;
2994 }
2995 if ((new_prot & current->max_protection) != new_prot) {
2996 error = EACCES;
2997 goto out;
2998 }
2999 /*
3000 * Don't allow VM_PROT_EXECUTE to be set on entries that
3001 * point to vnodes that are associated with a NOEXEC file
3002 * system.
3003 */
3004 if (UVM_ET_ISOBJ(current) &&
3005 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3006 struct vnode *vp =
3007 (struct vnode *) current->object.uvm_obj;
3008
3009 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3010 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3011 error = EACCES;
3012 goto out;
3013 }
3014 }
3015
3016 current = current->next;
3017 }
3018
3019 	/* go back and fix up protections (clipping entries to "end" as we go). */
3020
3021 current = entry;
3022 while ((current != &map->header) && (current->start < end)) {
3023 vm_prot_t old_prot;
3024
3025 UVM_MAP_CLIP_END(map, current, end);
3026 old_prot = current->protection;
3027 if (set_max)
3028 current->protection =
3029 (current->max_protection = new_prot) & old_prot;
3030 else
3031 current->protection = new_prot;
3032
3033 /*
3034 * update physical map if necessary. worry about copy-on-write
3035 * here -- CHECK THIS XXX
3036 */
3037
3038 if (current->protection != old_prot) {
3039 /* update pmap! */
3040 uvm_map_lock_entry(current);
3041 pmap_protect(map->pmap, current->start, current->end,
3042 current->protection & MASK(entry));
3043 uvm_map_unlock_entry(current);
3044
3045 /*
3046 * If this entry points at a vnode, and the
3047 * protection includes VM_PROT_EXECUTE, mark
3048 * the vnode as VEXECMAP.
3049 */
3050 if (UVM_ET_ISOBJ(current)) {
3051 struct uvm_object *uobj =
3052 current->object.uvm_obj;
3053
3054 if (UVM_OBJ_IS_VNODE(uobj) &&
3055 (current->protection & VM_PROT_EXECUTE)) {
3056 vn_markexec((struct vnode *) uobj);
3057 }
3058 }
3059 }
3060
3061 /*
3062 * If the map is configured to lock any future mappings,
3063 * wire this entry now if the old protection was VM_PROT_NONE
3064 * and the new protection is not VM_PROT_NONE.
3065 */
3066
3067 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3068 VM_MAPENT_ISWIRED(entry) == 0 &&
3069 old_prot == VM_PROT_NONE &&
3070 new_prot != VM_PROT_NONE) {
3071 if (uvm_map_pageable(map, entry->start,
3072 entry->end, false,
3073 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3074
3075 /*
3076 * If locking the entry fails, remember the
3077 * error if it's the first one. Note we
3078 * still continue setting the protection in
3079 * the map, but will return the error
3080 * condition regardless.
3081 *
3082 * XXX Ignore what the actual error is,
3083 * XXX just call it a resource shortage
3084 * XXX so that it doesn't get confused
3085 * XXX what uvm_map_protect() itself would
3086 * XXX normally return.
3087 */
3088
3089 error = ENOMEM;
3090 }
3091 }
3092 current = current->next;
3093 }
3094 pmap_update(map->pmap);
3095
3096 out:
3097 vm_map_unlock(map);
3098
3099 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3100 return error;
3101 }
3102
3103 #undef MASK
3104
3105 /*
3106 * uvm_map_inherit: set inheritance code for range of addrs in map.
3107 *
3108 * => map must be unlocked
3109 * => note that the inherit code is used during a "fork". see fork
3110 * code for details.
3111 */
3112
3113 int
3114 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3115 vm_inherit_t new_inheritance)
3116 {
3117 struct vm_map_entry *entry, *temp_entry;
3118 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3119 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3120 map, start, end, new_inheritance);
3121
3122 switch (new_inheritance) {
3123 case MAP_INHERIT_NONE:
3124 case MAP_INHERIT_COPY:
3125 case MAP_INHERIT_SHARE:
3126 break;
3127 default:
3128 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3129 return EINVAL;
3130 }
3131
3132 vm_map_lock(map);
3133 VM_MAP_RANGE_CHECK(map, start, end);
3134 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3135 entry = temp_entry;
3136 UVM_MAP_CLIP_START(map, entry, start);
3137 } else {
3138 entry = temp_entry->next;
3139 }
3140 while ((entry != &map->header) && (entry->start < end)) {
3141 UVM_MAP_CLIP_END(map, entry, end);
3142 entry->inheritance = new_inheritance;
3143 entry = entry->next;
3144 }
3145 vm_map_unlock(map);
3146 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3147 return 0;
3148 }
3149
3150 /*
3151 * uvm_map_advice: set advice code for range of addrs in map.
3152 *
3153 * => map must be unlocked
3154 */
3155
3156 int
3157 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3158 {
3159 struct vm_map_entry *entry, *temp_entry;
3160 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3161 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3162 map, start, end, new_advice);
3163
3164 vm_map_lock(map);
3165 VM_MAP_RANGE_CHECK(map, start, end);
3166 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3167 entry = temp_entry;
3168 UVM_MAP_CLIP_START(map, entry, start);
3169 } else {
3170 entry = temp_entry->next;
3171 }
3172
3173 /*
3174 * XXXJRT: disallow holes?
3175 */
3176
3177 while ((entry != &map->header) && (entry->start < end)) {
3178 UVM_MAP_CLIP_END(map, entry, end);
3179
3180 switch (new_advice) {
3181 case MADV_NORMAL:
3182 case MADV_RANDOM:
3183 case MADV_SEQUENTIAL:
3184 /* nothing special here */
3185 break;
3186
3187 default:
3188 vm_map_unlock(map);
3189 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3190 return EINVAL;
3191 }
3192 entry->advice = new_advice;
3193 entry = entry->next;
3194 }
3195
3196 vm_map_unlock(map);
3197 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3198 return 0;
3199 }
3200
3201 /*
3202 * uvm_map_willneed: apply MADV_WILLNEED
3203 */
3204
3205 int
3206 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3207 {
3208 struct vm_map_entry *entry;
3209 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3210 UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
3211 map, start, end, 0);
3212
3213 vm_map_lock_read(map);
3214 VM_MAP_RANGE_CHECK(map, start, end);
3215 if (!uvm_map_lookup_entry(map, start, &entry)) {
3216 entry = entry->next;
3217 }
3218 while (entry->start < end) {
3219 struct vm_amap * const amap = entry->aref.ar_amap;
3220 struct uvm_object * const uobj = entry->object.uvm_obj;
3221
3222 KASSERT(entry != &map->header);
3223 KASSERT(start < entry->end);
3224 /*
3225 * For now, we handle only the easy but commonly-requested case.
3226 * ie. start prefetching of backing uobj pages.
3227 *
3228 * XXX It might be useful to pmap_enter() the already-in-core
3229 * pages by inventing a "weak" mode for uvm_fault() which would
3230 * only do the PGO_LOCKED pgo_get().
3231 */
3232 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3233 off_t offset;
3234 off_t size;
3235
3236 			offset = entry->offset;
3237 			if (entry->start < start) {
3238 				offset += start - entry->start;
3239 			}
3240 			size = entry->offset + (entry->end - entry->start) - offset;
3241 			if (end < entry->end) {
3242 				size -= entry->end - end;
3243 			}
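			/*
			 * Worked example (editorial): for an entry
			 * [0x2000, 0x6000) with entry->offset 0x1000 and
			 * a request [0x3000, 0x5000): offset becomes
			 * 0x1000 + 0x1000 = 0x2000, size becomes
			 * 0x1000 + 0x4000 - 0x2000 = 0x3000 and is then
			 * trimmed by 0x1000 to 0x2000, i.e. readahead
			 * covers object offsets [0x2000, 0x4000),
			 * exactly the requested VA range.
			 */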
3244 uvm_readahead(uobj, offset, size);
3245 }
3246 entry = entry->next;
3247 }
3248 vm_map_unlock_read(map);
3249 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3250 return 0;
3251 }
3252
3253 /*
3254 * uvm_map_pageable: sets the pageability of a range in a map.
3255 *
3256 * => wires map entries. should not be used for transient page locking.
3257 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3258 * => regions specified as not pageable require lock-down (wired) memory
3259 * and page tables.
3260 * => map must never be read-locked
3261 * => if islocked is true, map is already write-locked
3262 * => we always unlock the map, since we must downgrade to a read-lock
3263 * to call uvm_fault_wire()
3264 * => XXXCDC: check this and try and clean it up.
3265 */
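
/*
 * Illustrative sketch (editorial, hypothetical caller) with the map
 * unlocked (lockflags == 0):
 *
 *	error = uvm_map_pageable(map, start, end, false, 0);	(wire)
 *	...
 *	error = uvm_map_pageable(map, start, end, true, 0);	(unwire)
 */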
3266
3267 int
3268 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3269 bool new_pageable, int lockflags)
3270 {
3271 struct vm_map_entry *entry, *start_entry, *failed_entry;
3272 int rv;
3273 #ifdef DIAGNOSTIC
3274 u_int timestamp_save;
3275 #endif
3276 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3277 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3278 map, start, end, new_pageable);
3279 KASSERT(map->flags & VM_MAP_PAGEABLE);
3280
3281 if ((lockflags & UVM_LK_ENTER) == 0)
3282 vm_map_lock(map);
3283 VM_MAP_RANGE_CHECK(map, start, end);
3284
3285 /*
3286 * only one pageability change may take place at one time, since
3287 * uvm_fault_wire assumes it will be called only once for each
3288 * wiring/unwiring. therefore, we have to make sure we're actually
3289 * changing the pageability for the entire region. we do so before
3290 * making any changes.
3291 */
3292
3293 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3294 if ((lockflags & UVM_LK_EXIT) == 0)
3295 vm_map_unlock(map);
3296
3297 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3298 return EFAULT;
3299 }
3300 entry = start_entry;
3301
3302 /*
3303 * handle wiring and unwiring separately.
3304 */
3305
3306 if (new_pageable) { /* unwire */
3307 UVM_MAP_CLIP_START(map, entry, start);
3308
3309 /*
3310 * unwiring. first ensure that the range to be unwired is
3311 * really wired down and that there are no holes.
3312 */
3313
3314 while ((entry != &map->header) && (entry->start < end)) {
3315 if (entry->wired_count == 0 ||
3316 (entry->end < end &&
3317 (entry->next == &map->header ||
3318 entry->next->start > entry->end))) {
3319 if ((lockflags & UVM_LK_EXIT) == 0)
3320 vm_map_unlock(map);
3321 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3322 return EINVAL;
3323 }
3324 entry = entry->next;
3325 }
3326
3327 /*
3328 * POSIX 1003.1b - a single munlock call unlocks a region,
3329 * regardless of the number of mlock calls made on that
3330 * region.
3331 */
3332
3333 entry = start_entry;
3334 while ((entry != &map->header) && (entry->start < end)) {
3335 UVM_MAP_CLIP_END(map, entry, end);
3336 if (VM_MAPENT_ISWIRED(entry))
3337 uvm_map_entry_unwire(map, entry);
3338 entry = entry->next;
3339 }
3340 if ((lockflags & UVM_LK_EXIT) == 0)
3341 vm_map_unlock(map);
3342 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3343 return 0;
3344 }
3345
3346 /*
3347 * wire case: in two passes [XXXCDC: ugly block of code here]
3348 *
3349 * 1: holding the write lock, we create any anonymous maps that need
3350 * to be created. then we clip each map entry to the region to
3351 * be wired and increment its wiring count.
3352 *
3353 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3354 * in the pages for any newly wired area (wired_count == 1).
3355 *
3356 * downgrading to a read lock for uvm_fault_wire avoids a possible
3357 * deadlock with another thread that may have faulted on one of
3358 * the pages to be wired (it would mark the page busy, blocking
3359 * us, then in turn block on the map lock that we hold). because
3360 * of problems in the recursive lock package, we cannot upgrade
3361 * to a write lock in vm_map_lookup. thus, any actions that
3362 * require the write lock must be done beforehand. because we
3363 * keep the read lock on the map, the copy-on-write status of the
3364 * entries we modify here cannot change.
3365 */
3366
3367 while ((entry != &map->header) && (entry->start < end)) {
3368 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3369
3370 /*
3371 * perform actions of vm_map_lookup that need the
3372 * write lock on the map: create an anonymous map
3373 * for a copy-on-write region, or an anonymous map
3374 * for a zero-fill region. (XXXCDC: submap case
3375 * ok?)
3376 */
3377
3378 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3379 if (UVM_ET_ISNEEDSCOPY(entry) &&
3380 ((entry->max_protection & VM_PROT_WRITE) ||
3381 (entry->object.uvm_obj == NULL))) {
3382 amap_copy(map, entry, 0, start, end);
3383 /* XXXCDC: wait OK? */
3384 }
3385 }
3386 }
3387 UVM_MAP_CLIP_START(map, entry, start);
3388 UVM_MAP_CLIP_END(map, entry, end);
3389 entry->wired_count++;
3390
3391 /*
3392 * Check for holes
3393 */
3394
3395 if (entry->protection == VM_PROT_NONE ||
3396 (entry->end < end &&
3397 (entry->next == &map->header ||
3398 entry->next->start > entry->end))) {
3399
3400 /*
3401 * found one. amap creation actions do not need to
3402 * be undone, but the wired counts need to be restored.
3403 */
3404
3405 while (entry != &map->header && entry->end > start) {
3406 entry->wired_count--;
3407 entry = entry->prev;
3408 }
3409 if ((lockflags & UVM_LK_EXIT) == 0)
3410 vm_map_unlock(map);
3411 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3412 return EINVAL;
3413 }
3414 entry = entry->next;
3415 }
3416
3417 /*
3418 * Pass 2.
3419 */
3420
3421 #ifdef DIAGNOSTIC
3422 timestamp_save = map->timestamp;
3423 #endif
3424 vm_map_busy(map);
3425 vm_map_unlock(map);
3426
3427 rv = 0;
3428 entry = start_entry;
3429 while (entry != &map->header && entry->start < end) {
3430 if (entry->wired_count == 1) {
3431 rv = uvm_fault_wire(map, entry->start, entry->end,
3432 entry->max_protection, 1);
3433 if (rv) {
3434
3435 /*
3436 * wiring failed. break out of the loop.
3437 * we'll clean up the map below, once we
3438 * have a write lock again.
3439 */
3440
3441 break;
3442 }
3443 }
3444 entry = entry->next;
3445 }
3446
3447 if (rv) { /* failed? */
3448
3449 /*
3450 * Get back to an exclusive (write) lock.
3451 */
3452
3453 vm_map_lock(map);
3454 vm_map_unbusy(map);
3455
3456 #ifdef DIAGNOSTIC
3457 if (timestamp_save + 1 != map->timestamp)
3458 panic("uvm_map_pageable: stale map");
3459 #endif
3460
3461 /*
3462 * first drop the wiring count on all the entries
3463 * which haven't actually been wired yet.
3464 */
3465
3466 failed_entry = entry;
3467 while (entry != &map->header && entry->start < end) {
3468 entry->wired_count--;
3469 entry = entry->next;
3470 }
3471
3472 /*
3473 * now, unwire all the entries that were successfully
3474 * wired above.
3475 */
3476
3477 entry = start_entry;
3478 while (entry != failed_entry) {
3479 entry->wired_count--;
3480 if (VM_MAPENT_ISWIRED(entry) == 0)
3481 uvm_map_entry_unwire(map, entry);
3482 entry = entry->next;
3483 }
3484 if ((lockflags & UVM_LK_EXIT) == 0)
3485 vm_map_unlock(map);
3486 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3487 return (rv);
3488 }
3489
3490 if ((lockflags & UVM_LK_EXIT) == 0) {
3491 vm_map_unbusy(map);
3492 } else {
3493
3494 /*
3495 * Get back to an exclusive (write) lock.
3496 */
3497
3498 vm_map_lock(map);
3499 vm_map_unbusy(map);
3500 }
3501
3502 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3503 return 0;
3504 }
3505
3506 /*
3507 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3508 * all mapped regions.
3509 *
3510 * => map must not be locked.
3511 * => if no flags are specified, all regions are unwired.
3512 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3513 */
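
/*
 * Illustrative note (editorial): mlockall-style callers pass
 * MCL_CURRENT and/or MCL_FUTURE together with the process's memlock
 * limit, e.g.
 *
 *	error = uvm_map_pageable_all(map, MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * while munlockall-style callers pass flags == 0 to unwire everything.
 */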
3514
3515 int
3516 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3517 {
3518 struct vm_map_entry *entry, *failed_entry;
3519 vsize_t size;
3520 int rv;
3521 #ifdef DIAGNOSTIC
3522 u_int timestamp_save;
3523 #endif
3524 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3525 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3526
3527 KASSERT(map->flags & VM_MAP_PAGEABLE);
3528
3529 vm_map_lock(map);
3530
3531 /*
3532 * handle wiring and unwiring separately.
3533 */
3534
3535 if (flags == 0) { /* unwire */
3536
3537 /*
3538 * POSIX 1003.1b -- munlockall unlocks all regions,
3539 * regardless of how many times mlockall has been called.
3540 */
3541
3542 for (entry = map->header.next; entry != &map->header;
3543 entry = entry->next) {
3544 if (VM_MAPENT_ISWIRED(entry))
3545 uvm_map_entry_unwire(map, entry);
3546 }
3547 map->flags &= ~VM_MAP_WIREFUTURE;
3548 vm_map_unlock(map);
3549 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3550 return 0;
3551 }
3552
3553 if (flags & MCL_FUTURE) {
3554
3555 /*
3556 * must wire all future mappings; remember this.
3557 */
3558
3559 map->flags |= VM_MAP_WIREFUTURE;
3560 }
3561
3562 if ((flags & MCL_CURRENT) == 0) {
3563
3564 /*
3565 * no more work to do!
3566 */
3567
3568 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3569 vm_map_unlock(map);
3570 return 0;
3571 }
3572
3573 /*
3574 * wire case: in three passes [XXXCDC: ugly block of code here]
3575 *
3576 * 1: holding the write lock, count all pages mapped by non-wired
3577 * entries. if this would cause us to go over our limit, we fail.
3578 *
3579 * 2: still holding the write lock, we create any anonymous maps that
3580  *    need to be created.  then we increment each entry's wiring count.
3581 *
3582 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3583 * in the pages for any newly wired area (wired_count == 1).
3584 *
3585 * downgrading to a read lock for uvm_fault_wire avoids a possible
3586 * deadlock with another thread that may have faulted on one of
3587 * the pages to be wired (it would mark the page busy, blocking
3588 * us, then in turn block on the map lock that we hold). because
3589 * of problems in the recursive lock package, we cannot upgrade
3590 * to a write lock in vm_map_lookup. thus, any actions that
3591 * require the write lock must be done beforehand. because we
3592 * keep the read lock on the map, the copy-on-write status of the
3593 * entries we modify here cannot change.
3594 */
3595
3596 for (size = 0, entry = map->header.next; entry != &map->header;
3597 entry = entry->next) {
3598 if (entry->protection != VM_PROT_NONE &&
3599 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3600 size += entry->end - entry->start;
3601 }
3602 }
3603
3604 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3605 vm_map_unlock(map);
3606 return ENOMEM;
3607 }
3608
3609 if (limit != 0 &&
3610 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3611 vm_map_unlock(map);
3612 return ENOMEM;
3613 }
3614
3615 /*
3616 * Pass 2.
3617 */
3618
3619 for (entry = map->header.next; entry != &map->header;
3620 entry = entry->next) {
3621 if (entry->protection == VM_PROT_NONE)
3622 continue;
3623 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3624
3625 /*
3626 * perform actions of vm_map_lookup that need the
3627 * write lock on the map: create an anonymous map
3628 * for a copy-on-write region, or an anonymous map
3629 * for a zero-fill region. (XXXCDC: submap case
3630 * ok?)
3631 */
3632
3633 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3634 if (UVM_ET_ISNEEDSCOPY(entry) &&
3635 ((entry->max_protection & VM_PROT_WRITE) ||
3636 (entry->object.uvm_obj == NULL))) {
3637 amap_copy(map, entry, 0, entry->start,
3638 entry->end);
3639 /* XXXCDC: wait OK? */
3640 }
3641 }
3642 }
3643 entry->wired_count++;
3644 }
3645
3646 /*
3647 * Pass 3.
3648 */
3649
3650 #ifdef DIAGNOSTIC
3651 timestamp_save = map->timestamp;
3652 #endif
3653 vm_map_busy(map);
3654 vm_map_unlock(map);
3655
3656 rv = 0;
3657 for (entry = map->header.next; entry != &map->header;
3658 entry = entry->next) {
3659 if (entry->wired_count == 1) {
3660 rv = uvm_fault_wire(map, entry->start, entry->end,
3661 entry->max_protection, 1);
3662 if (rv) {
3663
3664 /*
3665 * wiring failed. break out of the loop.
3666 * we'll clean up the map below, once we
3667 * have a write lock again.
3668 */
3669
3670 break;
3671 }
3672 }
3673 }
3674
3675 if (rv) {
3676
3677 /*
3678 * Get back an exclusive (write) lock.
3679 */
3680
3681 vm_map_lock(map);
3682 vm_map_unbusy(map);
3683
3684 #ifdef DIAGNOSTIC
3685 if (timestamp_save + 1 != map->timestamp)
3686 panic("uvm_map_pageable_all: stale map");
3687 #endif
3688
3689 /*
3690 * first drop the wiring count on all the entries
3691 * which haven't actually been wired yet.
3692 *
3693 * Skip VM_PROT_NONE entries like we did above.
3694 */
3695
3696 failed_entry = entry;
3697 for (/* nothing */; entry != &map->header;
3698 entry = entry->next) {
3699 if (entry->protection == VM_PROT_NONE)
3700 continue;
3701 entry->wired_count--;
3702 }
3703
3704 /*
3705 * now, unwire all the entries that were successfully
3706 * wired above.
3707 *
3708 * Skip VM_PROT_NONE entries like we did above.
3709 */
3710
3711 for (entry = map->header.next; entry != failed_entry;
3712 entry = entry->next) {
3713 if (entry->protection == VM_PROT_NONE)
3714 continue;
3715 entry->wired_count--;
3716 if (VM_MAPENT_ISWIRED(entry))
3717 uvm_map_entry_unwire(map, entry);
3718 }
3719 vm_map_unlock(map);
3720 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3721 return (rv);
3722 }
3723
3724 vm_map_unbusy(map);
3725
3726 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3727 return 0;
3728 }
3729
3730 /*
3731 * uvm_map_clean: clean out a map range
3732 *
3733 * => valid flags:
3734 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3735 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3736 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3737 * if (flags & PGO_FREE): any cached pages are freed after clean
3738 * => returns an error if any part of the specified range isn't mapped
3739 * => never a need to flush amap layer since the anonymous memory has
3740 * no permanent home, but may deactivate pages there
3741 * => called from sys_msync() and sys_madvise()
3742 * => caller must not write-lock map (read OK).
3743 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3744 */
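
/*
 * Illustrative mapping of caller semantics to flags (editorial sketch;
 * the exact flag choices live in the msync/madvise syscall code):
 *
 *	msync(MS_ASYNC)		-> PGO_CLEANIT
 *	msync(MS_SYNC)		-> PGO_CLEANIT | PGO_SYNCIO
 *	msync(MS_INVALIDATE)	-> PGO_FREE
 *	madvise(MADV_DONTNEED)	-> PGO_DEACTIVATE
 *	madvise(MADV_FREE)	-> PGO_FREE
 */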
3745
3746 int
3747 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3748 {
3749 struct vm_map_entry *current, *entry;
3750 struct uvm_object *uobj;
3751 struct vm_amap *amap;
3752 struct vm_anon *anon, *anon_tofree;
3753 struct vm_page *pg;
3754 vaddr_t offset;
3755 vsize_t size;
3756 voff_t uoff;
3757 int error, refs;
3758 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3759
3760 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3761 map, start, end, flags);
3762 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3763 (PGO_FREE|PGO_DEACTIVATE));
3764
3765 vm_map_lock_read(map);
3766 VM_MAP_RANGE_CHECK(map, start, end);
3767 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3768 vm_map_unlock_read(map);
3769 return EFAULT;
3770 }
3771
3772 /*
3773 * Make a first pass to check for holes and wiring problems.
3774 */
3775
3776 for (current = entry; current->start < end; current = current->next) {
3777 if (UVM_ET_ISSUBMAP(current)) {
3778 vm_map_unlock_read(map);
3779 return EINVAL;
3780 }
3781 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3782 vm_map_unlock_read(map);
3783 return EBUSY;
3784 }
3785 if (end <= current->end) {
3786 break;
3787 }
3788 if (current->end != current->next->start) {
3789 vm_map_unlock_read(map);
3790 return EFAULT;
3791 }
3792 }
3793
3794 error = 0;
3795 for (current = entry; start < end; current = current->next) {
3796 amap = current->aref.ar_amap; /* upper layer */
3797 uobj = current->object.uvm_obj; /* lower layer */
3798 KASSERT(start >= current->start);
3799
3800 /*
3801 * No amap cleaning necessary if:
3802 *
3803 * (1) There's no amap.
3804 *
3805 * (2) We're not deactivating or freeing pages.
3806 */
3807
3808 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3809 goto flush_object;
3810
3811 offset = start - current->start;
3812 size = MIN(end, current->end) - start;
3813 anon_tofree = NULL;
3814
3815 amap_lock(amap);
3816 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3817 			anon = amap_lookup(&current->aref, offset);
3818 if (anon == NULL)
3819 continue;
3820
3821 KASSERT(anon->an_lock == amap->am_lock);
3822 pg = anon->an_page;
3823 if (pg == NULL) {
3824 continue;
3825 }
3826
3827 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3828
3829 /*
3830 * In these first 3 cases, we just deactivate the page.
3831 */
3832
3833 case PGO_CLEANIT|PGO_FREE:
3834 case PGO_CLEANIT|PGO_DEACTIVATE:
3835 case PGO_DEACTIVATE:
3836 deactivate_it:
3837 /*
3838 * skip the page if it's loaned or wired,
3839 * since it shouldn't be on a paging queue
3840 * at all in these cases.
3841 */
3842
3843 mutex_enter(&uvm_pageqlock);
3844 if (pg->loan_count != 0 ||
3845 pg->wire_count != 0) {
3846 mutex_exit(&uvm_pageqlock);
3847 continue;
3848 }
3849 KASSERT(pg->uanon == anon);
3850 uvm_pagedeactivate(pg);
3851 mutex_exit(&uvm_pageqlock);
3852 continue;
3853
3854 case PGO_FREE:
3855
3856 /*
3857 * If there are multiple references to
3858 * the amap, just deactivate the page.
3859 */
3860
3861 if (amap_refs(amap) > 1)
3862 goto deactivate_it;
3863
3864 /* skip the page if it's wired */
3865 if (pg->wire_count != 0) {
3866 continue;
3867 }
3868 				amap_unadd(&current->aref, offset);
3869 refs = --anon->an_ref;
3870 if (refs == 0) {
3871 anon->an_link = anon_tofree;
3872 anon_tofree = anon;
3873 }
3874 continue;
3875 }
3876 }
3877 uvm_anon_freelst(amap, anon_tofree);
3878
3879 flush_object:
3880 /*
3881 * flush pages if we've got a valid backing object.
3882 * note that we must always clean object pages before
3883 * freeing them since otherwise we could reveal stale
3884 * data from files.
3885 */
3886
3887 uoff = current->offset + (start - current->start);
3888 size = MIN(end, current->end) - start;
3889 if (uobj != NULL) {
3890 mutex_enter(uobj->vmobjlock);
3891 if (uobj->pgops->pgo_put != NULL)
3892 error = (uobj->pgops->pgo_put)(uobj, uoff,
3893 uoff + size, flags | PGO_CLEANIT);
3894 else
3895 error = 0;
3896 }
3897 start += size;
3898 }
3899 vm_map_unlock_read(map);
3900 return (error);
3901 }
3902
3903
3904 /*
3905 * uvm_map_checkprot: check protection in map
3906 *
3907 * => must allow specified protection in a fully allocated region.
3908 * => map must be read or write locked by caller.
3909 */
3910
3911 bool
3912 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3913 vm_prot_t protection)
3914 {
3915 struct vm_map_entry *entry;
3916 struct vm_map_entry *tmp_entry;
3917
3918 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3919 return (false);
3920 }
3921 entry = tmp_entry;
3922 while (start < end) {
3923 if (entry == &map->header) {
3924 return (false);
3925 }
3926
3927 /*
3928 * no holes allowed
3929 */
3930
3931 if (start < entry->start) {
3932 return (false);
3933 }
3934
3935 /*
3936 * check protection associated with entry
3937 */
3938
3939 if ((entry->protection & protection) != protection) {
3940 return (false);
3941 }
3942 start = entry->end;
3943 entry = entry->next;
3944 }
3945 return (true);
3946 }
3947
3948 /*
3949 * uvmspace_alloc: allocate a vmspace structure.
3950 *
3951 * - structure includes vm_map and pmap
3952 * - XXX: no locking on this structure
3953 * - refcnt set to 1, rest must be init'd by caller
3954 */
3955 struct vmspace *
3956 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
3957 {
3958 struct vmspace *vm;
3959 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3960
3961 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
3962 uvmspace_init(vm, NULL, vmin, vmax);
3963 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3964 return (vm);
3965 }
3966
3967 /*
3968 * uvmspace_init: initialize a vmspace structure.
3969 *
3970 * - XXX: no locking on this structure
3971 * - refcnt set to 1, rest must be init'd by caller
3972 */
3973 void
3974 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
3975 {
3976 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3977
3978 memset(vm, 0, sizeof(*vm));
3979 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
3980 #ifdef __USING_TOPDOWN_VM
3981 | VM_MAP_TOPDOWN
3982 #endif
3983 );
3984 if (pmap)
3985 pmap_reference(pmap);
3986 else
3987 pmap = pmap_create();
3988 vm->vm_map.pmap = pmap;
3989 vm->vm_refcnt = 1;
3990 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3991 }
3992
3993 /*
3994 * uvmspace_share: share a vmspace between two processes
3995 *
3996 * - used for vfork, threads(?)
3997 */
3998
3999 void
4000 uvmspace_share(struct proc *p1, struct proc *p2)
4001 {
4002
4003 uvmspace_addref(p1->p_vmspace);
4004 p2->p_vmspace = p1->p_vmspace;
4005 }
4006
4007 #if 0
4008
4009 /*
4010 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4011 *
4012 * - XXX: no locking on vmspace
4013 */
4014
4015 void
4016 uvmspace_unshare(struct lwp *l)
4017 {
4018 struct proc *p = l->l_proc;
4019 struct vmspace *nvm, *ovm = p->p_vmspace;
4020
4021 if (ovm->vm_refcnt == 1)
4022 /* nothing to do: vmspace isn't shared in the first place */
4023 return;
4024
4025 /* make a new vmspace, still holding old one */
4026 nvm = uvmspace_fork(ovm);
4027
4028 kpreempt_disable();
4029 pmap_deactivate(l); /* unbind old vmspace */
4030 p->p_vmspace = nvm;
4031 pmap_activate(l); /* switch to new vmspace */
4032 kpreempt_enable();
4033
4034 uvmspace_free(ovm); /* drop reference to old vmspace */
4035 }
4036
4037 #endif
4038
4039 /*
4040 * uvmspace_exec: the process wants to exec a new program
4041 */
4042
4043 void
4044 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4045 {
4046 struct proc *p = l->l_proc;
4047 struct vmspace *nvm, *ovm = p->p_vmspace;
4048 struct vm_map *map;
4049
4050 #ifdef __HAVE_CPU_VMSPACE_EXEC
4051 cpu_vmspace_exec(l, start, end);
4052 #endif
4053
4054 /*
4055 * Special case: no vmspace yet (see posix_spawn) -
4056 * no races possible in this case.
4057 */
4058 if (ovm == NULL) {
4059 p->p_vmspace = uvmspace_alloc(start, end);
4060 pmap_activate(l);
4061 return;
4062 }
4063
4064 map = &ovm->vm_map;
4065 /*
4066 * see if more than one process is using this vmspace...
4067 */
4068
4069 if (ovm->vm_refcnt == 1) {
4070
4071 /*
4072 * if p is the only process using its vmspace then we can safely
4073 * recycle that vmspace for the program that is being exec'd.
4074 */
4075
4076 #ifdef SYSVSHM
4077 /*
4078 * SYSV SHM semantics require us to kill all segments on an exec
4079 */
4080
4081 if (ovm->vm_shm)
4082 shmexit(ovm);
4083 #endif
4084
4085 /*
4086 * POSIX 1003.1b -- "lock future mappings" is revoked
4087 * when a process execs another program image.
4088 */
4089
4090 map->flags &= ~VM_MAP_WIREFUTURE;
4091
4092 /*
4093 * now unmap the old program
4094 */
4095
4096 pmap_remove_all(map->pmap);
4097 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4098 KASSERT(map->header.prev == &map->header);
4099 KASSERT(map->nentries == 0);
4100
4101 /*
4102 * resize the map
4103 */
4104
4105 vm_map_setmin(map, start);
4106 vm_map_setmax(map, end);
4107 } else {
4108
4109 /*
4110 		 * p's vmspace is shared with other processes, so it
4111 		 * can't simply be recycled for p.  allocate a new
4112 		 * vmspace for p instead.
4113 */
4114
4115 nvm = uvmspace_alloc(start, end);
4116
4117 /*
4118 * install new vmspace and drop our ref to the old one.
4119 */
4120
4121 kpreempt_disable();
4122 pmap_deactivate(l);
4123 p->p_vmspace = nvm;
4124 pmap_activate(l);
4125 kpreempt_enable();
4126
4127 uvmspace_free(ovm);
4128 }
4129 }
4130
4131 /*
4132  * uvmspace_addref: add a reference to a vmspace.
4133 */
4134
4135 void
4136 uvmspace_addref(struct vmspace *vm)
4137 {
4138 struct vm_map *map = &vm->vm_map;
4139
4140 KASSERT((map->flags & VM_MAP_DYING) == 0);
4141
4142 mutex_enter(&map->misc_lock);
4143 KASSERT(vm->vm_refcnt > 0);
4144 vm->vm_refcnt++;
4145 mutex_exit(&map->misc_lock);
4146 }
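
/*
 * Example (illustrative sketch; "foo" is a hypothetical consumer): any
 * subsystem that caches a vmspace pointer must take its own reference
 * and drop it with uvmspace_free() when done:
 *
 *	uvmspace_addref(p->p_vmspace);
 *	foo->foo_vmspace = p->p_vmspace;
 *	...
 *	uvmspace_free(foo->foo_vmspace);
 */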
4147
4148 /*
4149 * uvmspace_free: free a vmspace data structure
4150 */
4151
4152 void
4153 uvmspace_free(struct vmspace *vm)
4154 {
4155 struct vm_map_entry *dead_entries;
4156 struct vm_map *map = &vm->vm_map;
4157 int n;
4158
4159 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4160
4161 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4162 mutex_enter(&map->misc_lock);
4163 n = --vm->vm_refcnt;
4164 mutex_exit(&map->misc_lock);
4165 if (n > 0)
4166 return;
4167
4168 /*
4169 * at this point, there should be no other references to the map.
4170 * delete all of the mappings, then destroy the pmap.
4171 */
4172
4173 map->flags |= VM_MAP_DYING;
4174 pmap_remove_all(map->pmap);
4175 #ifdef SYSVSHM
4176 /* Get rid of any SYSV shared memory segments. */
4177 if (vm->vm_shm != NULL)
4178 shmexit(vm);
4179 #endif
4180 if (map->nentries) {
4181 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4182 &dead_entries, 0);
4183 if (dead_entries != NULL)
4184 uvm_unmap_detach(dead_entries, 0);
4185 }
4186 KASSERT(map->nentries == 0);
4187 KASSERT(map->size == 0);
4188 mutex_destroy(&map->misc_lock);
4189 mutex_destroy(&map->mutex);
4190 rw_destroy(&map->lock);
4191 cv_destroy(&map->cv);
4192 pmap_destroy(map->pmap);
4193 pool_cache_put(&uvm_vmspace_cache, vm);
4194 }
4195
4196 /*
4197 * F O R K - m a i n e n t r y p o i n t
4198 */
4199 /*
4200 * uvmspace_fork: fork a process' main map
4201 *
4202 * => create a new vmspace for child process from parent.
4203 * => parent's map must not be locked.
4204 */
4205
4206 struct vmspace *
4207 uvmspace_fork(struct vmspace *vm1)
4208 {
4209 struct vmspace *vm2;
4210 struct vm_map *old_map = &vm1->vm_map;
4211 struct vm_map *new_map;
4212 struct vm_map_entry *old_entry;
4213 struct vm_map_entry *new_entry;
4214 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4215
4216 vm_map_lock(old_map);
4217
4218 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4219 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4220 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4221 new_map = &vm2->vm_map; /* XXX */
4222
4223 old_entry = old_map->header.next;
4224 new_map->size = old_map->size;
4225
4226 /*
4227 * go entry-by-entry
4228 */
4229
4230 while (old_entry != &old_map->header) {
4231
4232 /*
4233 * first, some sanity checks on the old entry
4234 */
4235
4236 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4237 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4238 !UVM_ET_ISNEEDSCOPY(old_entry));
4239
4240 switch (old_entry->inheritance) {
4241 case MAP_INHERIT_NONE:
4242
4243 /*
4244 * drop the mapping, modify size
4245 */
4246 new_map->size -= old_entry->end - old_entry->start;
4247 break;
4248
4249 case MAP_INHERIT_SHARE:
4250
4251 /*
4252 * share the mapping: this means we want the old and
4253 * new entries to share amaps and backing objects.
4254 */
4255 /*
4256 * if the old_entry needs a new amap (due to prev fork)
4257 * then we need to allocate it now so that we have
4258 * something we own to share with the new_entry. [in
4259 * other words, we need to clear needs_copy]
4260 */
4261
4262 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4263 /* get our own amap, clears needs_copy */
4264 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4265 0, 0);
4266 /* XXXCDC: WAITOK??? */
4267 }
4268
4269 new_entry = uvm_mapent_alloc(new_map, 0);
4270 /* old_entry -> new_entry */
4271 uvm_mapent_copy(old_entry, new_entry);
4272
4273 /* new pmap has nothing wired in it */
4274 new_entry->wired_count = 0;
4275
4276 /*
4277 * gain reference to object backing the map (can't
4278 * be a submap, already checked this case).
4279 */
4280
4281 if (new_entry->aref.ar_amap)
4282 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4283
4284 if (new_entry->object.uvm_obj &&
4285 new_entry->object.uvm_obj->pgops->pgo_reference)
4286 new_entry->object.uvm_obj->
4287 pgops->pgo_reference(
4288 new_entry->object.uvm_obj);
4289
4290 /* insert entry at end of new_map's entry list */
4291 uvm_map_entry_link(new_map, new_map->header.prev,
4292 new_entry);
4293
4294 break;
4295
4296 case MAP_INHERIT_COPY:
4297
4298 /*
4299 * copy-on-write the mapping (using mmap's
4300 * MAP_PRIVATE semantics)
4301 *
4302 * allocate new_entry, adjust reference counts.
4303 * (note that new references are read-only).
4304 */
4305
4306 new_entry = uvm_mapent_alloc(new_map, 0);
4307 /* old_entry -> new_entry */
4308 uvm_mapent_copy(old_entry, new_entry);
4309
4310 if (new_entry->aref.ar_amap)
4311 uvm_map_reference_amap(new_entry, 0);
4312
4313 if (new_entry->object.uvm_obj &&
4314 new_entry->object.uvm_obj->pgops->pgo_reference)
4315 new_entry->object.uvm_obj->pgops->pgo_reference
4316 (new_entry->object.uvm_obj);
4317
4318 /* new pmap has nothing wired in it */
4319 new_entry->wired_count = 0;
4320
4321 new_entry->etype |=
4322 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4323 uvm_map_entry_link(new_map, new_map->header.prev,
4324 new_entry);
4325
4326 /*
4327 * the new entry will need an amap. it will either
4328 * need to be copied from the old entry or created
4329 * from scratch (if the old entry does not have an
4330 * amap). can we defer this process until later
4331 * (by setting "needs_copy") or do we need to copy
4332 * the amap now?
4333 *
4334 * we must copy the amap now if any of the following
4335 * conditions hold:
4336 * 1. the old entry has an amap and that amap is
4337 * being shared. this means that the old (parent)
4338 * process is sharing the amap with another
4339 * process. if we do not clear needs_copy here
4340 * we will end up in a situation where both the
4341 		 *    parent and child process are referring to the
4342 * same amap with "needs_copy" set. if the
4343 * parent write-faults, the fault routine will
4344 * clear "needs_copy" in the parent by allocating
4345 * a new amap. this is wrong because the
4346 * parent is supposed to be sharing the old amap
4347 * and the new amap will break that.
4348 *
4349 * 2. if the old entry has an amap and a non-zero
4350 * wire count then we are going to have to call
4351 * amap_cow_now to avoid page faults in the
4352 * parent process. since amap_cow_now requires
4353 * "needs_copy" to be clear we might as well
4354 * clear it here as well.
4355 *
4356 */
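			/*
			 * restated compactly, the test implemented just
			 * below is:
			 *
			 *	copy the amap now  <=>  the old entry has
			 *	an amap AND (that amap is shared OR the
			 *	entry is wired)
			 *
			 * everything else defers the copy by leaving
			 * "needs_copy" set.
			 */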
4357
4358 if (old_entry->aref.ar_amap != NULL) {
4359 if ((amap_flags(old_entry->aref.ar_amap) &
4360 AMAP_SHARED) != 0 ||
4361 VM_MAPENT_ISWIRED(old_entry)) {
4362
4363 amap_copy(new_map, new_entry,
4364 AMAP_COPY_NOCHUNK, 0, 0);
4365 /* XXXCDC: M_WAITOK ... ok? */
4366 }
4367 }
4368
4369 /*
4370 * if the parent's entry is wired down, then the
4371 * parent process does not want page faults on
4372 * access to that memory. this means that we
4373 * cannot do copy-on-write because we can't write
4374 * protect the old entry. in this case we
4375 * resolve all copy-on-write faults now, using
4376 * amap_cow_now. note that we have already
4377 * allocated any needed amap (above).
4378 */
4379
4380 if (VM_MAPENT_ISWIRED(old_entry)) {
4381
4382 /*
4383 * resolve all copy-on-write faults now
4384 * (note that there is nothing to do if
4385 * the old mapping does not have an amap).
4386 */
4387 if (old_entry->aref.ar_amap)
4388 amap_cow_now(new_map, new_entry);
4389
4390 } else {
4391
4392 /*
4393 				 * set up mappings to trigger copy-on-write faults:
4394 				 * we must write-protect the parent if it has
4395 * an amap and it is not already "needs_copy"...
4396 * if it is already "needs_copy" then the parent
4397 * has already been write-protected by a previous
4398 * fork operation.
4399 */
4400
4401 if (old_entry->aref.ar_amap &&
4402 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4403 if (old_entry->max_protection & VM_PROT_WRITE) {
4404 pmap_protect(old_map->pmap,
4405 old_entry->start,
4406 old_entry->end,
4407 old_entry->protection &
4408 ~VM_PROT_WRITE);
4409 }
4410 old_entry->etype |= UVM_ET_NEEDSCOPY;
4411 }
4412 }
4413 break;
4414 } /* end of switch statement */
4415 old_entry = old_entry->next;
4416 }
4417
4418 pmap_update(old_map->pmap);
4419 vm_map_unlock(old_map);
4420
4421 #ifdef SYSVSHM
4422 if (vm1->vm_shm)
4423 shmfork(vm1, vm2);
4424 #endif
4425
4426 #ifdef PMAP_FORK
4427 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4428 #endif
4429
4430 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4431 return (vm2);
4432 }
4433
4434
4435 /*
4436 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4437 *
4438 * => called with map locked.
4439  * => return non-zero if successfully merged.
4440 */
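
/*
 * Worked example (illustrative, with made-up numbers): two adjacent
 * amap-less entries backed by the same object,
 *
 *	entry: [0x1000, 0x3000), offset 0x0000
 *	next:  [0x3000, 0x5000), offset 0x2000
 *
 * pass the forward-merge test below (contiguous VA, contiguous object
 * offsets, compatible etype/protection/inheritance/etc.), so "entry"
 * grows to [0x1000, 0x5000), "next" is unlinked and freed, and one of
 * the two object references is dropped via pgo_detach.
 */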
4441
4442 int
4443 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4444 {
4445 struct uvm_object *uobj;
4446 struct vm_map_entry *next;
4447 struct vm_map_entry *prev;
4448 vsize_t size;
4449 int merged = 0;
4450 bool copying;
4451 int newetype;
4452
4453 if (entry->aref.ar_amap != NULL) {
4454 return 0;
4455 }
4456 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4457 return 0;
4458 }
4459
4460 uobj = entry->object.uvm_obj;
4461 size = entry->end - entry->start;
4462 copying = (flags & UVM_MERGE_COPYING) != 0;
4463 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4464
4465 next = entry->next;
4466 if (next != &map->header &&
4467 next->start == entry->end &&
4468 ((copying && next->aref.ar_amap != NULL &&
4469 amap_refs(next->aref.ar_amap) == 1) ||
4470 (!copying && next->aref.ar_amap == NULL)) &&
4471 UVM_ET_ISCOMPATIBLE(next, newetype,
4472 uobj, entry->flags, entry->protection,
4473 entry->max_protection, entry->inheritance, entry->advice,
4474 entry->wired_count) &&
4475 (uobj == NULL || entry->offset + size == next->offset)) {
4476 int error;
4477
4478 if (copying) {
4479 error = amap_extend(next, size,
4480 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4481 } else {
4482 error = 0;
4483 }
4484 if (error == 0) {
4485 if (uobj) {
4486 if (uobj->pgops->pgo_detach) {
4487 uobj->pgops->pgo_detach(uobj);
4488 }
4489 }
4490
4491 entry->end = next->end;
4492 clear_hints(map, next);
4493 uvm_map_entry_unlink(map, next);
4494 if (copying) {
4495 entry->aref = next->aref;
4496 entry->etype &= ~UVM_ET_NEEDSCOPY;
4497 }
4498 uvm_map_check(map, "trymerge forwardmerge");
4499 uvm_mapent_free(next);
4500 merged++;
4501 }
4502 }
4503
4504 prev = entry->prev;
4505 if (prev != &map->header &&
4506 prev->end == entry->start &&
4507 ((copying && !merged && prev->aref.ar_amap != NULL &&
4508 amap_refs(prev->aref.ar_amap) == 1) ||
4509 (!copying && prev->aref.ar_amap == NULL)) &&
4510 UVM_ET_ISCOMPATIBLE(prev, newetype,
4511 uobj, entry->flags, entry->protection,
4512 entry->max_protection, entry->inheritance, entry->advice,
4513 entry->wired_count) &&
4514 (uobj == NULL ||
4515 prev->offset + prev->end - prev->start == entry->offset)) {
4516 int error;
4517
4518 if (copying) {
4519 error = amap_extend(prev, size,
4520 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4521 } else {
4522 error = 0;
4523 }
4524 if (error == 0) {
4525 if (uobj) {
4526 if (uobj->pgops->pgo_detach) {
4527 uobj->pgops->pgo_detach(uobj);
4528 }
4529 entry->offset = prev->offset;
4530 }
4531
4532 entry->start = prev->start;
4533 clear_hints(map, prev);
4534 uvm_map_entry_unlink(map, prev);
4535 if (copying) {
4536 entry->aref = prev->aref;
4537 entry->etype &= ~UVM_ET_NEEDSCOPY;
4538 }
4539 uvm_map_check(map, "trymerge backmerge");
4540 uvm_mapent_free(prev);
4541 merged++;
4542 }
4543 }
4544
4545 return merged;
4546 }
4547
4548 /*
4549 * uvm_map_setup: init map
4550 *
4551 * => map must not be in service yet.
4552 */
4553
4554 void
4555 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4556 {
4557 int ipl;
4558
4559 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
4560 map->header.next = map->header.prev = &map->header;
4561 map->nentries = 0;
4562 map->size = 0;
4563 map->ref_count = 1;
4564 vm_map_setmin(map, vmin);
4565 vm_map_setmax(map, vmax);
4566 map->flags = flags;
4567 map->first_free = &map->header;
4568 map->hint = &map->header;
4569 map->timestamp = 0;
4570 map->busy = NULL;
4571
4572 if ((flags & VM_MAP_INTRSAFE) != 0) {
4573 ipl = IPL_VM;
4574 } else {
4575 ipl = IPL_NONE;
4576 }
4577
4578 rw_init(&map->lock);
4579 cv_init(&map->cv, "vm_map");
4580 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
4581 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
4582 }
4583
4584
4585 /*
4586 * U N M A P - m a i n e n t r y p o i n t
4587 */
4588
4589 /*
4590  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
4591 *
4592 * => caller must check alignment and size
4593 * => map must be unlocked (we will lock it)
4594 * => flags is UVM_FLAG_QUANTUM or 0.
4595 */
4596
4597 void
4598 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4599 {
4600 struct vm_map_entry *dead_entries;
4601 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
4602
4603 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
4604 map, start, end, 0);
4605 if (map == kernel_map) {
4606 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
4607 }
4608 	/*
4609 	 * the work is now done by helper functions.  wipe the pmap
4610 	 * mappings and then detach from the dead entries...
4611 	 */
4612 vm_map_lock(map);
4613 uvm_unmap_remove(map, start, end, &dead_entries, flags);
4614 vm_map_unlock(map);
4615
4616 if (dead_entries != NULL)
4617 uvm_unmap_detach(dead_entries, 0);
4618
4619 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
4620 }
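
/*
 * Note (illustrative): most callers use the uvm_unmap() wrapper, which
 * is equivalent to calling uvm_unmap1() with flags of 0, e.g.:
 *
 *	uvm_unmap(map, vm_map_min(map), vm_map_max(map));
 */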
4621
4622
4623 /*
4624 * uvm_map_reference: add reference to a map
4625 *
4626 * => map need not be locked (we use misc_lock).
4627 */
4628
4629 void
4630 uvm_map_reference(struct vm_map *map)
4631 {
4632 mutex_enter(&map->misc_lock);
4633 map->ref_count++;
4634 mutex_exit(&map->misc_lock);
4635 }
4636
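/*
 * vm_map_starved_p: return true if the map is short on virtual address
 * space: either a thread is already waiting for VA (VM_MAP_WANTVA), or
 * most of the map's range is allocated.
 */
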
4637 bool
4638 vm_map_starved_p(struct vm_map *map)
4639 {
4640
4641 if ((map->flags & VM_MAP_WANTVA) != 0) {
4642 return true;
4643 }
4644 	/* XXX: heuristic: starved if over 15/16 of the VA range is in use */
4645 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
4646 return true;
4647 }
4648 return false;
4649 }
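
/*
 * Worked example (illustrative): for a map spanning 256MB of VA,
 * 256MB / 16 * 15 = 240MB, so the map is reported starved once
 * map->size exceeds 240MB.
 */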
4650
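/*
 * uvm_map_lock_entry: lock the amap and the backing object (if any) of
 * a map entry, in that order.  uvm_map_unlock_entry() below releases
 * them in the reverse order.
 */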
4651 void
4652 uvm_map_lock_entry(struct vm_map_entry *entry)
4653 {
4654
4655 if (entry->aref.ar_amap != NULL) {
4656 amap_lock(entry->aref.ar_amap);
4657 }
4658 if (UVM_ET_ISOBJ(entry)) {
4659 mutex_enter(entry->object.uvm_obj->vmobjlock);
4660 }
4661 }
4662
4663 void
4664 uvm_map_unlock_entry(struct vm_map_entry *entry)
4665 {
4666
4667 if (UVM_ET_ISOBJ(entry)) {
4668 mutex_exit(entry->object.uvm_obj->vmobjlock);
4669 }
4670 if (entry->aref.ar_amap != NULL) {
4671 amap_unlock(entry->aref.ar_amap);
4672 }
4673 }
4674
4675 #if defined(DDB) || defined(DEBUGPRINT)
4676
4677 /*
4678 * uvm_map_printit: actually prints the map
4679 */
4680
4681 void
4682 uvm_map_printit(struct vm_map *map, bool full,
4683 void (*pr)(const char *, ...))
4684 {
4685 struct vm_map_entry *entry;
4686
4687 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
4688 vm_map_max(map));
4689 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
4690 map->nentries, map->size, map->ref_count, map->timestamp,
4691 map->flags);
4692 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4693 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4694 if (!full)
4695 return;
4696 for (entry = map->header.next; entry != &map->header;
4697 entry = entry->next) {
4698 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
4699 entry, entry->start, entry->end, entry->object.uvm_obj,
4700 (long long)entry->offset, entry->aref.ar_amap,
4701 entry->aref.ar_pageoff);
4702 (*pr)(
4703 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4704 "wc=%d, adv=%d\n",
4705 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4706 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4707 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4708 entry->protection, entry->max_protection,
4709 entry->inheritance, entry->wired_count, entry->advice);
4710 }
4711 }
4712
4713 void
4714 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
4715 {
4716 struct vm_map *map;
4717
4718 for (map = kernel_map;;) {
4719 struct vm_map_entry *entry;
4720
4721 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
4722 break;
4723 }
4724 (*pr)("%p is %p+%zu from VMMAP %p\n",
4725 (void *)addr, (void *)entry->start,
4726 (size_t)(addr - (uintptr_t)entry->start), map);
4727 if (!UVM_ET_ISSUBMAP(entry)) {
4728 break;
4729 }
4730 map = entry->object.sub_map;
4731 }
4732 }
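
/*
 * Example ddb output (illustrative, with made-up addresses; the offset
 * is printed in decimal, so 0x84 appears as 132):
 *
 *	0xc2003084 is 0xc2003000+132 from VMMAP 0xc0a011a0
 */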
4733
4734 #endif /* DDB || DEBUGPRINT */
4735
4736 #ifndef __USER_VA0_IS_SAFE
4737 static int
4738 sysctl_user_va0_disable(SYSCTLFN_ARGS)
4739 {
4740 struct sysctlnode node;
4741 int t, error;
4742
4743 node = *rnode;
4744 node.sysctl_data = &t;
4745 t = user_va0_disable;
4746 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4747 if (error || newp == NULL)
4748 return (error);
4749
4750 /* lower only at securelevel < 1 */
4751 if (!t && user_va0_disable &&
4752 kauth_authorize_system(l->l_cred,
4753 KAUTH_SYSTEM_CHSYSFLAGS /* XXX */, 0,
4754 NULL, NULL, NULL))
4755 return EPERM;
4756
4757 user_va0_disable = !!t;
4758 return 0;
4759 }
4760
4761 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
4762 {
4763
4764 sysctl_createv(clog, 0, NULL, NULL,
4765 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4766 CTLTYPE_INT, "user_va0_disable",
4767 SYSCTL_DESCR("Disable VA 0"),
4768 sysctl_user_va0_disable, 0, &user_va0_disable, 0,
4769 CTL_VM, CTL_CREATE, CTL_EOL);
4770 }
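
/*
 * Example (illustrative): from userland this node appears as
 * vm.user_va0_disable, e.g.
 *
 *	sysctl -w vm.user_va0_disable=1
 */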
4771 #endif
4772