1 /* $NetBSD: uvm_map.c,v 1.248 2008/01/02 11:49:18 ad Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Charles D. Cranor,
23 * Washington University, the University of California, Berkeley and
24 * its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
42 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Permission to use, copy, modify and distribute this software and
49 * its documentation is hereby granted, provided that both the copyright
50 * notice and this permission notice appear in all copies of the
51 * software, derivative works or modified versions, and any portions
52 * thereof, and that both notices appear in supporting documentation.
53 *
54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 *
58 * Carnegie Mellon requests users of this software to return to
59 *
60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 */
68
69 /*
70 * uvm_map.c: uvm map operations
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.248 2008/01/02 11:49:18 ad Exp $");
75
76 #include "opt_ddb.h"
77 #include "opt_uvmhist.h"
78 #include "opt_uvm.h"
79 #include "opt_sysv.h"
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/mman.h>
84 #include <sys/proc.h>
85 #include <sys/malloc.h>
86 #include <sys/pool.h>
87 #include <sys/kernel.h>
88 #include <sys/mount.h>
89 #include <sys/vnode.h>
90 #include <sys/lockdebug.h>
91 #include <sys/atomic.h>
92
93 #ifdef SYSVSHM
94 #include <sys/shm.h>
95 #endif
96
97 #include <uvm/uvm.h>
98 #undef RB_AUGMENT
99 #define RB_AUGMENT(x) uvm_rb_augment(x)
100
101 #ifdef DDB
102 #include <uvm/uvm_ddb.h>
103 #endif
104
105 #if defined(UVMMAP_NOCOUNTERS)
106
107 #define UVMMAP_EVCNT_DEFINE(name) /* nothing */
108 #define UVMMAP_EVCNT_INCR(ev) /* nothing */
109 #define UVMMAP_EVCNT_DECR(ev) /* nothing */
110
111 #else /* defined(UVMMAP_NOCOUNTERS) */
112
113 #include <sys/evcnt.h>
114 #define UVMMAP_EVCNT_DEFINE(name) \
115 struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
116 "uvmmap", #name); \
117 EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
118 #define UVMMAP_EVCNT_INCR(ev) uvmmap_evcnt_##ev.ev_count++
119 #define UVMMAP_EVCNT_DECR(ev) uvmmap_evcnt_##ev.ev_count--
120
121 #endif /* defined(UVMMAP_NOCOUNTERS) */
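
/*
 * Editorial note: with counters enabled, a line such as
 * UVMMAP_EVCNT_DEFINE(ubackmerge) below expands to roughly
 *
 *	struct evcnt uvmmap_evcnt_ubackmerge =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uvmmap", "ubackmerge");
 *	EVCNT_ATTACH_STATIC(uvmmap_evcnt_ubackmerge);
 *
 * i.e. each name becomes a statically attached "uvmmap" event counter
 * (visible with vmstat -e).
 */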
122
123 UVMMAP_EVCNT_DEFINE(ubackmerge)
124 UVMMAP_EVCNT_DEFINE(uforwmerge)
125 UVMMAP_EVCNT_DEFINE(ubimerge)
126 UVMMAP_EVCNT_DEFINE(unomerge)
127 UVMMAP_EVCNT_DEFINE(kbackmerge)
128 UVMMAP_EVCNT_DEFINE(kforwmerge)
129 UVMMAP_EVCNT_DEFINE(kbimerge)
130 UVMMAP_EVCNT_DEFINE(knomerge)
131 UVMMAP_EVCNT_DEFINE(map_call)
132 UVMMAP_EVCNT_DEFINE(mlk_call)
133 UVMMAP_EVCNT_DEFINE(mlk_hint)
134
135 UVMMAP_EVCNT_DEFINE(uke_alloc)
136 UVMMAP_EVCNT_DEFINE(uke_free)
137 UVMMAP_EVCNT_DEFINE(ukh_alloc)
138 UVMMAP_EVCNT_DEFINE(ukh_free)
139
140 const char vmmapbsy[] = "vmmapbsy";
141
142 /*
143 * cache for vmspace structures.
144 */
145
146 static struct pool_cache uvm_vmspace_cache;
147
148 /*
149 * cache for dynamically-allocated map entries.
150 */
151
152 static struct pool_cache uvm_map_entry_cache;
153
154 MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
155 MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
156
157 #ifdef PMAP_GROWKERNEL
158 /*
159 * This global represents the end of the kernel virtual address
160 * space. If we want to exceed this, we must grow the kernel
161 * virtual address space dynamically.
162 *
163 * Note, this variable is locked by kernel_map's lock.
164 */
165 vaddr_t uvm_maxkaddr;
166 #endif
167
168 /*
169 * macros
170 */
171
172 /*
173 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
174 * for the vm_map.
175 */
176 extern struct vm_map *pager_map; /* XXX */
177 #define VM_MAP_USE_KMAPENT_FLAGS(flags) \
178 (((flags) & VM_MAP_INTRSAFE) != 0)
179 #define VM_MAP_USE_KMAPENT(map) \
180 (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)
181
182 /*
183 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
184 */
185
186 #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
187 prot, maxprot, inh, adv, wire) \
188 ((ent)->etype == (type) && \
189 (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
190 == 0 && \
191 (ent)->object.uvm_obj == (uobj) && \
192 (ent)->protection == (prot) && \
193 (ent)->max_protection == (maxprot) && \
194 (ent)->inheritance == (inh) && \
195 (ent)->advice == (adv) && \
196 (ent)->wired_count == (wire))
197
198 /*
199 * uvm_map_entry_link: insert entry into a map
200 *
201 * => map must be locked
202 */
203 #define uvm_map_entry_link(map, after_where, entry) do { \
204 uvm_mapent_check(entry); \
205 (map)->nentries++; \
206 (entry)->prev = (after_where); \
207 (entry)->next = (after_where)->next; \
208 (entry)->prev->next = (entry); \
209 (entry)->next->prev = (entry); \
210 uvm_rb_insert((map), (entry)); \
211 } while (/*CONSTCOND*/ 0)
212
213 /*
214 * uvm_map_entry_unlink: remove entry from a map
215 *
216 * => map must be locked
217 */
218 #define uvm_map_entry_unlink(map, entry) do { \
219 KASSERT((entry) != (map)->first_free); \
220 KASSERT((entry) != (map)->hint); \
221 uvm_mapent_check(entry); \
222 (map)->nentries--; \
223 (entry)->next->prev = (entry)->prev; \
224 (entry)->prev->next = (entry)->next; \
225 uvm_rb_remove((map), (entry)); \
226 } while (/*CONSTCOND*/ 0)
227
228 /*
229 * SAVE_HINT: saves the specified entry as the hint for future lookups.
230 *
231 * => map need not be locked.
232 */
233 #define SAVE_HINT(map, check, value) do { \
234 atomic_cas_ptr(&(map)->hint, (check), (value)); \
235 } while (/*CONSTCOND*/ 0)
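
/*
 * Editorial note: SAVE_HINT may run without the map locked, so it uses
 * a compare-and-swap: the hint is replaced only if it still equals the
 * value the caller last observed, which keeps a stale updater from
 * overwriting a newer hint.  A minimal userspace sketch of the same
 * idea, assuming C11 <stdatomic.h> and a hypothetical struct entry:
 */
#if 0
#include <stdatomic.h>

struct entry;
static _Atomic(struct entry *) hint;

static void
save_hint(struct entry *check, struct entry *value)
{

	/* Install "value" only if nobody changed the hint since "check". */
	(void)atomic_compare_exchange_strong(&hint, &check, value);
}
#endif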
236
237 /*
238 * clear_hints: ensure that hints don't point to the entry.
239 *
240 * => map must be write-locked.
241 */
242 static void
243 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
244 {
245
246 SAVE_HINT(map, ent, ent->prev);
247 if (map->first_free == ent) {
248 map->first_free = ent->prev;
249 }
250 }
251
252 /*
253 * VM_MAP_RANGE_CHECK: check and correct range
254 *
255 * => map must at least be read locked
256 */
257
258 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
259 if (start < vm_map_min(map)) \
260 start = vm_map_min(map); \
261 if (end > vm_map_max(map)) \
262 end = vm_map_max(map); \
263 if (start > end) \
264 start = end; \
265 } while (/*CONSTCOND*/ 0)
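
/*
 * Worked example (editorial): for a map spanning [0x1000, 0x9000), a
 * request for [0x0, 0xffff0000) is clamped to [0x1000, 0x9000), and a
 * request lying entirely outside the map collapses to an empty range
 * (start == end), so callers see at most the map's own extent.
 */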
266
267 /*
268 * local prototypes
269 */
270
271 static struct vm_map_entry *
272 uvm_mapent_alloc(struct vm_map *, int);
273 static struct vm_map_entry *
274 uvm_mapent_alloc_split(struct vm_map *,
275 const struct vm_map_entry *, int,
276 struct uvm_mapent_reservation *);
277 static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
278 static void uvm_mapent_free(struct vm_map_entry *);
279 #if defined(DEBUG)
280 static void _uvm_mapent_check(const struct vm_map_entry *, const char *,
281 int);
282 #define uvm_mapent_check(e) _uvm_mapent_check(e, __FILE__, __LINE__)
283 #else /* defined(DEBUG) */
284 #define uvm_mapent_check(e) /* nothing */
285 #endif /* defined(DEBUG) */
286 static struct vm_map_entry *
287 uvm_kmapent_alloc(struct vm_map *, int);
288 static void uvm_kmapent_free(struct vm_map_entry *);
289 static vsize_t uvm_kmapent_overhead(vsize_t);
290
291 static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
292 static void uvm_map_reference_amap(struct vm_map_entry *, int);
293 static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
294 struct vm_map_entry *);
295 static void uvm_map_unreference_amap(struct vm_map_entry *, int);
296
297 int _uvm_map_sanity(struct vm_map *);
298 int _uvm_tree_sanity(struct vm_map *);
299 static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *);
300
301 static inline int
302 uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
303 {
304
305 if (a->start < b->start)
306 return (-1);
307 else if (a->start > b->start)
308 return (1);
309
310 return (0);
311 }
312
313 static inline void
314 uvm_rb_augment(struct vm_map_entry *entry)
315 {
316
317 entry->space = uvm_rb_subtree_space(entry);
318 }
319
320 RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
321
322 RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
323
324 static inline vsize_t
325 uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry)
326 {
327 /* XXX map is not used */
328
329 KASSERT(entry->next != NULL);
330 return entry->next->start - entry->end;
331 }
332
333 static vsize_t
334 uvm_rb_subtree_space(const struct vm_map_entry *entry)
335 {
336 vaddr_t space, tmp;
337
338 space = entry->ownspace;
339 if (RB_LEFT(entry, rb_entry)) {
340 tmp = RB_LEFT(entry, rb_entry)->space;
341 if (tmp > space)
342 space = tmp;
343 }
344
345 if (RB_RIGHT(entry, rb_entry)) {
346 tmp = RB_RIGHT(entry, rb_entry)->space;
347 if (tmp > space)
348 space = tmp;
349 }
350
351 return (space);
352 }
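
/*
 * Editorial note: "ownspace" is the free gap between an entry and its
 * successor in address order; "space" is the largest such gap in the
 * entry's subtree, maintained as
 *
 *	space = max(ownspace, left child's space, right child's space)
 *
 * Caching the subtree maximum is what lets uvm_map_findspace() below
 * discard an entire subtree with one comparison (child->space < length)
 * instead of visiting every gap in it.
 */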
353
354 static inline void
355 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
356 {
357 /* We need to traverse to the very top */
358 do {
359 entry->ownspace = uvm_rb_space(map, entry);
360 entry->space = uvm_rb_subtree_space(entry);
361 } while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
362 }
363
364 static void
365 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
366 {
367 vaddr_t space = uvm_rb_space(map, entry);
368 struct vm_map_entry *tmp;
369
370 entry->ownspace = entry->space = space;
371 tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
372 #ifdef DIAGNOSTIC
373 if (tmp != NULL)
374 panic("uvm_rb_insert: duplicate entry?");
375 #endif
376 uvm_rb_fixup(map, entry);
377 if (entry->prev != &map->header)
378 uvm_rb_fixup(map, entry->prev);
379 }
380
381 static void
382 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
383 {
384 struct vm_map_entry *parent;
385
386 parent = RB_PARENT(entry, rb_entry);
387 RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
388 if (entry->prev != &map->header)
389 uvm_rb_fixup(map, entry->prev);
390 if (parent)
391 uvm_rb_fixup(map, parent);
392 }
393
394 #if defined(DEBUG)
395 int uvm_debug_check_map = 0;
396 int uvm_debug_check_rbtree = 0;
397 #define uvm_map_check(map, name) \
398 _uvm_map_check((map), (name), __FILE__, __LINE__)
399 static void
400 _uvm_map_check(struct vm_map *map, const char *name,
401 const char *file, int line)
402 {
403
404 if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
405 (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
406 panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
407 name, map, file, line);
408 }
409 }
410 #else /* defined(DEBUG) */
411 #define uvm_map_check(map, name) /* nothing */
412 #endif /* defined(DEBUG) */
413
414 #if defined(DEBUG) || defined(DDB)
415 int
416 _uvm_map_sanity(struct vm_map *map)
417 {
418 bool first_free_found = false;
419 bool hint_found = false;
420 const struct vm_map_entry *e;
421
422 e = &map->header;
423 for (;;) {
424 if (map->first_free == e) {
425 first_free_found = true;
426 } else if (!first_free_found && e->next->start > e->end) {
427 printf("first_free %p should be %p\n",
428 map->first_free, e);
429 return -1;
430 }
431 if (map->hint == e) {
432 hint_found = true;
433 }
434
435 e = e->next;
436 if (e == &map->header) {
437 break;
438 }
439 }
440 if (!first_free_found) {
441 printf("stale first_free\n");
442 return -1;
443 }
444 if (!hint_found) {
445 printf("stale hint\n");
446 return -1;
447 }
448 return 0;
449 }
450
451 int
452 _uvm_tree_sanity(struct vm_map *map)
453 {
454 struct vm_map_entry *tmp, *trtmp;
455 int n = 0, i = 1;
456
457 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
458 if (tmp->ownspace != uvm_rb_space(map, tmp)) {
459 printf("%d/%d ownspace %lx != %lx %s\n",
460 n + 1, map->nentries,
461 (ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp),
462 tmp->next == &map->header ? "(last)" : "");
463 goto error;
464 }
465 }
466 trtmp = NULL;
467 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
468 if (tmp->space != uvm_rb_subtree_space(tmp)) {
469 printf("space %lx != %lx\n",
470 (ulong)tmp->space,
471 (ulong)uvm_rb_subtree_space(tmp));
472 goto error;
473 }
474 if (trtmp != NULL && trtmp->start >= tmp->start) {
475 printf("corrupt: 0x%lx >= 0x%lx\n",
476 trtmp->start, tmp->start);
477 goto error;
478 }
479 n++;
480
481 trtmp = tmp;
482 }
483
484 if (n != map->nentries) {
485 printf("nentries: %d vs %d\n", n, map->nentries);
486 goto error;
487 }
488
489 for (tmp = map->header.next; tmp && tmp != &map->header;
490 tmp = tmp->next, i++) {
491 trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
492 if (trtmp != tmp) {
493 printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
494 RB_PARENT(tmp, rb_entry));
495 goto error;
496 }
497 }
498
499 return (0);
500 error:
501 return (-1);
502 }
503 #endif /* defined(DEBUG) || defined(DDB) */
504
505 #ifdef DIAGNOSTIC
506 static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
507 #endif
508
509 /*
510 * vm_map_lock: acquire an exclusive (write) lock on a map.
511 *
512 * => Note that "intrsafe" maps use only exclusive spin locks.
513 *
514 * => The locking protocol provides for guaranteed upgrade from shared ->
515 * exclusive by whichever thread currently has the map marked busy.
516 * See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among
517 * other problems, it defeats any fairness guarantees provided by RW
518 * locks.
519 */
520
521 void
522 vm_map_lock(struct vm_map *map)
523 {
524
525 if ((map->flags & VM_MAP_INTRSAFE) != 0) {
526 mutex_spin_enter(&map->mutex);
527 return;
528 }
529
530 for (;;) {
531 rw_enter(&map->lock, RW_WRITER);
532 if (map->busy == NULL)
533 break;
534 KASSERT(map->busy != curlwp);
535 mutex_enter(&map->misc_lock);
536 rw_exit(&map->lock);
537 if (map->busy != NULL)
538 cv_wait(&map->cv, &map->misc_lock);
539 mutex_exit(&map->misc_lock);
540 }
541
542 map->timestamp++;
543 }
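
/*
 * Editorial note: the loop above implements "write-lock unless busy":
 * a writer that finds the map marked busy by another thread drops the
 * rwlock and sleeps on map->cv until the busy holder calls
 * vm_map_unbusy(), then retries.  A minimal userspace sketch of the
 * same pattern, assuming POSIX threads (names hypothetical):
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t map_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t misc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t map_cv = PTHREAD_COND_INITIALIZER;
static bool busy;		/* set only with map_rwlock write-held */

static void
map_lock(void)
{

	for (;;) {
		pthread_rwlock_wrlock(&map_rwlock);
		if (!busy)
			break;	/* locked, and nobody holds it busy */
		/*
		 * Take misc_lock before dropping the rwlock so the
		 * wakeup from the unbusy side cannot be missed.
		 */
		pthread_mutex_lock(&misc_lock);
		pthread_rwlock_unlock(&map_rwlock);
		if (busy)
			pthread_cond_wait(&map_cv, &misc_lock);
		pthread_mutex_unlock(&misc_lock);
	}
}
#endif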
544
545 /*
546 * vm_map_lock_try: try to lock a map, failing if it is already locked.
547 */
548
549 bool
550 vm_map_lock_try(struct vm_map *map)
551 {
552
553 if ((map->flags & VM_MAP_INTRSAFE) != 0)
554 return mutex_tryenter(&map->mutex);
555 if (!rw_tryenter(&map->lock, RW_WRITER))
556 return false;
557 if (map->busy != NULL) {
558 rw_exit(&map->lock);
559 return false;
560 }
561
562 map->timestamp++;
563 return true;
564 }
565
566 /*
567 * vm_map_unlock: release an exclusive lock on a map.
568 */
569
570 void
571 vm_map_unlock(struct vm_map *map)
572 {
573
574 if ((map->flags & VM_MAP_INTRSAFE) != 0)
575 mutex_spin_exit(&map->mutex);
576 else {
577 KASSERT(rw_write_held(&map->lock));
578 KASSERT(map->busy == NULL);
579 rw_exit(&map->lock);
580 }
581 }
582
583 /*
584 * vm_map_upgrade: upgrade a shared lock to an exclusive lock.
585 *
586 * => the caller must hold the map busy
587 */
588
589 void
590 vm_map_upgrade(struct vm_map *map)
591 {
592
593 KASSERT(rw_read_held(&map->lock));
594 KASSERT(map->busy == curlwp);
595
596 if (rw_tryupgrade(&map->lock))
597 return;
598
599 rw_exit(&map->lock);
600 rw_enter(&map->lock, RW_WRITER);
601 }
602
603 /*
604 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
605 * want an exclusive lock.
606 */
607
608 void
609 vm_map_unbusy(struct vm_map *map)
610 {
611
612 KASSERT(rw_lock_held(&map->lock));
613 KASSERT(map->busy == curlwp);
614
615 /*
616 * Safe to clear 'busy' and 'waiters' with only a read lock held:
617 *
618 * o they can only be set with a write lock held
619 * o writers are blocked out with a read or write hold
620 * o at any time, only one thread owns the set of values
621 */
622 mutex_enter(&map->misc_lock);
623 map->busy = NULL;
624 cv_broadcast(&map->cv);
625 mutex_exit(&map->misc_lock);
626 }
627
628 /*
629 * vm_map_lock_read: acquire a shared (read) lock on a map.
630 */
631
632 void
633 vm_map_lock_read(struct vm_map *map)
634 {
635
636 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
637
638 rw_enter(&map->lock, RW_READER);
639 }
640
641 /*
642 * vm_map_unlock_read: release a shared lock on a map.
643 */
644
645 void
646 vm_map_unlock_read(struct vm_map *map)
647 {
648
649 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
650
651 rw_exit(&map->lock);
652 }
653
654 /*
655 * vm_map_downgrade: downgrade an exclusive lock to a shared lock.
656 */
657
658 void
659 vm_map_downgrade(struct vm_map *map)
660 {
661
662 rw_downgrade(&map->lock);
663 }
664
665 /*
666 * vm_map_busy: mark a map as busy.
667 *
668 * => the caller must hold the map write locked
669 */
670
671 void
672 vm_map_busy(struct vm_map *map)
673 {
674
675 KASSERT(rw_write_held(&map->lock));
676 KASSERT(map->busy == NULL);
677
678 map->busy = curlwp;
679 }
680
681 /*
682 * vm_map_locked_p: return true if the map is write locked.
683 */
684
685 bool
686 vm_map_locked_p(struct vm_map *map)
687 {
688
689 if ((map->flags & VM_MAP_INTRSAFE) != 0) {
690 return mutex_owned(&map->mutex);
691 } else {
692 return rw_write_held(&map->lock);
693 }
694 }
695
696 /*
697 * uvm_mapent_alloc: allocate a map entry
698 */
699
700 static struct vm_map_entry *
701 uvm_mapent_alloc(struct vm_map *map, int flags)
702 {
703 struct vm_map_entry *me;
704 int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
705 UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
706
707 if (VM_MAP_USE_KMAPENT(map)) {
708 me = uvm_kmapent_alloc(map, flags);
709 } else {
710 me = pool_cache_get(&uvm_map_entry_cache, pflags);
711 if (__predict_false(me == NULL))
712 return NULL;
713 me->flags = 0;
714 }
715
716 UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
717 ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
718 return (me);
719 }
720
721 /*
722 * uvm_mapent_alloc_split: allocate a map entry for clipping.
723 *
724 * => map must be locked by caller if UVM_MAP_QUANTUM is set.
725 */
726
727 static struct vm_map_entry *
728 uvm_mapent_alloc_split(struct vm_map *map,
729 const struct vm_map_entry *old_entry, int flags,
730 struct uvm_mapent_reservation *umr)
731 {
732 struct vm_map_entry *me;
733
734 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
735 (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
736
737 if (old_entry->flags & UVM_MAP_QUANTUM) {
738 struct vm_map_kernel *vmk = vm_map_to_kernel(map);
739
740 KASSERT(vm_map_locked_p(map));
741 me = vmk->vmk_merged_entries;
742 KASSERT(me);
743 vmk->vmk_merged_entries = me->next;
744 KASSERT(me->flags & UVM_MAP_QUANTUM);
745 } else {
746 me = uvm_mapent_alloc(map, flags);
747 }
748
749 return me;
750 }
751
752 /*
753 * uvm_mapent_free: free map entry
754 */
755
756 static void
757 uvm_mapent_free(struct vm_map_entry *me)
758 {
759 UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
760
761 UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
762 me, me->flags, 0, 0);
763 if (me->flags & UVM_MAP_KERNEL) {
764 uvm_kmapent_free(me);
765 } else {
766 pool_cache_put(&uvm_map_entry_cache, me);
767 }
768 }
769
770 /*
771 * uvm_mapent_free_merged: free merged map entry
772 *
773 * => keep the entry if needed.
774 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
775 * => map should be locked if UVM_MAP_QUANTUM is set.
776 */
777
778 static void
779 uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
780 {
781
782 KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);
783
784 if (me->flags & UVM_MAP_QUANTUM) {
785 /*
786 * keep this entry for later splitting.
787 */
788 struct vm_map_kernel *vmk;
789
790 KASSERT(vm_map_locked_p(map));
791 KASSERT(VM_MAP_IS_KERNEL(map));
792 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
793 (me->flags & UVM_MAP_KERNEL));
794
795 vmk = vm_map_to_kernel(map);
796 me->next = vmk->vmk_merged_entries;
797 vmk->vmk_merged_entries = me;
798 } else {
799 uvm_mapent_free(me);
800 }
801 }
802
803 /*
804 * uvm_mapent_copy: copy a map entry, preserving flags
805 */
806
807 static inline void
808 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
809 {
810
811 memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
812 ((char *)src));
813 }
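
/*
 * Editorial note: the subtraction above is the usual "copy everything
 * up to a marker field" idiom; fields laid out after
 * uvm_map_entry_stop_copy (such as the entry's flags) keep their
 * destination values.  A self-contained sketch with a hypothetical
 * structure:
 */
#if 0
#include <stddef.h>
#include <string.h>

struct ent {
	int start;	/* copied */
	int end;	/* copied */
	int stop_copy;	/* marker: this field and later ones are kept */
	int flags;	/* NOT copied; dst retains its own value */
};

static void
ent_copy(const struct ent *src, struct ent *dst)
{

	/* Same as the pointer arithmetic above, via offsetof. */
	memcpy(dst, src, offsetof(struct ent, stop_copy));
}
#endif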
814
815 /*
816 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
817 * map entries.
818 *
819 * => size and flags are the same as uvm_km_suballoc's ones.
820 */
821
822 vsize_t
823 uvm_mapent_overhead(vsize_t size, int flags)
824 {
825
826 if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
827 return uvm_kmapent_overhead(size);
828 }
829 return 0;
830 }
831
832 #if defined(DEBUG)
833 static void
834 _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
835 {
836
837 if (entry->start >= entry->end) {
838 goto bad;
839 }
840 if (UVM_ET_ISOBJ(entry)) {
841 if (entry->object.uvm_obj == NULL) {
842 goto bad;
843 }
844 } else if (UVM_ET_ISSUBMAP(entry)) {
845 if (entry->object.sub_map == NULL) {
846 goto bad;
847 }
848 } else {
849 if (entry->object.uvm_obj != NULL ||
850 entry->object.sub_map != NULL) {
851 goto bad;
852 }
853 }
854 if (!UVM_ET_ISOBJ(entry)) {
855 if (entry->offset != 0) {
856 goto bad;
857 }
858 }
859
860 return;
861
862 bad:
863 panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
864 }
865 #endif /* defined(DEBUG) */
866
867 /*
868 * uvm_map_entry_unwire: unwire a map entry
869 *
870 * => map should be locked by caller
871 */
872
873 static inline void
874 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
875 {
876
877 entry->wired_count = 0;
878 uvm_fault_unwire_locked(map, entry->start, entry->end);
879 }
880
881
882 /*
883 * wrapper for calling amap_ref()
884 */
885 static inline void
886 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
887 {
888
889 amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
890 (entry->end - entry->start) >> PAGE_SHIFT, flags);
891 }
892
893
894 /*
895 * wrapper for calling amap_unref()
896 */
897 static inline void
898 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
899 {
900
901 amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
902 (entry->end - entry->start) >> PAGE_SHIFT, flags);
903 }
904
905
906 /*
907 * uvm_map_init: init mapping system at boot time.
908 */
909
910 void
911 uvm_map_init(void)
912 {
913 #if defined(UVMHIST)
914 static struct uvm_history_ent maphistbuf[100];
915 static struct uvm_history_ent pdhistbuf[100];
916 #endif
917
918 /*
919 * first, init logging system.
920 */
921
922 UVMHIST_FUNC("uvm_map_init");
923 UVMHIST_INIT_STATIC(maphist, maphistbuf);
924 UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
925 UVMHIST_CALLED(maphist);
926 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
927
928 /*
929 * initialize the global lock for kernel map entry.
930 */
931
932 mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
933
934 /*
935 * initialize caches.
936 */
937
938 pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
939 0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
940 pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
941 0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
942 }
943
944 /*
945 * clippers
946 */
947
948 /*
949 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
950 */
951
952 static void
953 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
954 vaddr_t splitat)
955 {
956 vaddr_t adj;
957
958 KASSERT(entry1->start < splitat);
959 KASSERT(splitat < entry1->end);
960
961 adj = splitat - entry1->start;
962 entry1->end = entry2->start = splitat;
963
964 if (entry1->aref.ar_amap) {
965 amap_splitref(&entry1->aref, &entry2->aref, adj);
966 }
967 if (UVM_ET_ISSUBMAP(entry1)) {
968 /* ... unlikely to happen, but play it safe */
969 uvm_map_reference(entry1->object.sub_map);
970 } else if (UVM_ET_ISOBJ(entry1)) {
971 KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
972 entry2->offset += adj;
973 if (entry1->object.uvm_obj->pgops &&
974 entry1->object.uvm_obj->pgops->pgo_reference)
975 entry1->object.uvm_obj->pgops->pgo_reference(
976 entry1->object.uvm_obj);
977 }
978 }
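
/*
 * Worked example (editorial): splitting an object-backed entry
 * [0x1000, 0x5000) with offset 0 at splitat = 0x2000 leaves
 * entry1 = [0x1000, 0x2000) with offset 0 and
 * entry2 = [0x2000, 0x5000) with offset 0x1000 (adj), and takes an
 * extra reference on the backing object or submap since two entries
 * now point at it.
 */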
979
980 /*
981 * uvm_map_clip_start: ensure that the entry begins at or after
982 * the starting address, if it doesn't we split the entry.
983 *
984 * => caller should use UVM_MAP_CLIP_START macro rather than calling
985 * this directly
986 * => map must be locked by caller
987 */
988
989 void
990 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
991 vaddr_t start, struct uvm_mapent_reservation *umr)
992 {
993 struct vm_map_entry *new_entry;
994
995 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
996
997 uvm_map_check(map, "clip_start entry");
998 uvm_mapent_check(entry);
999
1000 /*
1001 * Split off the front portion. note that we must insert the new
1002 * entry BEFORE this one, so that this entry has the specified
1003 * starting address.
1004 */
1005 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1006 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1007 uvm_mapent_splitadj(new_entry, entry, start);
1008 uvm_map_entry_link(map, entry->prev, new_entry);
1009
1010 uvm_map_check(map, "clip_start leave");
1011 }
1012
1013 /*
1014 * uvm_map_clip_end: ensure that the entry ends at or before
1015 * the ending address, if it doesn't we split the entry
1016 *
1017 * => caller should use UVM_MAP_CLIP_END macro rather than calling
1018 * this directly
1019 * => map must be locked by caller
1020 */
1021
1022 void
1023 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
1024 struct uvm_mapent_reservation *umr)
1025 {
1026 struct vm_map_entry *new_entry;
1027
1028 uvm_map_check(map, "clip_end entry");
1029 uvm_mapent_check(entry);
1030
1031 /*
1032 * Create a new entry and insert it
1033 * AFTER the specified entry
1034 */
1035 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1036 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1037 uvm_mapent_splitadj(entry, new_entry, end);
1038 uvm_map_entry_link(map, entry, new_entry);
1039
1040 uvm_map_check(map, "clip_end leave");
1041 }
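
/*
 * Worked example (editorial): to operate on [0x2000, 0x3000) within a
 * single entry covering [0x1000, 0x5000), uvm_map_clip_start() at
 * 0x2000 produces [0x1000, 0x2000) and [0x2000, 0x5000), then
 * uvm_map_clip_end() at 0x3000 produces [0x2000, 0x3000) and
 * [0x3000, 0x5000), leaving an exactly-covering middle entry.
 */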
1042
1043 static void
1044 vm_map_drain(struct vm_map *map, uvm_flag_t flags)
1045 {
1046
1047 if (!VM_MAP_IS_KERNEL(map)) {
1048 return;
1049 }
1050
1051 uvm_km_va_drain(map, flags);
1052 }
1053
1054 /*
1055 * M A P - m a i n e n t r y p o i n t
1056 */
1057 /*
1058 * uvm_map: establish a valid mapping in a map
1059 *
1060 * => assume startp is page aligned.
1061 * => assume size is a multiple of PAGE_SIZE.
1062 * => assume sys_mmap provides enough of a "hint" to have us skip
1063 * over the text/data/bss area.
1064 * => map must be unlocked (we will lock it)
1065 * => <uobj,uoffset> value meanings (4 cases):
1066 * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
1067 * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
1068 * [3] <uobj,uoffset> == normal mapping
1069 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
1070 *
1071 * case [4] is for kernel mappings where we don't know the offset until
1072 * we've found a virtual address. note that kernel object offsets are
1073 * always relative to vm_map_min(kernel_map).
1074 *
1075 * => if `align' is non-zero, we align the virtual address to the specified
1076 * alignment.
1077 * this is provided as a mechanism for large pages.
1078 *
1079 * => XXXCDC: need way to map in external amap?
1080 */
1081
1082 int
1083 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1084 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1085 {
1086 struct uvm_map_args args;
1087 struct vm_map_entry *new_entry;
1088 int error;
1089
1090 KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
1091 KASSERT((size & PAGE_MASK) == 0);
1092
1093 /*
1094 * for pager_map, allocate the new entry first to avoid sleeping
1095 * for memory while we have the map locked.
1096 *
1097 * besides, because we allocate entries for in-kernel maps
1098 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
1099 * allocate them before locking the map.
1100 */
1101
1102 new_entry = NULL;
1103 if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
1104 map == pager_map) {
1105 new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1106 if (__predict_false(new_entry == NULL))
1107 return ENOMEM;
1108 if (flags & UVM_FLAG_QUANTUM)
1109 new_entry->flags |= UVM_MAP_QUANTUM;
1110 }
1111 if (map == pager_map)
1112 flags |= UVM_FLAG_NOMERGE;
1113
1114 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1115 flags, &args);
1116 if (!error) {
1117 error = uvm_map_enter(map, &args, new_entry);
1118 *startp = args.uma_start;
1119 } else if (new_entry) {
1120 uvm_mapent_free(new_entry);
1121 }
1122
1123 #if defined(DEBUG)
1124 if (!error && VM_MAP_IS_KERNEL(map)) {
1125 uvm_km_check_empty(*startp, *startp + size,
1126 (map->flags & VM_MAP_INTRSAFE) != 0);
1127 }
1128 #endif /* defined(DEBUG) */
1129
1130 return error;
1131 }
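
/*
 * Usage sketch (editorial): a typical kernel caller establishes an
 * anonymous, pageable mapping somewhere in kernel_map along these
 * lines; the flag packing is UVM_MAPFLAG() from uvm_extern.h, and the
 * exact protection/advice choices here are illustrative only:
 */
#if 0
	vaddr_t va = vm_map_min(kernel_map);
	int error;

	error = uvm_map(kernel_map, &va, PAGE_SIZE, NULL,
	    UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0));
	/* on success, va holds the chosen page-aligned address */
#endif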
1132
1133 int
1134 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1135 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1136 struct uvm_map_args *args)
1137 {
1138 struct vm_map_entry *prev_entry;
1139 vm_prot_t prot = UVM_PROTECTION(flags);
1140 vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1141
1142 UVMHIST_FUNC("uvm_map_prepare");
1143 UVMHIST_CALLED(maphist);
1144
1145 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1146 map, start, size, flags);
1147 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1148
1149 /*
1150 * detect a popular device driver bug.
1151 */
1152
1153 KASSERT(doing_shutdown || curlwp != NULL ||
1154 (map->flags & VM_MAP_INTRSAFE));
1155
1156 /*
1157 * zero-sized mapping doesn't make any sense.
1158 */
1159 KASSERT(size > 0);
1160
1161 KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1162
1163 uvm_map_check(map, "map entry");
1164
1165 /*
1166 * check sanity of protection code
1167 */
1168
1169 if ((prot & maxprot) != prot) {
1170 UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
1171 prot, maxprot,0,0);
1172 return EACCES;
1173 }
1174
1175 /*
1176 * figure out where to put new VM range
1177 */
1178
1179 retry:
1180 if (vm_map_lock_try(map) == false) {
1181 if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
1182 (map->flags & VM_MAP_INTRSAFE) == 0) {
1183 return EAGAIN;
1184 }
1185 vm_map_lock(map); /* could sleep here */
1186 }
1187 prev_entry = uvm_map_findspace(map, start, size, &start,
1188 uobj, uoffset, align, flags);
1189 if (prev_entry == NULL) {
1190 unsigned int timestamp;
1191
1192 timestamp = map->timestamp;
1193 UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
1194 timestamp,0,0,0);
1195 map->flags |= VM_MAP_WANTVA;
1196 vm_map_unlock(map);
1197
1198 /*
1199 * try to reclaim kva and wait until someone does unmap.
1200 * fragile locking here, so we awaken every second to
1201 * recheck the condition.
1202 */
1203
1204 vm_map_drain(map, flags);
1205
1206 mutex_enter(&map->misc_lock);
1207 while ((map->flags & VM_MAP_WANTVA) != 0 &&
1208 map->timestamp == timestamp) {
1209 if ((flags & UVM_FLAG_WAITVA) == 0) {
1210 mutex_exit(&map->misc_lock);
1211 UVMHIST_LOG(maphist,
1212 "<- uvm_map_findspace failed!", 0,0,0,0);
1213 return ENOMEM;
1214 } else {
1215 cv_timedwait(&map->cv, &map->misc_lock, hz);
1216 }
1217 }
1218 mutex_exit(&map->misc_lock);
1219 goto retry;
1220 }
1221
1222 #ifdef PMAP_GROWKERNEL
1223 /*
1224 * If the kernel pmap can't map the requested space,
1225 * then allocate more resources for it.
1226 */
1227 if (map == kernel_map && uvm_maxkaddr < (start + size))
1228 uvm_maxkaddr = pmap_growkernel(start + size);
1229 #endif
1230
1231 UVMMAP_EVCNT_INCR(map_call);
1232
1233 /*
1234 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1235 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
1236 * either case we want to zero it before storing it in the map entry
1237 * (because it looks strange and confusing when debugging...)
1238 *
1239 * if uobj is not null
1240 * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1241 * and we do not need to change uoffset.
1242 * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1243 * now (based on the starting address of the map). this case is
1244 * for kernel object mappings where we don't know the offset until
1245 * the virtual address is found (with uvm_map_findspace). the
1246 * offset is the distance we are from the start of the map.
1247 */
1248
1249 if (uobj == NULL) {
1250 uoffset = 0;
1251 } else {
1252 if (uoffset == UVM_UNKNOWN_OFFSET) {
1253 KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1254 uoffset = start - vm_map_min(kernel_map);
1255 }
1256 }
1257
1258 args->uma_flags = flags;
1259 args->uma_prev = prev_entry;
1260 args->uma_start = start;
1261 args->uma_size = size;
1262 args->uma_uobj = uobj;
1263 args->uma_uoffset = uoffset;
1264
1265 return 0;
1266 }
1267
1268 int
1269 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1270 struct vm_map_entry *new_entry)
1271 {
1272 struct vm_map_entry *prev_entry = args->uma_prev;
1273 struct vm_map_entry *dead = NULL;
1274
1275 const uvm_flag_t flags = args->uma_flags;
1276 const vm_prot_t prot = UVM_PROTECTION(flags);
1277 const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1278 const vm_inherit_t inherit = UVM_INHERIT(flags);
1279 const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1280 AMAP_EXTEND_NOWAIT : 0;
1281 const int advice = UVM_ADVICE(flags);
1282 const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
1283 UVM_MAP_QUANTUM : 0;
1284
1285 vaddr_t start = args->uma_start;
1286 vsize_t size = args->uma_size;
1287 struct uvm_object *uobj = args->uma_uobj;
1288 voff_t uoffset = args->uma_uoffset;
1289
1290 const int kmap = (vm_map_pmap(map) == pmap_kernel());
1291 int merged = 0;
1292 int error;
1293 int newetype;
1294
1295 UVMHIST_FUNC("uvm_map_enter");
1296 UVMHIST_CALLED(maphist);
1297
1298 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1299 map, start, size, flags);
1300 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1301
1302 KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1303
1304 if (flags & UVM_FLAG_QUANTUM) {
1305 KASSERT(new_entry);
1306 KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
1307 }
1308
1309 if (uobj)
1310 newetype = UVM_ET_OBJ;
1311 else
1312 newetype = 0;
1313
1314 if (flags & UVM_FLAG_COPYONW) {
1315 newetype |= UVM_ET_COPYONWRITE;
1316 if ((flags & UVM_FLAG_OVERLAY) == 0)
1317 newetype |= UVM_ET_NEEDSCOPY;
1318 }
1319
1320 /*
1321 * try and insert in map by extending previous entry, if possible.
1322 * XXX: we don't try and pull back the next entry. might be useful
1323 * for a stack, but we are currently allocating our stack in advance.
1324 */
1325
1326 if (flags & UVM_FLAG_NOMERGE)
1327 goto nomerge;
1328
1329 if (prev_entry->end == start &&
1330 prev_entry != &map->header &&
1331 UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
1332 prot, maxprot, inherit, advice, 0)) {
1333
1334 if (uobj && prev_entry->offset +
1335 (prev_entry->end - prev_entry->start) != uoffset)
1336 goto forwardmerge;
1337
1338 /*
1339 * can't extend a shared amap. note: no need to lock amap to
1340 * look at refs since we don't care about its exact value.
1341 * if it is one (i.e. we have the only reference) it will stay there
1342 */
1343
1344 if (prev_entry->aref.ar_amap &&
1345 amap_refs(prev_entry->aref.ar_amap) != 1) {
1346 goto forwardmerge;
1347 }
1348
1349 if (prev_entry->aref.ar_amap) {
1350 error = amap_extend(prev_entry, size,
1351 amapwaitflag | AMAP_EXTEND_FORWARDS);
1352 if (error)
1353 goto nomerge;
1354 }
1355
1356 if (kmap)
1357 UVMMAP_EVCNT_INCR(kbackmerge);
1358 else
1359 UVMMAP_EVCNT_INCR(ubackmerge);
1360 UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
1361
1362 /*
1363 * drop our reference to uobj since we are extending a reference
1364 * that we already have (the ref count can not drop to zero).
1365 */
1366
1367 if (uobj && uobj->pgops->pgo_detach)
1368 uobj->pgops->pgo_detach(uobj);
1369
1370 prev_entry->end += size;
1371 uvm_rb_fixup(map, prev_entry);
1372
1373 uvm_map_check(map, "map backmerged");
1374
1375 UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1376 merged++;
1377 }
1378
1379 forwardmerge:
1380 if (prev_entry->next->start == (start + size) &&
1381 prev_entry->next != &map->header &&
1382 UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
1383 prot, maxprot, inherit, advice, 0)) {
1384
1385 if (uobj && prev_entry->next->offset != uoffset + size)
1386 goto nomerge;
1387
1388 /*
1389 * can't extend a shared amap. note: no need to lock amap to
1390 * look at refs since we don't care about its exact value.
1391 * if it is one (i.e. we have the only reference) it will stay there.
1392 *
1393 * note that we also can't merge two amaps, so if we
1394 * merged with the previous entry which has an amap,
1395 * and the next entry also has an amap, we give up.
1396 *
1397 * Interesting cases:
1398 * amap, new, amap -> give up second merge (single fwd extend)
1399 * amap, new, none -> double forward extend (extend again here)
1400 * none, new, amap -> double backward extend (done here)
1401 * uobj, new, amap -> single backward extend (done here)
1402 *
1403 * XXX should we attempt to deal with someone refilling
1404 * the deallocated region between two entries that are
1405 * backed by the same amap (ie, arefs is 2, "prev" and
1406 * "next" refer to it, and adding this allocation will
1407 * close the hole, thus restoring arefs to 1 and
1408 * deallocating the "next" vm_map_entry)? -- @@@
1409 */
1410
1411 if (prev_entry->next->aref.ar_amap &&
1412 (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1413 (merged && prev_entry->aref.ar_amap))) {
1414 goto nomerge;
1415 }
1416
1417 if (merged) {
1418 /*
1419 * Try to extend the amap of the previous entry to
1420 * cover the next entry as well. If it doesn't work
1421 * just skip on, don't actually give up, since we've
1422 * already completed the back merge.
1423 */
1424 if (prev_entry->aref.ar_amap) {
1425 if (amap_extend(prev_entry,
1426 prev_entry->next->end -
1427 prev_entry->next->start,
1428 amapwaitflag | AMAP_EXTEND_FORWARDS))
1429 goto nomerge;
1430 }
1431
1432 /*
1433 * Try to extend the amap of the *next* entry
1434 * back to cover the new allocation *and* the
1435 * previous entry as well (the previous merge
1436 * didn't have an amap already otherwise we
1437 * wouldn't be checking here for an amap). If
1438 * it doesn't work just skip on, again, don't
1439 * actually give up, since we've already
1440 * completed the back merge.
1441 */
1442 else if (prev_entry->next->aref.ar_amap) {
1443 if (amap_extend(prev_entry->next,
1444 prev_entry->end -
1445 prev_entry->start,
1446 amapwaitflag | AMAP_EXTEND_BACKWARDS))
1447 goto nomerge;
1448 }
1449 } else {
1450 /*
1451 * Pull the next entry's amap backwards to cover this
1452 * new allocation.
1453 */
1454 if (prev_entry->next->aref.ar_amap) {
1455 error = amap_extend(prev_entry->next, size,
1456 amapwaitflag | AMAP_EXTEND_BACKWARDS);
1457 if (error)
1458 goto nomerge;
1459 }
1460 }
1461
1462 if (merged) {
1463 if (kmap) {
1464 UVMMAP_EVCNT_DECR(kbackmerge);
1465 UVMMAP_EVCNT_INCR(kbimerge);
1466 } else {
1467 UVMMAP_EVCNT_DECR(ubackmerge);
1468 UVMMAP_EVCNT_INCR(ubimerge);
1469 }
1470 } else {
1471 if (kmap)
1472 UVMMAP_EVCNT_INCR(kforwmerge);
1473 else
1474 UVMMAP_EVCNT_INCR(uforwmerge);
1475 }
1476 UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);
1477
1478 /*
1479 * drop our reference to uobj since we are extending a reference
1480 * that we already have (the ref count can not drop to zero).
1481 * (if merged, we've already detached)
1482 */
1483 if (uobj && uobj->pgops->pgo_detach && !merged)
1484 uobj->pgops->pgo_detach(uobj);
1485
1486 if (merged) {
1487 dead = prev_entry->next;
1488 prev_entry->end = dead->end;
1489 uvm_map_entry_unlink(map, dead);
1490 if (dead->aref.ar_amap != NULL) {
1491 prev_entry->aref = dead->aref;
1492 dead->aref.ar_amap = NULL;
1493 }
1494 } else {
1495 prev_entry->next->start -= size;
1496 if (prev_entry != &map->header)
1497 uvm_rb_fixup(map, prev_entry);
1498 if (uobj)
1499 prev_entry->next->offset = uoffset;
1500 }
1501
1502 uvm_map_check(map, "map forwardmerged");
1503
1504 UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1505 merged++;
1506 }
1507
1508 nomerge:
1509 if (!merged) {
1510 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1511 if (kmap)
1512 UVMMAP_EVCNT_INCR(knomerge);
1513 else
1514 UVMMAP_EVCNT_INCR(unomerge);
1515
1516 /*
1517 * allocate new entry and link it in.
1518 */
1519
1520 if (new_entry == NULL) {
1521 new_entry = uvm_mapent_alloc(map,
1522 (flags & UVM_FLAG_NOWAIT));
1523 if (__predict_false(new_entry == NULL)) {
1524 error = ENOMEM;
1525 goto done;
1526 }
1527 }
1528 new_entry->start = start;
1529 new_entry->end = new_entry->start + size;
1530 new_entry->object.uvm_obj = uobj;
1531 new_entry->offset = uoffset;
1532
1533 new_entry->etype = newetype;
1534
1535 if (flags & UVM_FLAG_NOMERGE) {
1536 new_entry->flags |= UVM_MAP_NOMERGE;
1537 }
1538
1539 new_entry->protection = prot;
1540 new_entry->max_protection = maxprot;
1541 new_entry->inheritance = inherit;
1542 new_entry->wired_count = 0;
1543 new_entry->advice = advice;
1544 if (flags & UVM_FLAG_OVERLAY) {
1545
1546 /*
1547 * to_add: for BSS we overallocate a little since we
1548 * are likely to extend
1549 */
1550
1551 vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1552 UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1553 struct vm_amap *amap = amap_alloc(size, to_add,
1554 (flags & UVM_FLAG_NOWAIT));
1555 if (__predict_false(amap == NULL)) {
1556 error = ENOMEM;
1557 goto done;
1558 }
1559 new_entry->aref.ar_pageoff = 0;
1560 new_entry->aref.ar_amap = amap;
1561 } else {
1562 new_entry->aref.ar_pageoff = 0;
1563 new_entry->aref.ar_amap = NULL;
1564 }
1565 uvm_map_entry_link(map, prev_entry, new_entry);
1566
1567 /*
1568 * Update the free space hint
1569 */
1570
1571 if ((map->first_free == prev_entry) &&
1572 (prev_entry->end >= new_entry->start))
1573 map->first_free = new_entry;
1574
1575 new_entry = NULL;
1576 }
1577
1578 map->size += size;
1579
1580 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1581
1582 error = 0;
1583 done:
1584 if ((flags & UVM_FLAG_QUANTUM) == 0) {
1585 /*
1586 * vmk_merged_entries is locked by the map's lock.
1587 */
1588 vm_map_unlock(map);
1589 }
1590 if (new_entry && error == 0) {
1591 KDASSERT(merged);
1592 uvm_mapent_free_merged(map, new_entry);
1593 new_entry = NULL;
1594 }
1595 if (dead) {
1596 KDASSERT(merged);
1597 uvm_mapent_free_merged(map, dead);
1598 }
1599 if ((flags & UVM_FLAG_QUANTUM) != 0) {
1600 vm_map_unlock(map);
1601 }
1602 if (new_entry != NULL) {
1603 uvm_mapent_free(new_entry);
1604 }
1605 return error;
1606 }
1607
1608 /*
1609 * uvm_map_lookup_entry_bytree: lookup an entry in tree
1610 */
1611
1612 static bool
1613 uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1614 struct vm_map_entry **entry /* OUT */)
1615 {
1616 struct vm_map_entry *prev = &map->header;
1617 struct vm_map_entry *cur = RB_ROOT(&map->rbhead);
1618
1619 while (cur) {
1620 if (address >= cur->start) {
1621 if (address < cur->end) {
1622 *entry = cur;
1623 return true;
1624 }
1625 prev = cur;
1626 cur = RB_RIGHT(cur, rb_entry);
1627 } else
1628 cur = RB_LEFT(cur, rb_entry);
1629 }
1630 *entry = prev;
1631 return false;
1632 }
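
/*
 * Editorial note: this is the standard BST walk for "entry containing
 * the address, else its predecessor".  "prev" is updated only when the
 * walk descends right, so when the search falls off the tree it holds
 * the last entry whose start is <= address (or &map->header when the
 * address precedes every entry), matching the linear lookup below.
 */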
1633
1634 /*
1635 * uvm_map_lookup_entry: find map entry at or before an address
1636 *
1637 * => map must at least be read-locked by caller
1638 * => entry is returned in "entry"
1639 * => return value is true if address is in the returned entry
1640 */
1641
1642 bool
1643 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1644 struct vm_map_entry **entry /* OUT */)
1645 {
1646 struct vm_map_entry *cur;
1647 bool use_tree = false;
1648 UVMHIST_FUNC("uvm_map_lookup_entry");
1649 UVMHIST_CALLED(maphist);
1650
1651 UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1652 map, address, entry, 0);
1653
1654 /*
1655 * start looking either from the head of the
1656 * list, or from the hint.
1657 */
1658
1659 cur = map->hint;
1660
1661 if (cur == &map->header)
1662 cur = cur->next;
1663
1664 UVMMAP_EVCNT_INCR(mlk_call);
1665 if (address >= cur->start) {
1666
1667 /*
1668 * go from hint to end of list.
1669 *
1670 * but first, make a quick check to see if
1671 * we are already looking at the entry we
1672 * want (which is usually the case).
1673 * note also that we don't need to save the hint
1674 * here... it is the same hint (unless we are
1675 * at the header, in which case the hint didn't
1676 * buy us anything anyway).
1677 */
1678
1679 if (cur != &map->header && cur->end > address) {
1680 UVMMAP_EVCNT_INCR(mlk_hint);
1681 *entry = cur;
1682 UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1683 cur, 0, 0, 0);
1684 uvm_mapent_check(*entry);
1685 return (true);
1686 }
1687
1688 if (map->nentries > 30)
1689 use_tree = true;
1690 } else {
1691
1692 /*
1693 * invalid hint. use tree.
1694 */
1695 use_tree = true;
1696 }
1697
1698 uvm_map_check(map, __func__);
1699
1700 if (use_tree) {
1701 /*
1702 * Simple lookup in the tree. Happens when the hint is
1703 * invalid, or nentries reach a threshold.
1704 */
1705 if (uvm_map_lookup_entry_bytree(map, address, entry)) {
1706 goto got;
1707 } else {
1708 goto failed;
1709 }
1710 }
1711
1712 /*
1713 * search linearly
1714 */
1715
1716 while (cur != &map->header) {
1717 if (cur->end > address) {
1718 if (address >= cur->start) {
1719 /*
1720 * save this lookup for future
1721 * hints, and return
1722 */
1723
1724 *entry = cur;
1725 got:
1726 SAVE_HINT(map, map->hint, *entry);
1727 UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1728 cur, 0, 0, 0);
1729 KDASSERT((*entry)->start <= address);
1730 KDASSERT(address < (*entry)->end);
1731 uvm_mapent_check(*entry);
1732 return (true);
1733 }
1734 break;
1735 }
1736 cur = cur->next;
1737 }
1738 *entry = cur->prev;
1739 failed:
1740 SAVE_HINT(map, map->hint, *entry);
1741 UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1742 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1743 KDASSERT((*entry)->next == &map->header ||
1744 address < (*entry)->next->start);
1745 return (false);
1746 }
1747
1748 /*
1749 * See if the range between start and start + length fits in the gap
1750 * between entry->end and entry->next->start. Returns 1 if it fits,
1751 * 0 if it doesn't fit, and -1 if the address wraps around.
1752 */
1753 static int
1754 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1755 vsize_t align, int topdown, struct vm_map_entry *entry)
1756 {
1757 vaddr_t end;
1758
1759 #ifdef PMAP_PREFER
1760 /*
1761 * push start address forward as needed to avoid VAC alias problems.
1762 * we only do this if a valid offset is specified.
1763 */
1764
1765 if (uoffset != UVM_UNKNOWN_OFFSET)
1766 PMAP_PREFER(uoffset, start, length, topdown);
1767 #endif
1768 if (align != 0) {
1769 if ((*start & (align - 1)) != 0) {
1770 if (topdown)
1771 *start &= ~(align - 1);
1772 else
1773 *start = roundup(*start, align);
1774 }
1775 /*
1776 * XXX Should we PMAP_PREFER() here again?
1777 * eh...i think we're okay
1778 */
1779 }
1780
1781 /*
1782 * Find the end of the proposed new region. Be sure we didn't
1783 * wrap around the address; if so, we lose. Otherwise, if the
1784 * proposed new region fits before the next entry, we win.
1785 */
1786
1787 end = *start + length;
1788 if (end < *start)
1789 return (-1);
1790
1791 if (entry->next->start >= end && *start >= entry->end)
1792 return (1);
1793
1794 return (0);
1795 }
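
/*
 * Worked example (editorial): with align = 0x10000, a top-down
 * candidate *start = 0x12345000 is truncated to 0x12340000 via
 * *start &= ~(align - 1), while a bottom-up candidate is rounded up
 * to 0x12350000 via roundup(); either way the adjustment moves the
 * address in the direction the search is already scanning, so no
 * candidate gap is skipped.
 */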
1796
1797 /*
1798 * uvm_map_findspace: find "length" sized space in "map".
1799 *
1800 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1801 * set in "flags" (in which case we insist on using "hint").
1802 * => "result" is VA returned
1803 * => uobj/uoffset are to be used to handle VAC alignment, if required
1804 * => if "align" is non-zero, we attempt to align to that value.
1805 * => caller must at least have read-locked map
1806 * => returns NULL on failure, or pointer to prev. map entry if success
1807 * => note this is a cross between the old vm_map_findspace and vm_map_find
1808 */
1809
1810 struct vm_map_entry *
1811 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1812 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1813 vsize_t align, int flags)
1814 {
1815 struct vm_map_entry *entry;
1816 struct vm_map_entry *child, *prev, *tmp;
1817 vaddr_t orig_hint;
1818 const int topdown = map->flags & VM_MAP_TOPDOWN;
1819 UVMHIST_FUNC("uvm_map_findspace");
1820 UVMHIST_CALLED(maphist);
1821
1822 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1823 map, hint, length, flags);
1824 KASSERT((align & (align - 1)) == 0);
1825 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1826
1827 uvm_map_check(map, "map_findspace entry");
1828
1829 /*
1830 * remember the original hint. if we are aligning, then we
1831 * may have to try again with no alignment constraint if
1832 * we fail the first time.
1833 */
1834
1835 orig_hint = hint;
1836 if (hint < vm_map_min(map)) { /* check ranges ... */
1837 if (flags & UVM_FLAG_FIXED) {
1838 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1839 return (NULL);
1840 }
1841 hint = vm_map_min(map);
1842 }
1843 if (hint > vm_map_max(map)) {
1844 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1845 hint, vm_map_min(map), vm_map_max(map), 0);
1846 return (NULL);
1847 }
1848
1849 /*
1850 * Look for the first possible address; if there's already
1851 * something at this address, we have to start after it.
1852 */
1853
1854 /*
1855 * @@@: there are four, no, eight cases to consider.
1856 *
1857 * 0: found, fixed, bottom up -> fail
1858 * 1: found, fixed, top down -> fail
1859 * 2: found, not fixed, bottom up -> start after entry->end,
1860 * loop up
1861 * 3: found, not fixed, top down -> start before entry->start,
1862 * loop down
1863 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1864 * 5: not found, fixed, top down -> check entry->next->start, fail
1865 * 6: not found, not fixed, bottom up -> check entry->next->start,
1866 * loop up
1867 * 7: not found, not fixed, top down -> check entry->next->start,
1868 * loop down
1869 *
1870 * as you can see, it reduces to roughly five cases, and that
1871 * adding top down mapping only adds one unique case (without
1872 * it, there would be four cases).
1873 */
1874
1875 if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1876 entry = map->first_free;
1877 } else {
1878 if (uvm_map_lookup_entry(map, hint, &entry)) {
1879 /* "hint" address already in use ... */
1880 if (flags & UVM_FLAG_FIXED) {
1881 UVMHIST_LOG(maphist, "<- fixed & VA in use",
1882 0, 0, 0, 0);
1883 return (NULL);
1884 }
1885 if (topdown)
1886 /* Start from lower gap. */
1887 entry = entry->prev;
1888 } else if (flags & UVM_FLAG_FIXED) {
1889 if (entry->next->start >= hint + length &&
1890 hint + length > hint)
1891 goto found;
1892
1893 /* "hint" address is gap but too small */
1894 UVMHIST_LOG(maphist, "<- fixed mapping failed",
1895 0, 0, 0, 0);
1896 return (NULL); /* only one shot at it ... */
1897 } else {
1898 /*
1899 * See if given hint fits in this gap.
1900 */
1901 switch (uvm_map_space_avail(&hint, length,
1902 uoffset, align, topdown, entry)) {
1903 case 1:
1904 goto found;
1905 case -1:
1906 goto wraparound;
1907 }
1908
1909 if (topdown) {
1910 /*
1911 * Still there is a chance to fit
1912 * if hint > entry->end.
1913 */
1914 } else {
1915 /* Start from higher gap. */
1916 entry = entry->next;
1917 if (entry == &map->header)
1918 goto notfound;
1919 goto nextgap;
1920 }
1921 }
1922 }
1923
1924 /*
1925 * Note that the UVM_FLAG_FIXED case has already been handled.
1926 */
1927 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1928
1929 /* Try to find the space in the red-black tree */
1930
1931 /* Check slot before any entry */
1932 hint = topdown ? entry->next->start - length : entry->end;
1933 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1934 topdown, entry)) {
1935 case 1:
1936 goto found;
1937 case -1:
1938 goto wraparound;
1939 }
1940
1941 nextgap:
1942 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1943 /* If there is not enough space in the whole tree, we fail */
1944 tmp = RB_ROOT(&map->rbhead);
1945 if (tmp == NULL || tmp->space < length)
1946 goto notfound;
1947
1948 prev = NULL; /* previous candidate */
1949
1950 /* Find an entry close to hint that has enough space */
1951 for (; tmp;) {
1952 KASSERT(tmp->next->start == tmp->end + tmp->ownspace);
1953 if (topdown) {
1954 if (tmp->next->start < hint + length &&
1955 (prev == NULL || tmp->end > prev->end)) {
1956 if (tmp->ownspace >= length)
1957 prev = tmp;
1958 else if ((child = RB_LEFT(tmp, rb_entry))
1959 != NULL && child->space >= length)
1960 prev = tmp;
1961 }
1962 } else {
1963 if (tmp->end >= hint &&
1964 (prev == NULL || tmp->end < prev->end)) {
1965 if (tmp->ownspace >= length)
1966 prev = tmp;
1967 else if ((child = RB_RIGHT(tmp, rb_entry))
1968 != NULL && child->space >= length)
1969 prev = tmp;
1970 }
1971 }
1972 if (tmp->next->start < hint + length)
1973 child = RB_RIGHT(tmp, rb_entry);
1974 else if (tmp->end > hint)
1975 child = RB_LEFT(tmp, rb_entry);
1976 else {
1977 if (tmp->ownspace >= length)
1978 break;
1979 if (topdown)
1980 child = RB_LEFT(tmp, rb_entry);
1981 else
1982 child = RB_RIGHT(tmp, rb_entry);
1983 }
1984 if (child == NULL || child->space < length)
1985 break;
1986 tmp = child;
1987 }
1988
1989 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1990 /*
1991 * Check if the entry that we found satisfies the
1992 * space requirement
1993 */
1994 if (topdown) {
1995 if (hint > tmp->next->start - length)
1996 hint = tmp->next->start - length;
1997 } else {
1998 if (hint < tmp->end)
1999 hint = tmp->end;
2000 }
2001 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2002 topdown, tmp)) {
2003 case 1:
2004 entry = tmp;
2005 goto found;
2006 case -1:
2007 goto wraparound;
2008 }
2009 if (tmp->ownspace >= length)
2010 goto listsearch;
2011 }
2012 if (prev == NULL)
2013 goto notfound;
2014
2015 if (topdown) {
2016 KASSERT(orig_hint >= prev->next->start - length ||
2017 prev->next->start - length > prev->next->start);
2018 hint = prev->next->start - length;
2019 } else {
2020 KASSERT(orig_hint <= prev->end);
2021 hint = prev->end;
2022 }
2023 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2024 topdown, prev)) {
2025 case 1:
2026 entry = prev;
2027 goto found;
2028 case -1:
2029 goto wraparound;
2030 }
2031 if (prev->ownspace >= length)
2032 goto listsearch;
2033
2034 if (topdown)
2035 tmp = RB_LEFT(prev, rb_entry);
2036 else
2037 tmp = RB_RIGHT(prev, rb_entry);
2038 for (;;) {
2039 KASSERT(tmp && tmp->space >= length);
2040 if (topdown)
2041 child = RB_RIGHT(tmp, rb_entry);
2042 else
2043 child = RB_LEFT(tmp, rb_entry);
2044 if (child && child->space >= length) {
2045 tmp = child;
2046 continue;
2047 }
2048 if (tmp->ownspace >= length)
2049 break;
2050 if (topdown)
2051 tmp = RB_LEFT(tmp, rb_entry);
2052 else
2053 tmp = RB_RIGHT(tmp, rb_entry);
2054 }
2055
2056 if (topdown) {
2057 KASSERT(orig_hint >= tmp->next->start - length ||
2058 tmp->next->start - length > tmp->next->start);
2059 hint = tmp->next->start - length;
2060 } else {
2061 KASSERT(orig_hint <= tmp->end);
2062 hint = tmp->end;
2063 }
2064 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2065 topdown, tmp)) {
2066 case 1:
2067 entry = tmp;
2068 goto found;
2069 case -1:
2070 goto wraparound;
2071 }
2072
2073 /*
2074 * The tree fails to find an entry because of offset or alignment
2075 * restrictions. Search the list instead.
2076 */
2077 listsearch:
2078 /*
2079 * Look through the rest of the map, trying to fit a new region in
2080 * the gap between existing regions, or after the very last region.
2081 * note: entry->end = base VA of current gap,
2082 * entry->next->start = VA of end of current gap
2083 */
2084
2085 for (;;) {
2086 /* Update hint for current gap. */
2087 hint = topdown ? entry->next->start - length : entry->end;
2088
2089 /* See if it fits. */
2090 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2091 topdown, entry)) {
2092 case 1:
2093 goto found;
2094 case -1:
2095 goto wraparound;
2096 }
2097
2098 /* Advance to next/previous gap */
2099 if (topdown) {
2100 if (entry == &map->header) {
2101 UVMHIST_LOG(maphist, "<- failed (off start)",
2102 0,0,0,0);
2103 goto notfound;
2104 }
2105 entry = entry->prev;
2106 } else {
2107 entry = entry->next;
2108 if (entry == &map->header) {
2109 UVMHIST_LOG(maphist, "<- failed (off end)",
2110 0,0,0,0);
2111 goto notfound;
2112 }
2113 }
2114 }
2115
2116 found:
2117 SAVE_HINT(map, map->hint, entry);
2118 *result = hint;
2119 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2120 KASSERT( topdown || hint >= orig_hint);
2121 KASSERT(!topdown || hint <= orig_hint);
2122 KASSERT(entry->end <= hint);
2123 KASSERT(hint + length <= entry->next->start);
2124 return (entry);
2125
2126 wraparound:
2127 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2128
2129 return (NULL);
2130
2131 notfound:
2132 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2133
2134 return (NULL);
2135 }
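
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): the tree search above relies on two augmented fields
 * maintained by the red-black tree code.  "ownspace" is the size of the
 * gap immediately after an entry (so entry->next->start == entry->end +
 * entry->ownspace), and "space" summarizes the largest gap anywhere in
 * that entry's subtree, which is why a single test of the root can rule
 * out the whole tree.
 */
#ifdef notyet
static bool
example_tree_has_gap(struct vm_map *map, vsize_t length)
{
	struct vm_map_entry *root = RB_ROOT(&map->rbhead);

	/* does some entry in the tree offer a gap of at least "length"? */
	return root != NULL && root->space >= length;
}
#endif /* notyet */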
2136
2137 /*
2138 * U N M A P - m a i n h e l p e r f u n c t i o n s
2139 */
2140
2141 /*
2142 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2143 *
2144 * => caller must check alignment and size
2145 * => map must be locked by caller
2146  * => we return a list of map entries that we've removed from the map
2147 * in "entry_list"
2148 */
2149
2150 void
2151 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2152 struct vm_map_entry **entry_list /* OUT */,
2153 struct uvm_mapent_reservation *umr, int flags)
2154 {
2155 struct vm_map_entry *entry, *first_entry, *next;
2156 vaddr_t len;
2157 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2158
2159 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2160 map, start, end, 0);
2161 VM_MAP_RANGE_CHECK(map, start, end);
2162
2163 uvm_map_check(map, "unmap_remove entry");
2164
2165 /*
2166 * find first entry
2167 */
2168
2169 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2170 /* clip and go... */
2171 entry = first_entry;
2172 UVM_MAP_CLIP_START(map, entry, start, umr);
2173 /* critical! prevents stale hint */
2174 SAVE_HINT(map, entry, entry->prev);
2175 } else {
2176 entry = first_entry->next;
2177 }
2178
2179 /*
2180 * Save the free space hint
2181 */
2182
2183 if (map->first_free != &map->header && map->first_free->start >= start)
2184 map->first_free = entry->prev;
2185
2186 /*
2187 * note: we now re-use first_entry for a different task. we remove
2188 * a number of map entries from the map and save them in a linked
2189 * list headed by "first_entry". once we remove them from the map
2190 * the caller should unlock the map and drop the references to the
2191 	 * backing objects [cf. uvm_unmap_detach].  the objective is to
2192 * separate unmapping from reference dropping. why?
2193 * [1] the map has to be locked for unmapping
2194 * [2] the map need not be locked for reference dropping
2195 * [3] dropping references may trigger pager I/O, and if we hit
2196 * a pager that does synchronous I/O we may have to wait for it.
2197 * [4] we would like all waiting for I/O to occur with maps unlocked
2198 	 *     so that we don't block other threads.  (a sketch of this
 	 *     pattern follows uvm_unmap_detach below.)
2199 */
2200
2201 first_entry = NULL;
2202 *entry_list = NULL;
2203
2204 /*
2205 * break up the area into map entry sized regions and unmap. note
2206 * that all mappings have to be removed before we can even consider
2207 * dropping references to amaps or VM objects (otherwise we could end
2208 * up with a mapping to a page on the free list which would be very bad)
2209 */
2210
2211 while ((entry != &map->header) && (entry->start < end)) {
2212 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2213
2214 UVM_MAP_CLIP_END(map, entry, end, umr);
2215 next = entry->next;
2216 len = entry->end - entry->start;
2217
2218 /*
2219 * unwire before removing addresses from the pmap; otherwise
2220 * unwiring will put the entries back into the pmap (XXX).
2221 */
2222
2223 if (VM_MAPENT_ISWIRED(entry)) {
2224 uvm_map_entry_unwire(map, entry);
2225 }
2226 if (flags & UVM_FLAG_VAONLY) {
2227
2228 /* nothing */
2229
2230 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2231
2232 /*
2233 * if the map is non-pageable, any pages mapped there
2234 * must be wired and entered with pmap_kenter_pa(),
2235 * and we should free any such pages immediately.
2236 * this is mostly used for kmem_map and mb_map.
2237 */
2238
2239 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2240 uvm_km_pgremove_intrsafe(entry->start,
2241 entry->end);
2242 pmap_kremove(entry->start, len);
2243 }
2244 } else if (UVM_ET_ISOBJ(entry) &&
2245 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2246 KASSERT(vm_map_pmap(map) == pmap_kernel());
2247
2248 /*
2249 * note: kernel object mappings are currently used in
2250 * two ways:
2251 * [1] "normal" mappings of pages in the kernel object
2252 * [2] uvm_km_valloc'd allocations in which we
2253 * pmap_enter in some non-kernel-object page
2254 * (e.g. vmapbuf).
2255 *
2256 * for case [1], we need to remove the mapping from
2257 * the pmap and then remove the page from the kernel
2258 * object (because, once pages in a kernel object are
2259 * unmapped they are no longer needed, unlike, say,
2260 * a vnode where you might want the data to persist
2261 * until flushed out of a queue).
2262 *
2263 * for case [2], we need to remove the mapping from
2264 * the pmap. there shouldn't be any pages at the
2265 * specified offset in the kernel object [but it
2266 * doesn't hurt to call uvm_km_pgremove just to be
2267 * safe?]
2268 *
2269 * uvm_km_pgremove currently does the following:
2270 * for pages in the kernel object in range:
2271 * - drops the swap slot
2272 * - uvm_pagefree the page
2273 */
2274
2275 /*
2276 * remove mappings from pmap and drop the pages
2277 * from the object. offsets are always relative
2278 * to vm_map_min(kernel_map).
2279 */
2280
2281 pmap_remove(pmap_kernel(), entry->start,
2282 entry->start + len);
2283 uvm_km_pgremove(entry->start, entry->end);
2284
2285 /*
2286 * null out kernel_object reference, we've just
2287 * dropped it
2288 */
2289
2290 entry->etype &= ~UVM_ET_OBJ;
2291 entry->object.uvm_obj = NULL;
2292 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2293
2294 /*
2295 * remove mappings the standard way.
2296 */
2297
2298 pmap_remove(map->pmap, entry->start, entry->end);
2299 }
2300
2301 #if defined(DEBUG)
2302 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2303
2304 /*
2305 		 * check for any remaining mappings;
2306 		 * finding one indicates a bug in the caller.
2307 */
2308
2309 vaddr_t va;
2310 for (va = entry->start; va < entry->end;
2311 va += PAGE_SIZE) {
2312 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2313 panic("uvm_unmap_remove: has mapping");
2314 }
2315 }
2316
2317 if (VM_MAP_IS_KERNEL(map)) {
2318 uvm_km_check_empty(entry->start, entry->end,
2319 (map->flags & VM_MAP_INTRSAFE) != 0);
2320 }
2321 }
2322 #endif /* defined(DEBUG) */
2323
2324 /*
2325 * remove entry from map and put it on our list of entries
2326 * that we've nuked. then go to next entry.
2327 */
2328
2329 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2330
2331 /* critical! prevents stale hint */
2332 SAVE_HINT(map, entry, entry->prev);
2333
2334 uvm_map_entry_unlink(map, entry);
2335 KASSERT(map->size >= len);
2336 map->size -= len;
2337 entry->prev = NULL;
2338 entry->next = first_entry;
2339 first_entry = entry;
2340 entry = next;
2341 }
2342 if ((map->flags & VM_MAP_DYING) == 0) {
2343 pmap_update(vm_map_pmap(map));
2344 }
2345
2346 uvm_map_check(map, "unmap_remove leave");
2347
2348 /*
2349 * now we've cleaned up the map and are ready for the caller to drop
2350 * references to the mapped objects.
2351 */
2352
2353 *entry_list = first_entry;
2354 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2355
2356 if (map->flags & VM_MAP_WANTVA) {
2357 mutex_enter(&map->misc_lock);
2358 map->flags &= ~VM_MAP_WANTVA;
2359 cv_broadcast(&map->cv);
2360 mutex_exit(&map->misc_lock);
2361 }
2362 }
2363
2364 /*
2365 * uvm_unmap_detach: drop references in a chain of map entries
2366 *
2367 * => we will free the map entries as we traverse the list.
2368 */
2369
2370 void
2371 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2372 {
2373 struct vm_map_entry *next_entry;
2374 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2375
2376 while (first_entry) {
2377 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2378 UVMHIST_LOG(maphist,
2379 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2380 first_entry, first_entry->aref.ar_amap,
2381 first_entry->object.uvm_obj,
2382 UVM_ET_ISSUBMAP(first_entry));
2383
2384 /*
2385 * drop reference to amap, if we've got one
2386 */
2387
2388 if (first_entry->aref.ar_amap)
2389 uvm_map_unreference_amap(first_entry, flags);
2390
2391 /*
2392 * drop reference to our backing object, if we've got one
2393 */
2394
2395 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2396 if (UVM_ET_ISOBJ(first_entry) &&
2397 first_entry->object.uvm_obj->pgops->pgo_detach) {
2398 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2399 (first_entry->object.uvm_obj);
2400 }
2401 next_entry = first_entry->next;
2402 uvm_mapent_free(first_entry);
2403 first_entry = next_entry;
2404 }
2405 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2406 }
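
/*
 * Illustrative sketch (not in the original source; the wrapper name is
 * hypothetical): the usual unmap pattern pairs uvm_unmap_remove() with
 * uvm_unmap_detach() exactly as the rationale in uvm_unmap_remove
 * describes -- entries are unlinked with the map write-locked, and the
 * possibly sleeping reference drops happen only after the map has been
 * unlocked.
 */
#ifdef notyet
static void
example_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);		/* unmapping requires the map lock */
	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
	vm_map_unlock(map);		/* drop it before any pager I/O */
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}
#endif /* notyet */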
2407
2408 /*
2409 * E X T R A C T I O N F U N C T I O N S
2410 */
2411
2412 /*
2413 * uvm_map_reserve: reserve space in a vm_map for future use.
2414 *
2415 * => we reserve space in a map by putting a dummy map entry in the
2416 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2417 * => map should be unlocked (we will write lock it)
2418 * => we return true if we were able to reserve space
2419 * => XXXCDC: should be inline?
2420 */
2421
2422 int
2423 uvm_map_reserve(struct vm_map *map, vsize_t size,
2424 vaddr_t offset /* hint for pmap_prefer */,
2425 vsize_t align /* alignment */,
2426 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2427 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2428 {
2429 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2430
2431 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2432 map,size,offset,raddr);
2433
2434 size = round_page(size);
2435
2436 /*
2437 * reserve some virtual space.
2438 */
2439
2440 if (uvm_map(map, raddr, size, NULL, offset, align,
2441 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2442 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2443 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2444 return (false);
2445 }
2446
2447 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2448 return (true);
2449 }
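
/*
 * Illustrative sketch (not in the original source; names are
 * hypothetical): a caller reserves virtual space and later either fills
 * the blank entry via uvm_map_replace() or releases it with uvm_unmap(),
 * which is how uvm_map_extract() below uses its reservation.
 */
#ifdef notyet
static int
example_reserve(struct vm_map *map, vsize_t len, vaddr_t *vap)
{
	vaddr_t va = vm_map_min(map);	/* IN: hint, OUT: reserved VA */

	if (!uvm_map_reserve(map, len, 0 /* pmap_prefer hint */, 0, &va, 0))
		return ENOMEM;
	*vap = va;	/* VA now held by a blank PROT_NONE entry */
	return 0;
}
#endif /* notyet */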
2450
2451 /*
2452 * uvm_map_replace: replace a reserved (blank) area of memory with
2453 * real mappings.
2454 *
2455 * => caller must WRITE-LOCK the map
2456 * => we return true if replacement was a success
2457  * => we expect the newents chain to have nnewents entries on it and
2458 * we expect newents->prev to point to the last entry on the list
2459 * => note newents is allowed to be NULL
2460 */
2461
2462 int
2463 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2464 struct vm_map_entry *newents, int nnewents)
2465 {
2466 struct vm_map_entry *oldent, *last;
2467
2468 uvm_map_check(map, "map_replace entry");
2469
2470 /*
2471 * first find the blank map entry at the specified address
2472 */
2473
2474 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2475 return (false);
2476 }
2477
2478 /*
2479 * check to make sure we have a proper blank entry
2480 */
2481
2482 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2483 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2484 }
2485 if (oldent->start != start || oldent->end != end ||
2486 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2487 return (false);
2488 }
2489
2490 #ifdef DIAGNOSTIC
2491
2492 /*
2493 * sanity check the newents chain
2494 */
2495
2496 {
2497 struct vm_map_entry *tmpent = newents;
2498 int nent = 0;
2499 vaddr_t cur = start;
2500
2501 while (tmpent) {
2502 nent++;
2503 if (tmpent->start < cur)
2504 panic("uvm_map_replace1");
2505 if (tmpent->start > tmpent->end || tmpent->end > end) {
2506 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2507 tmpent->start, tmpent->end, end);
2508 panic("uvm_map_replace2");
2509 }
2510 cur = tmpent->end;
2511 if (tmpent->next) {
2512 if (tmpent->next->prev != tmpent)
2513 panic("uvm_map_replace3");
2514 } else {
2515 if (newents->prev != tmpent)
2516 panic("uvm_map_replace4");
2517 }
2518 tmpent = tmpent->next;
2519 }
2520 if (nent != nnewents)
2521 panic("uvm_map_replace5");
2522 }
2523 #endif
2524
2525 /*
2526 * map entry is a valid blank! replace it. (this does all the
2527 * work of map entry link/unlink...).
2528 */
2529
2530 if (newents) {
2531 last = newents->prev;
2532
2533 /* critical: flush stale hints out of map */
2534 SAVE_HINT(map, map->hint, newents);
2535 if (map->first_free == oldent)
2536 map->first_free = last;
2537
2538 last->next = oldent->next;
2539 last->next->prev = last;
2540
2541 /* Fix RB tree */
2542 uvm_rb_remove(map, oldent);
2543
2544 newents->prev = oldent->prev;
2545 newents->prev->next = newents;
2546 map->nentries = map->nentries + (nnewents - 1);
2547
2548 /* Fixup the RB tree */
2549 {
2550 int i;
2551 struct vm_map_entry *tmp;
2552
2553 tmp = newents;
2554 for (i = 0; i < nnewents && tmp; i++) {
2555 uvm_rb_insert(map, tmp);
2556 tmp = tmp->next;
2557 }
2558 }
2559 } else {
2560 /* NULL list of new entries: just remove the old one */
2561 clear_hints(map, oldent);
2562 uvm_map_entry_unlink(map, oldent);
2563 }
2564
2565 uvm_map_check(map, "map_replace leave");
2566
2567 /*
2568 * now we can free the old blank entry and return.
2569 */
2570
2571 uvm_mapent_free(oldent);
2572 return (true);
2573 }
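
/*
 * Illustrative sketch (annotation only; the helper name is
 * hypothetical): the chain format expected above is a NULL-terminated
 * forward list whose head's "prev" pointer names the tail, which is how
 * uvm_map_extract() closes the chain it builds (see step 4 below).
 */
#ifdef notyet
static void
example_close_chain(struct vm_map_entry *chain,
    struct vm_map_entry *endchain)
{

	/* entries were appended via endchain->next; point head at tail */
	if (chain != NULL)
		chain->prev = endchain;
}
#endif /* notyet */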
2574
2575 /*
2576 * uvm_map_extract: extract a mapping from a map and put it somewhere
2577 * (maybe removing the old mapping)
2578 *
2579 * => maps should be unlocked (we will write lock them)
2580 * => returns 0 on success, error code otherwise
2581 * => start must be page aligned
2582 * => len must be page sized
2583 * => flags:
2584 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2585 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2586 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2587 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2588 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2589 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2590 * be used from within the kernel in a kernel level map <<<
2591 */
2592
2593 int
2594 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2595 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2596 {
2597 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2598 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2599 *deadentry, *oldentry;
2600 vsize_t elen;
2601 int nchain, error, copy_ok;
2602 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2603
2604 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2605 len,0);
2606 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2607
2608 uvm_map_check(srcmap, "map_extract src enter");
2609 uvm_map_check(dstmap, "map_extract dst enter");
2610
2611 /*
2612 * step 0: sanity check: start must be on a page boundary, length
2613 * must be page sized. can't ask for CONTIG/QREF if you asked for
2614 * REMOVE.
2615 */
2616
2617 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2618 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2619 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2620
2621 /*
2622 * step 1: reserve space in the target map for the extracted area
2623 */
2624
2625 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2626 dstaddr = vm_map_min(dstmap);
2627 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2628 return (ENOMEM);
2629 *dstaddrp = dstaddr; /* pass address back to caller */
2630 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2631 } else {
2632 dstaddr = *dstaddrp;
2633 }
2634
2635 /*
2636 * step 2: setup for the extraction process loop by init'ing the
2637 * map entry chain, locking src map, and looking up the first useful
2638 * entry in the map.
2639 */
2640
2641 end = start + len;
2642 newend = dstaddr + len;
2643 chain = endchain = NULL;
2644 nchain = 0;
2645 vm_map_lock(srcmap);
2646
2647 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2648
2649 /* "start" is within an entry */
2650 if (flags & UVM_EXTRACT_QREF) {
2651
2652 /*
2653 * for quick references we don't clip the entry, so
2654 * the entry may map space "before" the starting
2655 * virtual address... this is the "fudge" factor
2656 * (which can be non-zero only the first time
2657 * through the "while" loop in step 3).
2658 */
2659
2660 fudge = start - entry->start;
2661 } else {
2662
2663 /*
2664 			 * normal reference: we clip the entry to fit (thus
2665 * fudge is zero)
2666 */
2667
2668 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2669 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2670 fudge = 0;
2671 }
2672 } else {
2673
2674 /* "start" is not within an entry ... skip to next entry */
2675 if (flags & UVM_EXTRACT_CONTIG) {
2676 error = EINVAL;
2677 goto bad; /* definite hole here ... */
2678 }
2679
2680 entry = entry->next;
2681 fudge = 0;
2682 }
2683
2684 /* save values from srcmap for step 6 */
2685 orig_entry = entry;
2686 orig_fudge = fudge;
2687
2688 /*
2689 * step 3: now start looping through the map entries, extracting
2690 * as we go.
2691 */
2692
2693 while (entry->start < end && entry != &srcmap->header) {
2694
2695 /* if we are not doing a quick reference, clip it */
2696 if ((flags & UVM_EXTRACT_QREF) == 0)
2697 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2698
2699 /* clear needs_copy (allow chunking) */
2700 if (UVM_ET_ISNEEDSCOPY(entry)) {
2701 amap_copy(srcmap, entry,
2702 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2703 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2704 error = ENOMEM;
2705 goto bad;
2706 }
2707
2708 /* amap_copy could clip (during chunk)! update fudge */
2709 if (fudge) {
2710 fudge = start - entry->start;
2711 orig_fudge = fudge;
2712 }
2713 }
2714
2715 /* calculate the offset of this from "start" */
2716 oldoffset = (entry->start + fudge) - start;
2717
2718 /* allocate a new map entry */
2719 newentry = uvm_mapent_alloc(dstmap, 0);
2720 if (newentry == NULL) {
2721 error = ENOMEM;
2722 goto bad;
2723 }
2724
2725 /* set up new map entry */
2726 newentry->next = NULL;
2727 newentry->prev = endchain;
2728 newentry->start = dstaddr + oldoffset;
2729 newentry->end =
2730 newentry->start + (entry->end - (entry->start + fudge));
2731 if (newentry->end > newend || newentry->end < newentry->start)
2732 newentry->end = newend;
2733 newentry->object.uvm_obj = entry->object.uvm_obj;
2734 if (newentry->object.uvm_obj) {
2735 if (newentry->object.uvm_obj->pgops->pgo_reference)
2736 newentry->object.uvm_obj->pgops->
2737 pgo_reference(newentry->object.uvm_obj);
2738 newentry->offset = entry->offset + fudge;
2739 } else {
2740 newentry->offset = 0;
2741 }
2742 newentry->etype = entry->etype;
2743 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2744 entry->max_protection : entry->protection;
2745 newentry->max_protection = entry->max_protection;
2746 newentry->inheritance = entry->inheritance;
2747 newentry->wired_count = 0;
2748 newentry->aref.ar_amap = entry->aref.ar_amap;
2749 if (newentry->aref.ar_amap) {
2750 newentry->aref.ar_pageoff =
2751 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2752 uvm_map_reference_amap(newentry, AMAP_SHARED |
2753 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2754 } else {
2755 newentry->aref.ar_pageoff = 0;
2756 }
2757 newentry->advice = entry->advice;
2758 if ((flags & UVM_EXTRACT_QREF) != 0) {
2759 newentry->flags |= UVM_MAP_NOMERGE;
2760 }
2761
2762 /* now link it on the chain */
2763 nchain++;
2764 if (endchain == NULL) {
2765 chain = endchain = newentry;
2766 } else {
2767 endchain->next = newentry;
2768 endchain = newentry;
2769 }
2770
2771 /* end of 'while' loop! */
2772 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2773 (entry->next == &srcmap->header ||
2774 entry->next->start != entry->end)) {
2775 error = EINVAL;
2776 goto bad;
2777 }
2778 entry = entry->next;
2779 fudge = 0;
2780 }
2781
2782 /*
2783 * step 4: close off chain (in format expected by uvm_map_replace)
2784 */
2785
2786 if (chain)
2787 chain->prev = endchain;
2788
2789 /*
2790 * step 5: attempt to lock the dest map so we can pmap_copy.
2791 * note usage of copy_ok:
2792 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2793 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2794 */
2795
2796 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2797 copy_ok = 1;
2798 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2799 nchain)) {
2800 if (srcmap != dstmap)
2801 vm_map_unlock(dstmap);
2802 error = EIO;
2803 goto bad;
2804 }
2805 } else {
2806 copy_ok = 0;
2807 		/* replace deferred until step 7 */
2808 }
2809
2810 /*
2811 * step 6: traverse the srcmap a second time to do the following:
2812 * - if we got a lock on the dstmap do pmap_copy
2813 * - if UVM_EXTRACT_REMOVE remove the entries
2814 * we make use of orig_entry and orig_fudge (saved in step 2)
2815 */
2816
2817 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2818
2819 /* purge possible stale hints from srcmap */
2820 if (flags & UVM_EXTRACT_REMOVE) {
2821 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2822 if (srcmap->first_free != &srcmap->header &&
2823 srcmap->first_free->start >= start)
2824 srcmap->first_free = orig_entry->prev;
2825 }
2826
2827 entry = orig_entry;
2828 fudge = orig_fudge;
2829 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2830
2831 while (entry->start < end && entry != &srcmap->header) {
2832 if (copy_ok) {
2833 oldoffset = (entry->start + fudge) - start;
2834 elen = MIN(end, entry->end) -
2835 (entry->start + fudge);
2836 pmap_copy(dstmap->pmap, srcmap->pmap,
2837 dstaddr + oldoffset, elen,
2838 entry->start + fudge);
2839 }
2840
2841 /* we advance "entry" in the following if statement */
2842 if (flags & UVM_EXTRACT_REMOVE) {
2843 pmap_remove(srcmap->pmap, entry->start,
2844 entry->end);
2845 oldentry = entry; /* save entry */
2846 entry = entry->next; /* advance */
2847 uvm_map_entry_unlink(srcmap, oldentry);
2848 /* add to dead list */
2849 oldentry->next = deadentry;
2850 deadentry = oldentry;
2851 } else {
2852 entry = entry->next; /* advance */
2853 }
2854
2855 /* end of 'while' loop */
2856 fudge = 0;
2857 }
2858 pmap_update(srcmap->pmap);
2859
2860 /*
2861 * unlock dstmap. we will dispose of deadentry in
2862 * step 7 if needed
2863 */
2864
2865 if (copy_ok && srcmap != dstmap)
2866 vm_map_unlock(dstmap);
2867
2868 } else {
2869 deadentry = NULL;
2870 }
2871
2872 /*
2873 * step 7: we are done with the source map, unlock. if copy_ok
2874 * is 0 then we have not replaced the dummy mapping in dstmap yet
2875 * and we need to do so now.
2876 */
2877
2878 vm_map_unlock(srcmap);
2879 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2880 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2881
2882 /* now do the replacement if we didn't do it in step 5 */
2883 if (copy_ok == 0) {
2884 vm_map_lock(dstmap);
2885 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2886 nchain);
2887 vm_map_unlock(dstmap);
2888
2889 if (error == false) {
2890 error = EIO;
2891 goto bad2;
2892 }
2893 }
2894
2895 uvm_map_check(srcmap, "map_extract src leave");
2896 uvm_map_check(dstmap, "map_extract dst leave");
2897
2898 return (0);
2899
2900 /*
2901 * bad: failure recovery
2902 */
2903 bad:
2904 vm_map_unlock(srcmap);
2905 bad2: /* src already unlocked */
2906 if (chain)
2907 uvm_unmap_detach(chain,
2908 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2909
2910 uvm_map_check(srcmap, "map_extract src err leave");
2911 uvm_map_check(dstmap, "map_extract dst err leave");
2912
2913 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2914 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2915 }
2916 return (error);
2917 }
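
/*
 * Illustrative sketch (not in the original source; the wrapper name is
 * hypothetical): a "move" style extraction, remembering that
 * UVM_EXTRACT_REMOVE may not be combined with CONTIG or QREF.
 */
#ifdef notyet
static int
example_move_mapping(struct vm_map *srcmap, vaddr_t start, vsize_t len,
    struct vm_map *dstmap, vaddr_t *dstaddrp)
{

	/* start/len must be page aligned; *dstaddrp returns the new VA */
	return uvm_map_extract(srcmap, start, len, dstmap, dstaddrp,
	    UVM_EXTRACT_REMOVE);
}
#endif /* notyet */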
2918
2919 /* end of extraction functions */
2920
2921 /*
2922 * uvm_map_submap: punch down part of a map into a submap
2923 *
2924 * => only the kernel_map is allowed to be submapped
2925 * => the purpose of submapping is to break up the locking granularity
2926 * of a larger map
2927 * => the range specified must have been mapped previously with a uvm_map()
2928 * call [with uobj==NULL] to create a blank map entry in the main map.
2929 * [And it had better still be blank!]
2930 * => maps which contain submaps should never be copied or forked.
2931 * => to remove a submap, use uvm_unmap() on the main map
2932 * and then uvm_map_deallocate() the submap.
2933 * => main map must be unlocked.
2934 * => submap must have been init'd and have a zero reference count.
2935 * [need not be locked as we don't actually reference it]
2936 */
2937
2938 int
2939 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2940 struct vm_map *submap)
2941 {
2942 struct vm_map_entry *entry;
2943 struct uvm_mapent_reservation umr;
2944 int error;
2945
2946 uvm_mapent_reserve(map, &umr, 2, 0);
2947
2948 vm_map_lock(map);
2949 VM_MAP_RANGE_CHECK(map, start, end);
2950
2951 if (uvm_map_lookup_entry(map, start, &entry)) {
2952 UVM_MAP_CLIP_START(map, entry, start, &umr);
2953 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
2954 } else {
2955 entry = NULL;
2956 }
2957
2958 if (entry != NULL &&
2959 entry->start == start && entry->end == end &&
2960 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2961 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2962 entry->etype |= UVM_ET_SUBMAP;
2963 entry->object.sub_map = submap;
2964 entry->offset = 0;
2965 uvm_map_reference(submap);
2966 error = 0;
2967 } else {
2968 error = EINVAL;
2969 }
2970 vm_map_unlock(map);
2971
2972 uvm_mapent_unreserve(map, &umr);
2973
2974 return error;
2975 }
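
/*
 * Illustrative sketch (not in the original source; the wrapper name is
 * hypothetical): the protocol described above first maps a blank entry
 * (uobj == NULL, no amap) and then punches the submap into it.
 */
#ifdef notyet
static int
example_install_submap(struct vm_map *map, vaddr_t *vap, vsize_t len,
    struct vm_map *submap)
{
	int error;

	/* create the blank placeholder entry in the main map */
	error = uvm_map(map, vap, len, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
	if (error)
		return error;
	return uvm_map_submap(map, *vap, *vap + len, submap);
}
#endif /* notyet */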
2976
2977 /*
2978 * uvm_map_setup_kernel: init in-kernel map
2979 *
2980 * => map must not be in service yet.
2981 */
2982
2983 void
2984 uvm_map_setup_kernel(struct vm_map_kernel *map,
2985 vaddr_t vmin, vaddr_t vmax, int flags)
2986 {
2987
2988 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
2989 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
2990 LIST_INIT(&map->vmk_kentry_free);
2991 map->vmk_merged_entries = NULL;
2992 }
2993
2994
2995 /*
2996 * uvm_map_protect: change map protection
2997 *
2998 * => set_max means set max_protection.
2999 * => map must be unlocked.
3000 */
3001
3002 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3003 ~VM_PROT_WRITE : VM_PROT_ALL)
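
/*
 * note (annotation): MASK strips VM_PROT_WRITE from the pmap-level
 * protection of copy-on-write entries, so the first write access still
 * faults and triggers the copy instead of scribbling on shared pages.
 */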
3004
3005 int
3006 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3007 vm_prot_t new_prot, bool set_max)
3008 {
3009 struct vm_map_entry *current, *entry;
3010 int error = 0;
3011 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
3012 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
3013 map, start, end, new_prot);
3014
3015 vm_map_lock(map);
3016 VM_MAP_RANGE_CHECK(map, start, end);
3017 if (uvm_map_lookup_entry(map, start, &entry)) {
3018 UVM_MAP_CLIP_START(map, entry, start, NULL);
3019 } else {
3020 entry = entry->next;
3021 }
3022
3023 /*
3024 * make a first pass to check for protection violations.
3025 */
3026
3027 current = entry;
3028 while ((current != &map->header) && (current->start < end)) {
3029 if (UVM_ET_ISSUBMAP(current)) {
3030 error = EINVAL;
3031 goto out;
3032 }
3033 if ((new_prot & current->max_protection) != new_prot) {
3034 error = EACCES;
3035 goto out;
3036 }
3037 /*
3038 * Don't allow VM_PROT_EXECUTE to be set on entries that
3039 * point to vnodes that are associated with a NOEXEC file
3040 * system.
3041 */
3042 if (UVM_ET_ISOBJ(current) &&
3043 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3044 struct vnode *vp =
3045 (struct vnode *) current->object.uvm_obj;
3046
3047 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3048 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3049 error = EACCES;
3050 goto out;
3051 }
3052 }
3053
3054 current = current->next;
3055 }
3056
3057 /* go back and fix up protections (no need to clip this time). */
3058
3059 current = entry;
3060 while ((current != &map->header) && (current->start < end)) {
3061 vm_prot_t old_prot;
3062
3063 UVM_MAP_CLIP_END(map, current, end, NULL);
3064 old_prot = current->protection;
3065 if (set_max)
3066 current->protection =
3067 (current->max_protection = new_prot) & old_prot;
3068 else
3069 current->protection = new_prot;
3070
3071 /*
3072 * update physical map if necessary. worry about copy-on-write
3073 * here -- CHECK THIS XXX
3074 */
3075
3076 if (current->protection != old_prot) {
3077 /* update pmap! */
3078 pmap_protect(map->pmap, current->start, current->end,
3079 			    current->protection & MASK(current));
3080
3081 /*
3082 * If this entry points at a vnode, and the
3083 * protection includes VM_PROT_EXECUTE, mark
3084 * the vnode as VEXECMAP.
3085 */
3086 if (UVM_ET_ISOBJ(current)) {
3087 struct uvm_object *uobj =
3088 current->object.uvm_obj;
3089
3090 if (UVM_OBJ_IS_VNODE(uobj) &&
3091 (current->protection & VM_PROT_EXECUTE)) {
3092 mutex_enter(&uobj->vmobjlock);
3093 vn_markexec((struct vnode *) uobj);
3094 mutex_exit(&uobj->vmobjlock);
3095 }
3096 }
3097 }
3098
3099 /*
3100 * If the map is configured to lock any future mappings,
3101 * wire this entry now if the old protection was VM_PROT_NONE
3102 * and the new protection is not VM_PROT_NONE.
3103 */
3104
3105 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3106 VM_MAPENT_ISWIRED(entry) == 0 &&
3107 old_prot == VM_PROT_NONE &&
3108 new_prot != VM_PROT_NONE) {
3109 if (uvm_map_pageable(map, entry->start,
3110 entry->end, false,
3111 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3112
3113 /*
3114 * If locking the entry fails, remember the
3115 * error if it's the first one. Note we
3116 * still continue setting the protection in
3117 * the map, but will return the error
3118 * condition regardless.
3119 *
3120 * XXX Ignore what the actual error is,
3121 * XXX just call it a resource shortage
3122 * XXX so that it doesn't get confused
3123 * XXX what uvm_map_protect() itself would
3124 * XXX normally return.
3125 */
3126
3127 error = ENOMEM;
3128 }
3129 }
3130 current = current->next;
3131 }
3132 pmap_update(map->pmap);
3133
3134 out:
3135 vm_map_unlock(map);
3136
3137 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3138 return error;
3139 }
3140
3141 #undef MASK
3142
3143 /*
3144 * uvm_map_inherit: set inheritance code for range of addrs in map.
3145 *
3146 * => map must be unlocked
3147 * => note that the inherit code is used during a "fork". see fork
3148 * code for details.
3149 */
3150
3151 int
3152 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3153 vm_inherit_t new_inheritance)
3154 {
3155 struct vm_map_entry *entry, *temp_entry;
3156 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3157 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3158 map, start, end, new_inheritance);
3159
3160 switch (new_inheritance) {
3161 case MAP_INHERIT_NONE:
3162 case MAP_INHERIT_COPY:
3163 case MAP_INHERIT_SHARE:
3164 break;
3165 default:
3166 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3167 return EINVAL;
3168 }
3169
3170 vm_map_lock(map);
3171 VM_MAP_RANGE_CHECK(map, start, end);
3172 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3173 entry = temp_entry;
3174 UVM_MAP_CLIP_START(map, entry, start, NULL);
3175 } else {
3176 entry = temp_entry->next;
3177 }
3178 while ((entry != &map->header) && (entry->start < end)) {
3179 UVM_MAP_CLIP_END(map, entry, end, NULL);
3180 entry->inheritance = new_inheritance;
3181 entry = entry->next;
3182 }
3183 vm_map_unlock(map);
3184 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3185 return 0;
3186 }
3187
3188 /*
3189 * uvm_map_advice: set advice code for range of addrs in map.
3190 *
3191 * => map must be unlocked
3192 */
3193
3194 int
3195 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3196 {
3197 struct vm_map_entry *entry, *temp_entry;
3198 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3199 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3200 map, start, end, new_advice);
3201
3202 vm_map_lock(map);
3203 VM_MAP_RANGE_CHECK(map, start, end);
3204 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3205 entry = temp_entry;
3206 UVM_MAP_CLIP_START(map, entry, start, NULL);
3207 } else {
3208 entry = temp_entry->next;
3209 }
3210
3211 /*
3212 * XXXJRT: disallow holes?
3213 */
3214
3215 while ((entry != &map->header) && (entry->start < end)) {
3216 UVM_MAP_CLIP_END(map, entry, end, NULL);
3217
3218 switch (new_advice) {
3219 case MADV_NORMAL:
3220 case MADV_RANDOM:
3221 case MADV_SEQUENTIAL:
3222 /* nothing special here */
3223 break;
3224
3225 default:
3226 vm_map_unlock(map);
3227 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3228 return EINVAL;
3229 }
3230 entry->advice = new_advice;
3231 entry = entry->next;
3232 }
3233
3234 vm_map_unlock(map);
3235 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3236 return 0;
3237 }
3238
3239 /*
3240 * uvm_map_pageable: sets the pageability of a range in a map.
3241 *
3242 * => wires map entries. should not be used for transient page locking.
3243 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3244 * => regions specified as not pageable require lock-down (wired) memory
3245 * and page tables.
3246 * => map must never be read-locked
3247 * => if islocked is true, map is already write-locked
3248 * => we always unlock the map, since we must downgrade to a read-lock
3249 * to call uvm_fault_wire()
3250 * => XXXCDC: check this and try and clean it up.
3251 */
3252
3253 int
3254 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3255 bool new_pageable, int lockflags)
3256 {
3257 struct vm_map_entry *entry, *start_entry, *failed_entry;
3258 int rv;
3259 #ifdef DIAGNOSTIC
3260 u_int timestamp_save;
3261 #endif
3262 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3263 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3264 map, start, end, new_pageable);
3265 KASSERT(map->flags & VM_MAP_PAGEABLE);
3266
3267 if ((lockflags & UVM_LK_ENTER) == 0)
3268 vm_map_lock(map);
3269 VM_MAP_RANGE_CHECK(map, start, end);
3270
3271 /*
3272 * only one pageability change may take place at one time, since
3273 * uvm_fault_wire assumes it will be called only once for each
3274 * wiring/unwiring. therefore, we have to make sure we're actually
3275 * changing the pageability for the entire region. we do so before
3276 * making any changes.
3277 */
3278
3279 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3280 if ((lockflags & UVM_LK_EXIT) == 0)
3281 vm_map_unlock(map);
3282
3283 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3284 return EFAULT;
3285 }
3286 entry = start_entry;
3287
3288 /*
3289 * handle wiring and unwiring separately.
3290 */
3291
3292 if (new_pageable) { /* unwire */
3293 UVM_MAP_CLIP_START(map, entry, start, NULL);
3294
3295 /*
3296 * unwiring. first ensure that the range to be unwired is
3297 * really wired down and that there are no holes.
3298 */
3299
3300 while ((entry != &map->header) && (entry->start < end)) {
3301 if (entry->wired_count == 0 ||
3302 (entry->end < end &&
3303 (entry->next == &map->header ||
3304 entry->next->start > entry->end))) {
3305 if ((lockflags & UVM_LK_EXIT) == 0)
3306 vm_map_unlock(map);
3307 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3308 return EINVAL;
3309 }
3310 entry = entry->next;
3311 }
3312
3313 /*
3314 * POSIX 1003.1b - a single munlock call unlocks a region,
3315 * regardless of the number of mlock calls made on that
3316 * region.
3317 */
3318
3319 entry = start_entry;
3320 while ((entry != &map->header) && (entry->start < end)) {
3321 UVM_MAP_CLIP_END(map, entry, end, NULL);
3322 if (VM_MAPENT_ISWIRED(entry))
3323 uvm_map_entry_unwire(map, entry);
3324 entry = entry->next;
3325 }
3326 if ((lockflags & UVM_LK_EXIT) == 0)
3327 vm_map_unlock(map);
3328 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3329 return 0;
3330 }
3331
3332 /*
3333 * wire case: in two passes [XXXCDC: ugly block of code here]
3334 *
3335 * 1: holding the write lock, we create any anonymous maps that need
3336 * to be created. then we clip each map entry to the region to
3337 * be wired and increment its wiring count.
3338 *
3339 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3340 * in the pages for any newly wired area (wired_count == 1).
3341 *
3342 * downgrading to a read lock for uvm_fault_wire avoids a possible
3343 * deadlock with another thread that may have faulted on one of
3344 * the pages to be wired (it would mark the page busy, blocking
3345 * us, then in turn block on the map lock that we hold). because
3346 * of problems in the recursive lock package, we cannot upgrade
3347 * to a write lock in vm_map_lookup. thus, any actions that
3348 * require the write lock must be done beforehand. because we
3349 * keep the read lock on the map, the copy-on-write status of the
3350 * entries we modify here cannot change.
3351 */
3352
3353 while ((entry != &map->header) && (entry->start < end)) {
3354 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3355
3356 /*
3357 * perform actions of vm_map_lookup that need the
3358 * write lock on the map: create an anonymous map
3359 * for a copy-on-write region, or an anonymous map
3360 * for a zero-fill region. (XXXCDC: submap case
3361 * ok?)
3362 */
3363
3364 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3365 if (UVM_ET_ISNEEDSCOPY(entry) &&
3366 ((entry->max_protection & VM_PROT_WRITE) ||
3367 (entry->object.uvm_obj == NULL))) {
3368 amap_copy(map, entry, 0, start, end);
3369 /* XXXCDC: wait OK? */
3370 }
3371 }
3372 }
3373 UVM_MAP_CLIP_START(map, entry, start, NULL);
3374 UVM_MAP_CLIP_END(map, entry, end, NULL);
3375 entry->wired_count++;
3376
3377 /*
3378 * Check for holes
3379 */
3380
3381 if (entry->protection == VM_PROT_NONE ||
3382 (entry->end < end &&
3383 (entry->next == &map->header ||
3384 entry->next->start > entry->end))) {
3385
3386 /*
3387 * found one. amap creation actions do not need to
3388 * be undone, but the wired counts need to be restored.
3389 */
3390
3391 while (entry != &map->header && entry->end > start) {
3392 entry->wired_count--;
3393 entry = entry->prev;
3394 }
3395 if ((lockflags & UVM_LK_EXIT) == 0)
3396 vm_map_unlock(map);
3397 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3398 return EINVAL;
3399 }
3400 entry = entry->next;
3401 }
3402
3403 /*
3404 * Pass 2.
3405 */
3406
3407 #ifdef DIAGNOSTIC
3408 timestamp_save = map->timestamp;
3409 #endif
3410 vm_map_busy(map);
3411 vm_map_downgrade(map);
3412
3413 rv = 0;
3414 entry = start_entry;
3415 while (entry != &map->header && entry->start < end) {
3416 if (entry->wired_count == 1) {
3417 rv = uvm_fault_wire(map, entry->start, entry->end,
3418 entry->max_protection, 1);
3419 if (rv) {
3420
3421 /*
3422 * wiring failed. break out of the loop.
3423 * we'll clean up the map below, once we
3424 * have a write lock again.
3425 */
3426
3427 break;
3428 }
3429 }
3430 entry = entry->next;
3431 }
3432
3433 if (rv) { /* failed? */
3434
3435 /*
3436 * Get back to an exclusive (write) lock.
3437 */
3438
3439 vm_map_upgrade(map);
3440 vm_map_unbusy(map);
3441
3442 #ifdef DIAGNOSTIC
3443 if (timestamp_save != map->timestamp)
3444 panic("uvm_map_pageable: stale map");
3445 #endif
3446
3447 /*
3448 * first drop the wiring count on all the entries
3449 * which haven't actually been wired yet.
3450 */
3451
3452 failed_entry = entry;
3453 while (entry != &map->header && entry->start < end) {
3454 entry->wired_count--;
3455 entry = entry->next;
3456 }
3457
3458 /*
3459 * now, unwire all the entries that were successfully
3460 * wired above.
3461 */
3462
3463 entry = start_entry;
3464 while (entry != failed_entry) {
3465 entry->wired_count--;
3466 if (VM_MAPENT_ISWIRED(entry) == 0)
3467 uvm_map_entry_unwire(map, entry);
3468 entry = entry->next;
3469 }
3470 if ((lockflags & UVM_LK_EXIT) == 0)
3471 vm_map_unlock(map);
3472 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3473 return (rv);
3474 }
3475
3476 /* We are holding a read lock here. */
3477 if ((lockflags & UVM_LK_EXIT) == 0) {
3478 vm_map_unbusy(map);
3479 vm_map_unlock_read(map);
3480 } else {
3481
3482 /*
3483 * Get back to an exclusive (write) lock.
3484 */
3485
3486 vm_map_upgrade(map);
3487 vm_map_unbusy(map);
3488 }
3489
3490 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3491 return 0;
3492 }
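
/*
 * Illustrative sketch (not in the original source; wrapper names are
 * hypothetical): the calling convention above, with new_pageable ==
 * false meaning "wire" and true meaning "unwire".
 */
#ifdef notyet
static int
example_wire_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{

	return uvm_map_pageable(map, start, end, false, 0);
}

static int
example_unwire_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{

	return uvm_map_pageable(map, start, end, true, 0);
}
#endif /* notyet */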
3493
3494 /*
3495 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3496 * all mapped regions.
3497 *
3498 * => map must not be locked.
3499 * => if no flags are specified, all regions are unwired.
3500 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3501 */
3502
3503 int
3504 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3505 {
3506 struct vm_map_entry *entry, *failed_entry;
3507 vsize_t size;
3508 int rv;
3509 #ifdef DIAGNOSTIC
3510 u_int timestamp_save;
3511 #endif
3512 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3513 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3514
3515 KASSERT(map->flags & VM_MAP_PAGEABLE);
3516
3517 vm_map_lock(map);
3518
3519 /*
3520 * handle wiring and unwiring separately.
3521 */
3522
3523 if (flags == 0) { /* unwire */
3524
3525 /*
3526 * POSIX 1003.1b -- munlockall unlocks all regions,
3527 * regardless of how many times mlockall has been called.
3528 */
3529
3530 for (entry = map->header.next; entry != &map->header;
3531 entry = entry->next) {
3532 if (VM_MAPENT_ISWIRED(entry))
3533 uvm_map_entry_unwire(map, entry);
3534 }
3535 map->flags &= ~VM_MAP_WIREFUTURE;
3536 vm_map_unlock(map);
3537 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3538 return 0;
3539 }
3540
3541 if (flags & MCL_FUTURE) {
3542
3543 /*
3544 * must wire all future mappings; remember this.
3545 */
3546
3547 map->flags |= VM_MAP_WIREFUTURE;
3548 }
3549
3550 if ((flags & MCL_CURRENT) == 0) {
3551
3552 /*
3553 * no more work to do!
3554 */
3555
3556 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3557 vm_map_unlock(map);
3558 return 0;
3559 }
3560
3561 /*
3562 * wire case: in three passes [XXXCDC: ugly block of code here]
3563 *
3564 * 1: holding the write lock, count all pages mapped by non-wired
3565 * entries. if this would cause us to go over our limit, we fail.
3566 *
3567 * 2: still holding the write lock, we create any anonymous maps that
3568 	 *    need to be created.  then we increment each entry's wiring count.
3569 *
3570 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3571 * in the pages for any newly wired area (wired_count == 1).
3572 *
3573 * downgrading to a read lock for uvm_fault_wire avoids a possible
3574 * deadlock with another thread that may have faulted on one of
3575 * the pages to be wired (it would mark the page busy, blocking
3576 * us, then in turn block on the map lock that we hold). because
3577 * of problems in the recursive lock package, we cannot upgrade
3578 * to a write lock in vm_map_lookup. thus, any actions that
3579 * require the write lock must be done beforehand. because we
3580 * keep the read lock on the map, the copy-on-write status of the
3581 * entries we modify here cannot change.
3582 */
3583
3584 for (size = 0, entry = map->header.next; entry != &map->header;
3585 entry = entry->next) {
3586 if (entry->protection != VM_PROT_NONE &&
3587 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3588 size += entry->end - entry->start;
3589 }
3590 }
3591
3592 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3593 vm_map_unlock(map);
3594 return ENOMEM;
3595 }
3596
3597 if (limit != 0 &&
3598 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3599 vm_map_unlock(map);
3600 return ENOMEM;
3601 }
3602
3603 /*
3604 * Pass 2.
3605 */
3606
3607 for (entry = map->header.next; entry != &map->header;
3608 entry = entry->next) {
3609 if (entry->protection == VM_PROT_NONE)
3610 continue;
3611 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3612
3613 /*
3614 * perform actions of vm_map_lookup that need the
3615 * write lock on the map: create an anonymous map
3616 * for a copy-on-write region, or an anonymous map
3617 * for a zero-fill region. (XXXCDC: submap case
3618 * ok?)
3619 */
3620
3621 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3622 if (UVM_ET_ISNEEDSCOPY(entry) &&
3623 ((entry->max_protection & VM_PROT_WRITE) ||
3624 (entry->object.uvm_obj == NULL))) {
3625 amap_copy(map, entry, 0, entry->start,
3626 entry->end);
3627 /* XXXCDC: wait OK? */
3628 }
3629 }
3630 }
3631 entry->wired_count++;
3632 }
3633
3634 /*
3635 * Pass 3.
3636 */
3637
3638 #ifdef DIAGNOSTIC
3639 timestamp_save = map->timestamp;
3640 #endif
3641 vm_map_busy(map);
3642 vm_map_downgrade(map);
3643
3644 rv = 0;
3645 for (entry = map->header.next; entry != &map->header;
3646 entry = entry->next) {
3647 if (entry->wired_count == 1) {
3648 rv = uvm_fault_wire(map, entry->start, entry->end,
3649 entry->max_protection, 1);
3650 if (rv) {
3651
3652 /*
3653 * wiring failed. break out of the loop.
3654 * we'll clean up the map below, once we
3655 * have a write lock again.
3656 */
3657
3658 break;
3659 }
3660 }
3661 }
3662
3663 if (rv) {
3664
3665 /*
3666 * Get back an exclusive (write) lock.
3667 */
3668
3669 vm_map_upgrade(map);
3670 vm_map_unbusy(map);
3671
3672 #ifdef DIAGNOSTIC
3673 if (timestamp_save != map->timestamp)
3674 panic("uvm_map_pageable_all: stale map");
3675 #endif
3676
3677 /*
3678 * first drop the wiring count on all the entries
3679 * which haven't actually been wired yet.
3680 *
3681 * Skip VM_PROT_NONE entries like we did above.
3682 */
3683
3684 failed_entry = entry;
3685 for (/* nothing */; entry != &map->header;
3686 entry = entry->next) {
3687 if (entry->protection == VM_PROT_NONE)
3688 continue;
3689 entry->wired_count--;
3690 }
3691
3692 /*
3693 * now, unwire all the entries that were successfully
3694 * wired above.
3695 *
3696 * Skip VM_PROT_NONE entries like we did above.
3697 */
3698
3699 for (entry = map->header.next; entry != failed_entry;
3700 entry = entry->next) {
3701 if (entry->protection == VM_PROT_NONE)
3702 continue;
3703 entry->wired_count--;
3704 if (VM_MAPENT_ISWIRED(entry))
3705 uvm_map_entry_unwire(map, entry);
3706 }
3707 vm_map_unlock(map);
3708 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3709 return (rv);
3710 }
3711
3712 /* We are holding a read lock here. */
3713 vm_map_unbusy(map);
3714 vm_map_unlock_read(map);
3715
3716 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3717 return 0;
3718 }
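
/*
 * Illustrative sketch (annotation only): the flag combinations accepted
 * above mirror mlockall(2)/munlockall(2).
 *
 *	uvm_map_pageable_all(map, 0, 0);
 *		unwire everything and clear VM_MAP_WIREFUTURE
 *	uvm_map_pageable_all(map, MCL_CURRENT, limit);
 *		wire all current mappings, subject to "limit"
 *	uvm_map_pageable_all(map, MCL_CURRENT | MCL_FUTURE, limit);
 *		additionally wire all future mappings
 */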
3719
3720 /*
3721 * uvm_map_clean: clean out a map range
3722 *
3723 * => valid flags:
3724 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3725 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3726 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3727 * if (flags & PGO_FREE): any cached pages are freed after clean
3728 * => returns an error if any part of the specified range isn't mapped
3729 * => never a need to flush amap layer since the anonymous memory has
3730 * no permanent home, but may deactivate pages there
3731 * => called from sys_msync() and sys_madvise()
3732 * => caller must not write-lock map (read OK).
3733 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3734 */
3735
3736 int
3737 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3738 {
3739 struct vm_map_entry *current, *entry;
3740 struct uvm_object *uobj;
3741 struct vm_amap *amap;
3742 struct vm_anon *anon;
3743 struct vm_page *pg;
3744 vaddr_t offset;
3745 vsize_t size;
3746 voff_t uoff;
3747 int error, refs;
3748 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3749
3750 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3751 map, start, end, flags);
3752 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3753 (PGO_FREE|PGO_DEACTIVATE));
3754
3755 vm_map_lock_read(map);
3756 VM_MAP_RANGE_CHECK(map, start, end);
3757 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3758 vm_map_unlock_read(map);
3759 return EFAULT;
3760 }
3761
3762 /*
3763 * Make a first pass to check for holes and wiring problems.
3764 */
3765
3766 for (current = entry; current->start < end; current = current->next) {
3767 if (UVM_ET_ISSUBMAP(current)) {
3768 vm_map_unlock_read(map);
3769 return EINVAL;
3770 }
3771 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3772 vm_map_unlock_read(map);
3773 return EBUSY;
3774 }
3775 if (end <= current->end) {
3776 break;
3777 }
3778 if (current->end != current->next->start) {
3779 vm_map_unlock_read(map);
3780 return EFAULT;
3781 }
3782 }
3783
3784 error = 0;
3785 for (current = entry; start < end; current = current->next) {
3786 amap = current->aref.ar_amap; /* top layer */
3787 uobj = current->object.uvm_obj; /* bottom layer */
3788 KASSERT(start >= current->start);
3789
3790 /*
3791 * No amap cleaning necessary if:
3792 *
3793 * (1) There's no amap.
3794 *
3795 * (2) We're not deactivating or freeing pages.
3796 */
3797
3798 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3799 goto flush_object;
3800
3801 amap_lock(amap);
3802 offset = start - current->start;
3803 size = MIN(end, current->end) - start;
3804 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3805 			anon = amap_lookup(&current->aref, offset);
3806 if (anon == NULL)
3807 continue;
3808
3809 mutex_enter(&anon->an_lock);
3810 pg = anon->an_page;
3811 if (pg == NULL) {
3812 mutex_exit(&anon->an_lock);
3813 continue;
3814 }
3815
3816 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3817
3818 /*
3819 * In these first 3 cases, we just deactivate the page.
3820 */
3821
3822 case PGO_CLEANIT|PGO_FREE:
3823 case PGO_CLEANIT|PGO_DEACTIVATE:
3824 case PGO_DEACTIVATE:
3825 deactivate_it:
3826 /*
3827 * skip the page if it's loaned or wired,
3828 * since it shouldn't be on a paging queue
3829 * at all in these cases.
3830 */
3831
3832 mutex_enter(&uvm_pageqlock);
3833 if (pg->loan_count != 0 ||
3834 pg->wire_count != 0) {
3835 mutex_exit(&uvm_pageqlock);
3836 mutex_exit(&anon->an_lock);
3837 continue;
3838 }
3839 KASSERT(pg->uanon == anon);
3840 pmap_clear_reference(pg);
3841 uvm_pagedeactivate(pg);
3842 mutex_exit(&uvm_pageqlock);
3843 mutex_exit(&anon->an_lock);
3844 continue;
3845
3846 case PGO_FREE:
3847
3848 /*
3849 * If there are multiple references to
3850 * the amap, just deactivate the page.
3851 */
3852
3853 if (amap_refs(amap) > 1)
3854 goto deactivate_it;
3855
3856 /* skip the page if it's wired */
3857 if (pg->wire_count != 0) {
3858 mutex_exit(&anon->an_lock);
3859 continue;
3860 }
3861 				amap_unadd(&current->aref, offset);
3862 refs = --anon->an_ref;
3863 mutex_exit(&anon->an_lock);
3864 if (refs == 0)
3865 uvm_anfree(anon);
3866 continue;
3867 }
3868 }
3869 amap_unlock(amap);
3870
3871 flush_object:
3872 /*
3873 * flush pages if we've got a valid backing object.
3874 * note that we must always clean object pages before
3875 * freeing them since otherwise we could reveal stale
3876 * data from files.
3877 */
3878
3879 uoff = current->offset + (start - current->start);
3880 size = MIN(end, current->end) - start;
3881 if (uobj != NULL) {
3882 mutex_enter(&uobj->vmobjlock);
3883 if (uobj->pgops->pgo_put != NULL)
3884 error = (uobj->pgops->pgo_put)(uobj, uoff,
3885 uoff + size, flags | PGO_CLEANIT);
3886 			else {
3887 				/*
 				 * pgo_put is expected to release vmobjlock
 				 * itself; balance the lock manually when
 				 * the pager has no pgo_put method.
 				 */
 				mutex_exit(&uobj->vmobjlock);
 				error = 0;
 			}
3888 }
3889 start += size;
3890 }
3891 vm_map_unlock_read(map);
3892 return (error);
3893 }
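
/*
 * Illustrative sketch (not in the original source; the wrapper name is
 * hypothetical): an msync(2)-style synchronous flush of a range, using
 * the flag combinations documented above.
 */
#ifdef notyet
static int
example_sync_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{

	/* write dirty pages synchronously, keep the pages cached */
	return uvm_map_clean(map, start, end, PGO_CLEANIT | PGO_SYNCIO);
}
#endif /* notyet */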
3894
3895
3896 /*
3897 * uvm_map_checkprot: check protection in map
3898 *
3899  * => the specified protection must be allowed over a fully allocated region.
3900 * => map must be read or write locked by caller.
3901 */
3902
3903 bool
3904 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3905 vm_prot_t protection)
3906 {
3907 struct vm_map_entry *entry;
3908 struct vm_map_entry *tmp_entry;
3909
3910 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3911 return (false);
3912 }
3913 entry = tmp_entry;
3914 while (start < end) {
3915 if (entry == &map->header) {
3916 return (false);
3917 }
3918
3919 /*
3920 * no holes allowed
3921 */
3922
3923 if (start < entry->start) {
3924 return (false);
3925 }
3926
3927 /*
3928 * check protection associated with entry
3929 */
3930
3931 if ((entry->protection & protection) != protection) {
3932 return (false);
3933 }
3934 start = entry->end;
3935 entry = entry->next;
3936 }
3937 return (true);
3938 }
3939
3940 /*
3941 * uvmspace_alloc: allocate a vmspace structure.
3942 *
3943 * - structure includes vm_map and pmap
3944 * - XXX: no locking on this structure
3945 * - refcnt set to 1, rest must be init'd by caller
3946 */
3947 struct vmspace *
3948 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
3949 {
3950 struct vmspace *vm;
3951 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3952
3953 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
3954 uvmspace_init(vm, NULL, vmin, vmax);
3955 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3956 return (vm);
3957 }
3958
3959 /*
3960 * uvmspace_init: initialize a vmspace structure.
3961 *
3962 * - XXX: no locking on this structure
3963 * - refcnt set to 1, rest must be init'd by caller
3964 */
3965 void
3966 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
3967 {
3968 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3969
3970 memset(vm, 0, sizeof(*vm));
3971 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
3972 #ifdef __USING_TOPDOWN_VM
3973 | VM_MAP_TOPDOWN
3974 #endif
3975 );
3976 if (pmap)
3977 pmap_reference(pmap);
3978 else
3979 pmap = pmap_create();
3980 vm->vm_map.pmap = pmap;
3981 vm->vm_refcnt = 1;
3982 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3983 }
3984
3985 /*
3986 * uvmspace_share: share a vmspace between two processes
3987 *
3988 * - used for vfork, threads(?)
3989 */
3990
3991 void
3992 uvmspace_share(struct proc *p1, struct proc *p2)
3993 {
3994
3995 uvmspace_addref(p1->p_vmspace);
3996 p2->p_vmspace = p1->p_vmspace;
3997 }
3998
3999 /*
4000 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4001 *
4002 * - XXX: no locking on vmspace
4003 */
4004
4005 void
4006 uvmspace_unshare(struct lwp *l)
4007 {
4008 struct proc *p = l->l_proc;
4009 struct vmspace *nvm, *ovm = p->p_vmspace;
4010
4011 if (ovm->vm_refcnt == 1)
4012 /* nothing to do: vmspace isn't shared in the first place */
4013 return;
4014
4015 /* make a new vmspace, still holding old one */
4016 nvm = uvmspace_fork(ovm);
4017
4018 pmap_deactivate(l); /* unbind old vmspace */
4019 p->p_vmspace = nvm;
4020 pmap_activate(l); /* switch to new vmspace */
4021
4022 uvmspace_free(ovm); /* drop reference to old vmspace */
4023 }
4024
4025 /*
4026 * uvmspace_exec: the process wants to exec a new program
4027 */
4028
4029 void
4030 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4031 {
4032 struct proc *p = l->l_proc;
4033 struct vmspace *nvm, *ovm = p->p_vmspace;
4034 struct vm_map *map = &ovm->vm_map;
4035
4036 #ifdef __sparc__
4037 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
4038 kill_user_windows(l); /* before stack addresses go away */
4039 #endif
4040
4041 /*
4042 * see if more than one process is using this vmspace...
4043 */
4044
4045 if (ovm->vm_refcnt == 1) {
4046
4047 /*
4048 * if p is the only process using its vmspace then we can safely
4049 * recycle that vmspace for the program that is being exec'd.
4050 */
4051
4052 #ifdef SYSVSHM
4053 /*
4054 * SYSV SHM semantics require us to kill all segments on an exec
4055 */
4056
4057 if (ovm->vm_shm)
4058 shmexit(ovm);
4059 #endif
4060
4061 /*
4062 * POSIX 1003.1b -- "lock future mappings" is revoked
4063 * when a process execs another program image.
4064 */
4065
4066 map->flags &= ~VM_MAP_WIREFUTURE;
4067
4068 /*
4069 * now unmap the old program
4070 */
4071
4072 pmap_remove_all(map->pmap);
4073 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4074 KASSERT(map->header.prev == &map->header);
4075 KASSERT(map->nentries == 0);
4076
4077 /*
4078 * resize the map
4079 */
4080
4081 vm_map_setmin(map, start);
4082 vm_map_setmax(map, end);
4083 } else {
4084
4085 /*
4086 * p's vmspace is being shared, so we can't reuse it for p since
4087 		 * it is still in use by the others.  allocate a new vmspace
4088 * for p
4089 */
4090
4091 nvm = uvmspace_alloc(start, end);
4092
4093 /*
4094 * install new vmspace and drop our ref to the old one.
4095 */
4096
4097 pmap_deactivate(l);
4098 p->p_vmspace = nvm;
4099 pmap_activate(l);
4100
4101 uvmspace_free(ovm);
4102 }
4103 }
4104
4105 /*
4106 * uvmspace_addref: add a reference to a vmspace.
4107 */
4108
4109 void
4110 uvmspace_addref(struct vmspace *vm)
4111 {
4112 struct vm_map *map = &vm->vm_map;
4113
4114 KASSERT((map->flags & VM_MAP_DYING) == 0);
4115
4116 mutex_enter(&map->misc_lock);
4117 KASSERT(vm->vm_refcnt > 0);
4118 vm->vm_refcnt++;
4119 mutex_exit(&map->misc_lock);
4120 }
4121
4122 /*
4123 * uvmspace_free: free a vmspace data structure
4124 */
4125
4126 void
4127 uvmspace_free(struct vmspace *vm)
4128 {
4129 struct vm_map_entry *dead_entries;
4130 struct vm_map *map = &vm->vm_map;
4131 int n;
4132
4133 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4134
4135 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4136 mutex_enter(&map->misc_lock);
4137 n = --vm->vm_refcnt;
4138 mutex_exit(&map->misc_lock);
4139 if (n > 0)
4140 return;
4141
4142 /*
4143 * at this point, there should be no other references to the map.
4144 * delete all of the mappings, then destroy the pmap.
4145 */
4146
4147 map->flags |= VM_MAP_DYING;
4148 pmap_remove_all(map->pmap);
4149 #ifdef SYSVSHM
4150 /* Get rid of any SYSV shared memory segments. */
4151 if (vm->vm_shm != NULL)
4152 shmexit(vm);
4153 #endif
4154 if (map->nentries) {
4155 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4156 &dead_entries, NULL, 0);
4157 if (dead_entries != NULL)
4158 uvm_unmap_detach(dead_entries, 0);
4159 }
4160 KASSERT(map->nentries == 0);
4161 KASSERT(map->size == 0);
4162 mutex_destroy(&map->misc_lock);
4163 mutex_destroy(&map->mutex);
4164 rw_destroy(&map->lock);
4165 pmap_destroy(map->pmap);
4166 pool_cache_put(&uvm_vmspace_cache, vm);
4167 }
4168
4169 /*
4170 * F O R K - m a i n e n t r y p o i n t
4171 */
4172 /*
4173 * uvmspace_fork: fork a process' main map
4174 *
4175 * => create a new vmspace for child process from parent.
4176 * => parent's map must not be locked.
4177 */
4178
4179 struct vmspace *
4180 uvmspace_fork(struct vmspace *vm1)
4181 {
4182 struct vmspace *vm2;
4183 struct vm_map *old_map = &vm1->vm_map;
4184 struct vm_map *new_map;
4185 struct vm_map_entry *old_entry;
4186 struct vm_map_entry *new_entry;
4187 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4188
4189 vm_map_lock(old_map);
4190
4191 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
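	/*
	 * copy the fields from vm_startcopy to the end of the vmspace
	 * structure; by convention these are the per-process VM
	 * statistics and segment bases/sizes that the child inherits
	 * verbatim.
	 */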
4192 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4193 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4194 new_map = &vm2->vm_map; /* XXX */
4195
4196 old_entry = old_map->header.next;
4197 new_map->size = old_map->size;
4198
4199 /*
4200 * go entry-by-entry
4201 */
4202
4203 while (old_entry != &old_map->header) {
4204
4205 /*
4206 * first, some sanity checks on the old entry
4207 */
4208
4209 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4210 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4211 !UVM_ET_ISNEEDSCOPY(old_entry));
4212
4213 switch (old_entry->inheritance) {
4214 case MAP_INHERIT_NONE:
4215
4216 /*
4217 * drop the mapping, modify size
4218 */
4219 new_map->size -= old_entry->end - old_entry->start;
4220 break;
4221
4222 case MAP_INHERIT_SHARE:
4223
4224 /*
4225 * share the mapping: this means we want the old and
4226 * new entries to share amaps and backing objects.
4227 */
4228 /*
4229 			 * if the old_entry needs a new amap (due to a prior fork)
4230 * then we need to allocate it now so that we have
4231 * something we own to share with the new_entry. [in
4232 * other words, we need to clear needs_copy]
4233 */
4234
4235 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4236 /* get our own amap, clears needs_copy */
4237 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4238 0, 0);
4239 /* XXXCDC: WAITOK??? */
4240 }
4241
4242 new_entry = uvm_mapent_alloc(new_map, 0);
4243 /* old_entry -> new_entry */
4244 uvm_mapent_copy(old_entry, new_entry);
4245
4246 /* new pmap has nothing wired in it */
4247 new_entry->wired_count = 0;
4248
4249 /*
4250 			 * gain a reference to the object backing the entry (it
4251 			 * can't be a submap; that case was checked above).
4252 */
4253
4254 if (new_entry->aref.ar_amap)
4255 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4256
4257 if (new_entry->object.uvm_obj &&
4258 new_entry->object.uvm_obj->pgops->pgo_reference)
4259 new_entry->object.uvm_obj->
4260 pgops->pgo_reference(
4261 new_entry->object.uvm_obj);
4262
4263 /* insert entry at end of new_map's entry list */
4264 uvm_map_entry_link(new_map, new_map->header.prev,
4265 new_entry);
4266
4267 break;
4268
4269 case MAP_INHERIT_COPY:
4270
4271 /*
4272 * copy-on-write the mapping (using mmap's
4273 * MAP_PRIVATE semantics)
4274 *
4275 * allocate new_entry, adjust reference counts.
4276 * (note that new references are read-only).
4277 */
4278
4279 new_entry = uvm_mapent_alloc(new_map, 0);
4280 /* old_entry -> new_entry */
4281 uvm_mapent_copy(old_entry, new_entry);
4282
4283 if (new_entry->aref.ar_amap)
4284 uvm_map_reference_amap(new_entry, 0);
4285
4286 if (new_entry->object.uvm_obj &&
4287 new_entry->object.uvm_obj->pgops->pgo_reference)
4288 new_entry->object.uvm_obj->pgops->pgo_reference
4289 (new_entry->object.uvm_obj);
4290
4291 /* new pmap has nothing wired in it */
4292 new_entry->wired_count = 0;
4293
4294 new_entry->etype |=
4295 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4296 uvm_map_entry_link(new_map, new_map->header.prev,
4297 new_entry);
4298
4299 /*
4300 * the new entry will need an amap. it will either
4301 * need to be copied from the old entry or created
4302 * from scratch (if the old entry does not have an
4303 * amap). can we defer this process until later
4304 * (by setting "needs_copy") or do we need to copy
4305 * the amap now?
4306 *
4307 * we must copy the amap now if any of the following
4308 * conditions hold:
4309 * 1. the old entry has an amap and that amap is
4310 * being shared. this means that the old (parent)
4311 * process is sharing the amap with another
4312 * process. if we do not clear needs_copy here
4313 * we will end up in a situation where both the
4314 			 *    parent and child process are referring to the
4315 * same amap with "needs_copy" set. if the
4316 * parent write-faults, the fault routine will
4317 * clear "needs_copy" in the parent by allocating
4318 * a new amap. this is wrong because the
4319 * parent is supposed to be sharing the old amap
4320 * and the new amap will break that.
4321 *
4322 * 2. if the old entry has an amap and a non-zero
4323 * wire count then we are going to have to call
4324 * amap_cow_now to avoid page faults in the
4325 * parent process. since amap_cow_now requires
4326 * "needs_copy" to be clear we might as well
4327 * clear it here as well.
4328 *
4329 */
4330
4331 if (old_entry->aref.ar_amap != NULL) {
4332 if ((amap_flags(old_entry->aref.ar_amap) &
4333 AMAP_SHARED) != 0 ||
4334 VM_MAPENT_ISWIRED(old_entry)) {
4335
4336 amap_copy(new_map, new_entry,
4337 AMAP_COPY_NOCHUNK, 0, 0);
4338 /* XXXCDC: M_WAITOK ... ok? */
4339 }
4340 }
4341
4342 /*
4343 * if the parent's entry is wired down, then the
4344 * parent process does not want page faults on
4345 * access to that memory. this means that we
4346 * cannot do copy-on-write because we can't write
4347 * protect the old entry. in this case we
4348 * resolve all copy-on-write faults now, using
4349 * amap_cow_now. note that we have already
4350 * allocated any needed amap (above).
4351 */
4352
4353 if (VM_MAPENT_ISWIRED(old_entry)) {
4354
4355 /*
4356 * resolve all copy-on-write faults now
4357 * (note that there is nothing to do if
4358 * the old mapping does not have an amap).
4359 */
4360 if (old_entry->aref.ar_amap)
4361 amap_cow_now(new_map, new_entry);
4362
4363 } else {
4364
4365 /*
4366 				 * set up mappings to trigger copy-on-write faults.
4367 * we must write-protect the parent if it has
4368 * an amap and it is not already "needs_copy"...
4369 * if it is already "needs_copy" then the parent
4370 * has already been write-protected by a previous
4371 * fork operation.
4372 */
4373
4374 if (old_entry->aref.ar_amap &&
4375 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4376 if (old_entry->max_protection & VM_PROT_WRITE) {
4377 pmap_protect(old_map->pmap,
4378 old_entry->start,
4379 old_entry->end,
4380 old_entry->protection &
4381 ~VM_PROT_WRITE);
4382 pmap_update(old_map->pmap);
4383 }
4384 old_entry->etype |= UVM_ET_NEEDSCOPY;
4385 }
4386 }
4387 break;
4388 } /* end of switch statement */
4389 old_entry = old_entry->next;
4390 }
4391
4392 vm_map_unlock(old_map);
4393
4394 #ifdef SYSVSHM
4395 if (vm1->vm_shm)
4396 shmfork(vm1, vm2);
4397 #endif
4398
4399 #ifdef PMAP_FORK
4400 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4401 #endif
4402
4403 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4404 return (vm2);
4405 }
4406
4407
4408 /*
4409 * in-kernel map entry allocation.
4410 */
4411
4412 struct uvm_kmapent_hdr {
4413 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4414 int ukh_nused;
4415 struct vm_map_entry *ukh_freelist;
4416 struct vm_map *ukh_map;
4417 struct vm_map_entry ukh_entries[0];
4418 };
4419
4420 #define UVM_KMAPENT_CHUNK \
4421 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4422 / sizeof(struct vm_map_entry))
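/*
 * e.g. with 4KB pages, a (hypothetical) 32-byte header and 64-byte
 * map entries, UVM_KMAPENT_CHUNK works out to (4096 - 32) / 64 = 63
 * entries per page.
 */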
4423
4424 #define UVM_KHDR_FIND(entry) \
4425 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
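/*
 * kernel map entries are handed out UVM_KMAPENT_CHUNK to a page with
 * the uvm_kmapent_hdr at the start of that page (see
 * uvm_kmapent_alloc below), so masking off the page offset of an
 * entry's address recovers its header.
 */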
4426
4427
4428 #ifdef DIAGNOSTIC
4429 static struct vm_map *
4430 uvm_kmapent_map(struct vm_map_entry *entry)
4431 {
4432 const struct uvm_kmapent_hdr *ukh;
4433
4434 ukh = UVM_KHDR_FIND(entry);
4435 return ukh->ukh_map;
4436 }
4437 #endif
4438
4439 static inline struct vm_map_entry *
4440 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4441 {
4442 struct vm_map_entry *entry;
4443
4444 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4445 KASSERT(ukh->ukh_nused >= 0);
4446
4447 entry = ukh->ukh_freelist;
4448 if (entry) {
4449 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4450 == UVM_MAP_KERNEL);
4451 ukh->ukh_freelist = entry->next;
4452 ukh->ukh_nused++;
4453 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4454 } else {
4455 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4456 }
4457
4458 return entry;
4459 }
4460
4461 static inline void
4462 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4463 {
4464
4465 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4466 == UVM_MAP_KERNEL);
4467 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4468 KASSERT(ukh->ukh_nused > 0);
4469 KASSERT(ukh->ukh_freelist != NULL ||
4470 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4471 KASSERT(ukh->ukh_freelist == NULL ||
4472 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4473
4474 ukh->ukh_nused--;
4475 entry->next = ukh->ukh_freelist;
4476 ukh->ukh_freelist = entry;
4477 }
4478
4479 /*
4480 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4481 */
4482
4483 static struct vm_map_entry *
4484 uvm_kmapent_alloc(struct vm_map *map, int flags)
4485 {
4486 struct vm_page *pg;
4487 struct uvm_map_args args;
4488 struct uvm_kmapent_hdr *ukh;
4489 struct vm_map_entry *entry;
4490 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4491 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4492 vaddr_t va;
4493 int error;
4494 int i;
4495
4496 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4497 KDASSERT(kernel_map != NULL);
4498 KASSERT(vm_map_pmap(map) == pmap_kernel());
4499
4500 UVMMAP_EVCNT_INCR(uke_alloc);
4501 entry = NULL;
4502 again:
4503 /*
4504 * try to grab an entry from freelist.
4505 */
4506 mutex_spin_enter(&uvm_kentry_lock);
4507 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4508 if (ukh) {
4509 entry = uvm_kmapent_get(ukh);
4510 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4511 LIST_REMOVE(ukh, ukh_listq);
4512 }
4513 mutex_spin_exit(&uvm_kentry_lock);
4514
4515 if (entry)
4516 return entry;
4517
4518 /*
4519 * there's no free entry for this vm_map.
4520 	 * now we need to allocate some vm_map_entries.
4521 	 * for simplicity, always allocate a one-page chunk of them at once.
4522 */
4523
4524 pg = uvm_pagealloc(NULL, 0, NULL, 0);
4525 if (__predict_false(pg == NULL)) {
4526 if (flags & UVM_FLAG_NOWAIT)
4527 return NULL;
4528 uvm_wait("kme_alloc");
4529 goto again;
4530 }
4531
4532 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4533 0, mapflags, &args);
4534 if (error) {
4535 uvm_pagefree(pg);
4536 return NULL;
4537 }
4538
4539 va = args.uma_start;
4540
4541 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
4542 pmap_update(vm_map_pmap(map));
4543
4544 ukh = (void *)va;
4545
4546 /*
4547 	 * use the first entry for ukh itself.
4548 */
4549
4550 entry = &ukh->ukh_entries[0];
4551 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4552 error = uvm_map_enter(map, &args, entry);
4553 KASSERT(error == 0);
4554
4555 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4556 ukh->ukh_map = map;
4557 ukh->ukh_freelist = NULL;
4558 for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
4559 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4560
4561 xentry->flags = UVM_MAP_KERNEL;
4562 uvm_kmapent_put(ukh, xentry);
4563 }
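	/*
	 * entries[0] backs the chunk's own mapping and entries[1] is
	 * returned to the caller below, so exactly two entries remain
	 * accounted as used.
	 */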
4564 KASSERT(ukh->ukh_nused == 2);
4565
4566 mutex_spin_enter(&uvm_kentry_lock);
4567 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4568 ukh, ukh_listq);
4569 mutex_spin_exit(&uvm_kentry_lock);
4570
4571 /*
4572 	 * return the second entry.
4573 */
4574
4575 entry = &ukh->ukh_entries[1];
4576 entry->flags = UVM_MAP_KERNEL;
4577 UVMMAP_EVCNT_INCR(ukh_alloc);
4578 return entry;
4579 }
4580
4581 /*
4582 * uvm_kmapent_free: free a map entry for an in-kernel map
4583 */
4584
4585 static void
4586 uvm_kmapent_free(struct vm_map_entry *entry)
4587 {
4588 struct uvm_kmapent_hdr *ukh;
4589 struct vm_page *pg;
4590 struct vm_map *map;
4591 struct pmap *pmap;
4592 vaddr_t va;
4593 paddr_t pa;
4594 struct vm_map_entry *deadentry;
4595
4596 UVMMAP_EVCNT_INCR(uke_free);
4597 ukh = UVM_KHDR_FIND(entry);
4598 map = ukh->ukh_map;
4599
4600 mutex_spin_enter(&uvm_kentry_lock);
4601 uvm_kmapent_put(ukh, entry);
4602 if (ukh->ukh_nused > 1) {
4603 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4604 LIST_INSERT_HEAD(
4605 &vm_map_to_kernel(map)->vmk_kentry_free,
4606 ukh, ukh_listq);
4607 mutex_spin_exit(&uvm_kentry_lock);
4608 return;
4609 }
4610
4611 /*
4612 * now we can free this ukh.
4613 *
4614 	 * however, keep the map's last empty ukh to avoid alloc/free ping-pong.
4615 */
4616
4617 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4618 LIST_NEXT(ukh, ukh_listq) == NULL) {
4619 mutex_spin_exit(&uvm_kentry_lock);
4620 return;
4621 }
4622 LIST_REMOVE(ukh, ukh_listq);
4623 mutex_spin_exit(&uvm_kentry_lock);
4624
4625 KASSERT(ukh->ukh_nused == 1);
4626
4627 /*
4628 	 * remove the map entry for ukh itself.
4629 */
4630
4631 va = (vaddr_t)ukh;
4632 KASSERT((va & PAGE_MASK) == 0);
4633 vm_map_lock(map);
4634 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4635 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4636 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4637 KASSERT(deadentry->next == NULL);
4638 KASSERT(deadentry == &ukh->ukh_entries[0]);
4639
4640 /*
4641 * unmap the page from pmap and free it.
4642 */
4643
4644 pmap = vm_map_pmap(map);
4645 KASSERT(pmap == pmap_kernel());
4646 if (!pmap_extract(pmap, va, &pa))
4647 panic("%s: no mapping", __func__);
4648 pmap_kremove(va, PAGE_SIZE);
4649 vm_map_unlock(map);
4650 pg = PHYS_TO_VM_PAGE(pa);
4651 uvm_pagefree(pg);
4652 UVMMAP_EVCNT_INCR(ukh_free);
4653 }
4654
4655 static vsize_t
4656 uvm_kmapent_overhead(vsize_t size)
4657 {
4658
4659 /*
4660 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4661 * as the min allocation unit is PAGE_SIZE.
4662 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4663 	 *   one of them is used to map the page itself.
4664 */
4665
4666 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4667 PAGE_SIZE;
4668 }
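/*
 * worked example: with PAGE_SIZE = 4096 and an illustrative
 * UVM_KMAPENT_CHUNK of 63, a 16MB mapping can fragment into up to
 * 4096 unmerged entries, for which this returns
 * howmany(4096, 62) * PAGE_SIZE = 67 pages of entry storage.
 */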
4669
4670 /*
4671 * map entry reservation
4672 */
4673
4674 /*
4675 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4676 *
4677 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4678 * => caller shouldn't hold map locked.
4679 */
4680 int
4681 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4682 int nentries, int flags)
4683 {
4684
4685 umr->umr_nentries = 0;
4686
4687 if ((flags & UVM_FLAG_QUANTUM) != 0)
4688 return 0;
4689
4690 if (!VM_MAP_USE_KMAPENT(map))
4691 return 0;
4692
4693 while (nentries--) {
4694 struct vm_map_entry *ent;
4695 ent = uvm_kmapent_alloc(map, flags);
4696 if (!ent) {
4697 uvm_mapent_unreserve(map, umr);
4698 return ENOMEM;
4699 }
4700 UMR_PUTENTRY(umr, ent);
4701 }
4702
4703 return 0;
4704 }
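/*
 * see uvm_unmap1() below for the typical usage pattern:
 * reserve, lock the map, unmap, unlock, then unreserve.
 */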
4705
4706 /*
4707 * uvm_mapent_unreserve:
4708 *
4709 * => caller shouldn't hold map locked.
4710 * => never fails or sleeps.
4711 */
4712 void
4713 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4714 {
4715
4716 while (!UMR_EMPTY(umr))
4717 uvm_kmapent_free(UMR_GETENTRY(umr));
4718 }
4719
4720 /*
4721 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4722 *
4723 * => called with map locked.
4724 * => returns nonzero if at least one merge succeeded.
4725 */
4726
4727 int
4728 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4729 {
4730 struct uvm_object *uobj;
4731 struct vm_map_entry *next;
4732 struct vm_map_entry *prev;
4733 vsize_t size;
4734 int merged = 0;
4735 bool copying;
4736 int newetype;
4737
4738 if (VM_MAP_USE_KMAPENT(map)) {
4739 return 0;
4740 }
4741 if (entry->aref.ar_amap != NULL) {
4742 return 0;
4743 }
4744 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4745 return 0;
4746 }
4747
4748 uobj = entry->object.uvm_obj;
4749 size = entry->end - entry->start;
4750 copying = (flags & UVM_MERGE_COPYING) != 0;
4751 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4752
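	/*
	 * first try to merge with the next entry: it must be virtually
	 * adjacent, its amap state must be compatible (an amap with a
	 * single reference that we can extend when copying, no amap at
	 * all otherwise), its attributes must match ours, and for
	 * object-backed entries the object offsets must be contiguous.
	 */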
4753 next = entry->next;
4754 if (next != &map->header &&
4755 next->start == entry->end &&
4756 ((copying && next->aref.ar_amap != NULL &&
4757 amap_refs(next->aref.ar_amap) == 1) ||
4758 (!copying && next->aref.ar_amap == NULL)) &&
4759 UVM_ET_ISCOMPATIBLE(next, newetype,
4760 uobj, entry->flags, entry->protection,
4761 entry->max_protection, entry->inheritance, entry->advice,
4762 entry->wired_count) &&
4763 (uobj == NULL || entry->offset + size == next->offset)) {
4764 int error;
4765
4766 if (copying) {
4767 error = amap_extend(next, size,
4768 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4769 } else {
4770 error = 0;
4771 }
4772 if (error == 0) {
4773 if (uobj) {
4774 if (uobj->pgops->pgo_detach) {
4775 uobj->pgops->pgo_detach(uobj);
4776 }
4777 }
4778
4779 entry->end = next->end;
4780 clear_hints(map, next);
4781 uvm_map_entry_unlink(map, next);
4782 if (copying) {
4783 entry->aref = next->aref;
4784 entry->etype &= ~UVM_ET_NEEDSCOPY;
4785 }
4786 uvm_map_check(map, "trymerge forwardmerge");
4787 uvm_mapent_free_merged(map, next);
4788 merged++;
4789 }
4790 }
4791
4792 prev = entry->prev;
4793 if (prev != &map->header &&
4794 prev->end == entry->start &&
4795 ((copying && !merged && prev->aref.ar_amap != NULL &&
4796 amap_refs(prev->aref.ar_amap) == 1) ||
4797 (!copying && prev->aref.ar_amap == NULL)) &&
4798 UVM_ET_ISCOMPATIBLE(prev, newetype,
4799 uobj, entry->flags, entry->protection,
4800 entry->max_protection, entry->inheritance, entry->advice,
4801 entry->wired_count) &&
4802 (uobj == NULL ||
4803 prev->offset + prev->end - prev->start == entry->offset)) {
4804 int error;
4805
4806 if (copying) {
4807 error = amap_extend(prev, size,
4808 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4809 } else {
4810 error = 0;
4811 }
4812 if (error == 0) {
4813 if (uobj) {
4814 if (uobj->pgops->pgo_detach) {
4815 uobj->pgops->pgo_detach(uobj);
4816 }
4817 entry->offset = prev->offset;
4818 }
4819
4820 entry->start = prev->start;
4821 clear_hints(map, prev);
4822 uvm_map_entry_unlink(map, prev);
4823 if (copying) {
4824 entry->aref = prev->aref;
4825 entry->etype &= ~UVM_ET_NEEDSCOPY;
4826 }
4827 uvm_map_check(map, "trymerge backmerge");
4828 uvm_mapent_free_merged(map, prev);
4829 merged++;
4830 }
4831 }
4832
4833 return merged;
4834 }
4835
4836 #if defined(DDB)
4837
4838 /*
4839 * DDB hooks
4840 */
4841
4842 /*
4843 * uvm_map_printit: actually prints the map
4844 */
4845
4846 void
4847 uvm_map_printit(struct vm_map *map, bool full,
4848 void (*pr)(const char *, ...))
4849 {
4850 struct vm_map_entry *entry;
4851
4852 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
4853 vm_map_max(map));
4854 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
4855 map->nentries, map->size, map->ref_count, map->timestamp,
4856 map->flags);
4857 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4858 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4859 if (!full)
4860 return;
4861 for (entry = map->header.next; entry != &map->header;
4862 entry = entry->next) {
4863 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
4864 entry, entry->start, entry->end, entry->object.uvm_obj,
4865 (long long)entry->offset, entry->aref.ar_amap,
4866 entry->aref.ar_pageoff);
4867 (*pr)(
4868 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4869 "wc=%d, adv=%d\n",
4870 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4871 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4872 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4873 entry->protection, entry->max_protection,
4874 entry->inheritance, entry->wired_count, entry->advice);
4875 }
4876 }
4877
4878 /*
4879 * uvm_object_printit: actually prints the object
4880 */
4881
4882 void
4883 uvm_object_printit(struct uvm_object *uobj, bool full,
4884 void (*pr)(const char *, ...))
4885 {
4886 struct vm_page *pg;
4887 int cnt = 0;
4888
4889 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
4890 uobj, mutex_owned(&uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
4891 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
4892 (*pr)("refs=<SYSTEM>\n");
4893 else
4894 (*pr)("refs=%d\n", uobj->uo_refs);
4895
4896 if (!full) {
4897 return;
4898 }
4899 (*pr)(" PAGES <pg,offset>:\n ");
4900 TAILQ_FOREACH(pg, &uobj->memq, listq) {
4901 cnt++;
4902 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
4903 if ((cnt % 3) == 0) {
4904 (*pr)("\n ");
4905 }
4906 }
4907 if ((cnt % 3) != 0) {
4908 (*pr)("\n");
4909 }
4910 }
4911
4912 /*
4913 * uvm_page_printit: actually print the page
4914 */
4915
4916 static const char page_flagbits[] = UVM_PGFLAGBITS;
4917 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
4918
4919 void
4920 uvm_page_printit(struct vm_page *pg, bool full,
4921 void (*pr)(const char *, ...))
4922 {
4923 struct vm_page *tpg;
4924 struct uvm_object *uobj;
4925 struct pglist *pgl;
4926 char pgbuf[128];
4927 char pqbuf[128];
4928
4929 (*pr)("PAGE %p:\n", pg);
4930 bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf));
4931 bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf));
4932 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
4933 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
4934 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
4935 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
4936 #if defined(UVM_PAGE_TRKOWN)
4937 if (pg->flags & PG_BUSY)
4938 (*pr)(" owning process = %d, tag=%s\n",
4939 pg->owner, pg->owner_tag);
4940 else
4941 (*pr)(" page not busy, no owner\n");
4942 #else
4943 (*pr)(" [page ownership tracking disabled]\n");
4944 #endif
4945
4946 if (!full)
4947 return;
4948
4949 /* cross-verify object/anon */
4950 if ((pg->pqflags & PQ_FREE) == 0) {
4951 if (pg->pqflags & PQ_ANON) {
4952 if (pg->uanon == NULL || pg->uanon->an_page != pg)
4953 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
4954 (pg->uanon) ? pg->uanon->an_page : NULL);
4955 else
4956 (*pr)(" anon backpointer is OK\n");
4957 } else {
4958 uobj = pg->uobject;
4959 if (uobj) {
4960 (*pr)(" checking object list\n");
4961 TAILQ_FOREACH(tpg, &uobj->memq, listq) {
4962 if (tpg == pg) {
4963 break;
4964 }
4965 }
4966 if (tpg)
4967 (*pr)(" page found on object list\n");
4968 else
4969 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
4970 }
4971 }
4972 }
4973
4974 /* cross-verify page queue */
4975 if (pg->pqflags & PQ_FREE) {
4976 int fl = uvm_page_lookup_freelist(pg);
4977 int color = VM_PGCOLOR_BUCKET(pg);
4978 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
4979 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
4980 } else {
4981 pgl = NULL;
4982 }
4983
4984 if (pgl) {
4985 (*pr)(" checking pageq list\n");
4986 TAILQ_FOREACH(tpg, pgl, pageq) {
4987 if (tpg == pg) {
4988 break;
4989 }
4990 }
4991 if (tpg)
4992 (*pr)(" page found on pageq list\n");
4993 else
4994 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
4995 }
4996 }
4997
4998 /*
4999 * uvm_page_printall: print a summary of all managed pages
5000 */
5001
5002 void
5003 uvm_page_printall(void (*pr)(const char *, ...))
5004 {
5005 unsigned i;
5006 struct vm_page *pg;
5007
5008 (*pr)("%18s %4s %4s %18s %18s"
5009 #ifdef UVM_PAGE_TRKOWN
5010 " OWNER"
5011 #endif
5012 "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
5013 for (i = 0; i < vm_nphysseg; i++) {
5014 for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) {
5015 (*pr)("%18p %04x %04x %18p %18p",
5016 pg, pg->flags, pg->pqflags, pg->uobject,
5017 pg->uanon);
5018 #ifdef UVM_PAGE_TRKOWN
5019 if (pg->flags & PG_BUSY)
5020 (*pr)(" %d [%s]", pg->owner, pg->owner_tag);
5021 #endif
5022 (*pr)("\n");
5023 }
5024 }
5025 }
5026
5027 #endif
5028
5029 /*
5030 * uvm_map_create: create map
5031 */
5032
5033 struct vm_map *
5034 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5035 {
5036 struct vm_map *result;
5037
5038 MALLOC(result, struct vm_map *, sizeof(struct vm_map),
5039 M_VMMAP, M_WAITOK);
5040 uvm_map_setup(result, vmin, vmax, flags);
5041 result->pmap = pmap;
5042 return(result);
5043 }
5044
5045 /*
5046 * uvm_map_setup: init map
5047 *
5048 * => map must not be in service yet.
5049 */
5050
5051 void
5052 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5053 {
5054 int ipl;
5055
5056 RB_INIT(&map->rbhead);
5057 map->header.next = map->header.prev = &map->header;
5058 map->nentries = 0;
5059 map->size = 0;
5060 map->ref_count = 1;
5061 vm_map_setmin(map, vmin);
5062 vm_map_setmax(map, vmax);
5063 map->flags = flags;
5064 map->first_free = &map->header;
5065 map->hint = &map->header;
5066 map->timestamp = 0;
5067 map->busy = NULL;
5068
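	/*
	 * interrupt-safe maps initialize their locks at IPL_VM so that
	 * they may be taken from interrupt context; all other maps use
	 * IPL_NONE.
	 */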
5069 if ((flags & VM_MAP_INTRSAFE) != 0) {
5070 ipl = IPL_VM;
5071 } else {
5072 ipl = IPL_NONE;
5073 }
5074
5075 rw_init(&map->lock);
5076 cv_init(&map->cv, "vm_map");
5077 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5078 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
5079 }
5080
5081
5082 /*
5083 * U N M A P - m a i n e n t r y p o i n t
5084 */
5085
5086 /*
5087 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
5088 *
5089 * => caller must check alignment and size
5090 * => map must be unlocked (we will lock it)
5091 * => flags is UVM_FLAG_QUANTUM or 0.
5092 */
5093
5094 void
5095 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5096 {
5097 struct vm_map_entry *dead_entries;
5098 struct uvm_mapent_reservation umr;
5099 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5100
5101 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5102 map, start, end, 0);
5103 if (map == kernel_map) {
5104 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5105 }
5106 /*
5107 	 * the work is done by helper functions: remove the mappings and
5108 	 * then detach from the dead entries...
5109 */
5110 uvm_mapent_reserve(map, &umr, 2, flags);
5111 vm_map_lock(map);
5112 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5113 vm_map_unlock(map);
5114 uvm_mapent_unreserve(map, &umr);
5115
5116 if (dead_entries != NULL)
5117 uvm_unmap_detach(dead_entries, 0);
5118
5119 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5120 }
5121
5122
5123 /*
5124 * uvm_map_reference: add reference to a map
5125 *
5126 * => map need not be locked (we use misc_lock).
5127 */
5128
5129 void
5130 uvm_map_reference(struct vm_map *map)
5131 {
5132 mutex_enter(&map->misc_lock);
5133 map->ref_count++;
5134 mutex_exit(&map->misc_lock);
5135 }
5136
5137 struct vm_map_kernel *
5138 vm_map_to_kernel(struct vm_map *map)
5139 {
5140
5141 KASSERT(VM_MAP_IS_KERNEL(map));
5142
5143 return (struct vm_map_kernel *)map;
5144 }
5145
5146 bool
5147 vm_map_starved_p(struct vm_map *map)
5148 {
5149
5150 if ((map->flags & VM_MAP_WANTVA) != 0) {
5151 return true;
5152 }
5153 /* XXX */
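	/* i.e. roughly 15/16 or more of the map's VA range is in use */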
5154 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5155 return true;
5156 }
5157 return false;
5158 }
5159
5160 #if defined(DDB)
5161 void
5162 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5163 {
5164 struct vm_map *map;
5165
5166 for (map = kernel_map;;) {
5167 struct vm_map_entry *entry;
5168
5169 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5170 break;
5171 }
5172 (*pr)("%p is %p+%zu from VMMAP %p\n",
5173 (void *)addr, (void *)entry->start,
5174 (size_t)(addr - (uintptr_t)entry->start), map);
5175 if (!UVM_ET_ISSUBMAP(entry)) {
5176 break;
5177 }
5178 map = entry->object.sub_map;
5179 }
5180 }
5181 #endif /* defined(DDB) */
5182