1 /*	$NetBSD: uvm_map.c,v 1.244 2007/11/26 08:15:19 yamt Exp $	*/
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Charles D. Cranor,
23 * Washington University, the University of California, Berkeley and
24 * its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
42 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Permission to use, copy, modify and distribute this software and
49 * its documentation is hereby granted, provided that both the copyright
50 * notice and this permission notice appear in all copies of the
51 * software, derivative works or modified versions, and any portions
52 * thereof, and that both notices appear in supporting documentation.
53 *
54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 *
58 * Carnegie Mellon requests users of this software to return to
59 *
60 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 */
68
69 /*
70 * uvm_map.c: uvm map operations
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.244 2007/11/26 08:15:19 yamt Exp $");
75
76 #include "opt_ddb.h"
77 #include "opt_uvmhist.h"
78 #include "opt_uvm.h"
79 #include "opt_sysv.h"
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/mman.h>
84 #include <sys/proc.h>
85 #include <sys/malloc.h>
86 #include <sys/pool.h>
87 #include <sys/kernel.h>
88 #include <sys/mount.h>
89 #include <sys/vnode.h>
90 #include <sys/lockdebug.h>
91
92 #ifdef SYSVSHM
93 #include <sys/shm.h>
94 #endif
95
96 #include <uvm/uvm.h>
97 #undef RB_AUGMENT
98 #define RB_AUGMENT(x) uvm_rb_augment(x)
99
100 #ifdef DDB
101 #include <uvm/uvm_ddb.h>
102 #endif
103
104 #if defined(UVMMAP_NOCOUNTERS)
105
106 #define UVMMAP_EVCNT_DEFINE(name) /* nothing */
107 #define UVMMAP_EVCNT_INCR(ev) /* nothing */
108 #define UVMMAP_EVCNT_DECR(ev) /* nothing */
109
110 #else /* defined(UVMMAP_NOCOUNTERS) */
111
112 #include <sys/evcnt.h>
113 #define UVMMAP_EVCNT_DEFINE(name) \
114 struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
115 "uvmmap", #name); \
116 EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
117 #define UVMMAP_EVCNT_INCR(ev) uvmmap_evcnt_##ev.ev_count++
118 #define UVMMAP_EVCNT_DECR(ev) uvmmap_evcnt_##ev.ev_count--
119
120 #endif /* defined(UVMMAP_NOCOUNTERS) */
121
122 UVMMAP_EVCNT_DEFINE(ubackmerge)
123 UVMMAP_EVCNT_DEFINE(uforwmerge)
124 UVMMAP_EVCNT_DEFINE(ubimerge)
125 UVMMAP_EVCNT_DEFINE(unomerge)
126 UVMMAP_EVCNT_DEFINE(kbackmerge)
127 UVMMAP_EVCNT_DEFINE(kforwmerge)
128 UVMMAP_EVCNT_DEFINE(kbimerge)
129 UVMMAP_EVCNT_DEFINE(knomerge)
130 UVMMAP_EVCNT_DEFINE(map_call)
131 UVMMAP_EVCNT_DEFINE(mlk_call)
132 UVMMAP_EVCNT_DEFINE(mlk_hint)
133
134 UVMMAP_EVCNT_DEFINE(uke_alloc)
135 UVMMAP_EVCNT_DEFINE(uke_free)
136 UVMMAP_EVCNT_DEFINE(ukh_alloc)
137 UVMMAP_EVCNT_DEFINE(ukh_free)
138
139 const char vmmapbsy[] = "vmmapbsy";
140
141 /*
142 * pool for vmspace structures.
143 */
144
145 POOL_INIT(uvm_vmspace_pool, sizeof(struct vmspace), 0, 0, 0, "vmsppl",
146 &pool_allocator_nointr, IPL_NONE);
147
148 /*
149 * pool for dynamically-allocated map entries.
150 */
151
152 POOL_INIT(uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0, "vmmpepl",
153 &pool_allocator_nointr, IPL_NONE);
154
155 MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
156 MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
157
158 #ifdef PMAP_GROWKERNEL
159 /*
160 * This global represents the end of the kernel virtual address
161 * space. If we want to exceed this, we must grow the kernel
162 * virtual address space dynamically.
163 *
164 * Note, this variable is locked by kernel_map's lock.
165 */
166 vaddr_t uvm_maxkaddr;
167 #endif
168
169 /*
170 * macros
171 */
172
173 /*
174 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
175 * for the vm_map.
176 */
177 extern struct vm_map *pager_map; /* XXX */
178 #define VM_MAP_USE_KMAPENT_FLAGS(flags) \
179 (((flags) & VM_MAP_INTRSAFE) != 0)
180 #define VM_MAP_USE_KMAPENT(map) \
181 (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)
182
183 /*
184 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
185 */
186
187 #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
188 prot, maxprot, inh, adv, wire) \
189 ((ent)->etype == (type) && \
190 (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
191 == 0 && \
192 (ent)->object.uvm_obj == (uobj) && \
193 (ent)->protection == (prot) && \
194 (ent)->max_protection == (maxprot) && \
195 (ent)->inheritance == (inh) && \
196 (ent)->advice == (adv) && \
197 (ent)->wired_count == (wire))
198
199 /*
200 * uvm_map_entry_link: insert entry into a map
201 *
202 * => map must be locked
203 */
204 #define uvm_map_entry_link(map, after_where, entry) do { \
205 uvm_mapent_check(entry); \
206 (map)->nentries++; \
207 (entry)->prev = (after_where); \
208 (entry)->next = (after_where)->next; \
209 (entry)->prev->next = (entry); \
210 (entry)->next->prev = (entry); \
211 uvm_rb_insert((map), (entry)); \
212 } while (/*CONSTCOND*/ 0)
213
214 /*
215 * uvm_map_entry_unlink: remove entry from a map
216 *
217 * => map must be locked
218 */
219 #define uvm_map_entry_unlink(map, entry) do { \
220 KASSERT((entry) != (map)->first_free); \
221 KASSERT((entry) != (map)->hint); \
222 uvm_mapent_check(entry); \
223 (map)->nentries--; \
224 (entry)->next->prev = (entry)->prev; \
225 (entry)->prev->next = (entry)->next; \
226 uvm_rb_remove((map), (entry)); \
227 } while (/*CONSTCOND*/ 0)
228
229 /*
230 * SAVE_HINT: saves the specified entry as the hint for future lookups.
231 *
232 * => map need not be locked (protected by hint_lock).
233 */
234 #define SAVE_HINT(map,check,value) do { \
235 mutex_enter(&(map)->hint_lock); \
236 if ((map)->hint == (check)) \
237 (map)->hint = (value); \
238 mutex_exit(&(map)->hint_lock); \
239 } while (/*CONSTCOND*/ 0)
240
241 /*
242 * clear_hints: ensure that hints don't point to the entry.
243 *
244 * => map must be write-locked.
245 */
246 static void
247 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
248 {
249
250 SAVE_HINT(map, ent, ent->prev);
251 if (map->first_free == ent) {
252 map->first_free = ent->prev;
253 }
254 }
255
256 /*
257 * VM_MAP_RANGE_CHECK: check and correct range
258 *
259 * => map must at least be read locked
260 */
261
262 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
263 if (start < vm_map_min(map)) \
264 start = vm_map_min(map); \
265 if (end > vm_map_max(map)) \
266 end = vm_map_max(map); \
267 if (start > end) \
268 start = end; \
269 } while (/*CONSTCOND*/ 0)
270
271 /*
272 * local prototypes
273 */
274
275 static struct vm_map_entry *
276 uvm_mapent_alloc(struct vm_map *, int);
277 static struct vm_map_entry *
278 uvm_mapent_alloc_split(struct vm_map *,
279 const struct vm_map_entry *, int,
280 struct uvm_mapent_reservation *);
281 static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
282 static void uvm_mapent_free(struct vm_map_entry *);
283 #if defined(DEBUG)
284 static void _uvm_mapent_check(const struct vm_map_entry *, const char *,
285 int);
286 #define uvm_mapent_check(map) _uvm_mapent_check(map, __FILE__, __LINE__)
287 #else /* defined(DEBUG) */
288 #define uvm_mapent_check(e) /* nothing */
289 #endif /* defined(DEBUG) */
290 static struct vm_map_entry *
291 uvm_kmapent_alloc(struct vm_map *, int);
292 static void uvm_kmapent_free(struct vm_map_entry *);
293 static vsize_t uvm_kmapent_overhead(vsize_t);
294
295 static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
296 static void uvm_map_reference_amap(struct vm_map_entry *, int);
297 static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
298 struct vm_map_entry *);
299 static void uvm_map_unreference_amap(struct vm_map_entry *, int);
300
301 int _uvm_map_sanity(struct vm_map *);
302 int _uvm_tree_sanity(struct vm_map *);
303 static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *);
304
305 static inline int
306 uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
307 {
308
309 if (a->start < b->start)
310 return (-1);
311 else if (a->start > b->start)
312 return (1);
313
314 return (0);
315 }
316
317 static inline void
318 uvm_rb_augment(struct vm_map_entry *entry)
319 {
320
321 entry->space = uvm_rb_subtree_space(entry);
322 }
323
324 RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
325
326 RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
327
328 static inline vsize_t
329 uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry)
330 {
331 /* XXX map is not used */
332
333 KASSERT(entry->next != NULL);
334 return entry->next->start - entry->end;
335 }
336
337 static vsize_t
338 uvm_rb_subtree_space(const struct vm_map_entry *entry)
339 {
340 vaddr_t space, tmp;
341
342 space = entry->ownspace;
343 if (RB_LEFT(entry, rb_entry)) {
344 tmp = RB_LEFT(entry, rb_entry)->space;
345 if (tmp > space)
346 space = tmp;
347 }
348
349 if (RB_RIGHT(entry, rb_entry)) {
350 tmp = RB_RIGHT(entry, rb_entry)->space;
351 if (tmp > space)
352 space = tmp;
353 }
354
355 return (space);
356 }
357
358 static inline void
359 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
360 {
361 /* We need to traverse to the very top */
362 do {
363 entry->ownspace = uvm_rb_space(map, entry);
364 entry->space = uvm_rb_subtree_space(entry);
365 } while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
366 }
367
368 static void
369 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
370 {
371 vaddr_t space = uvm_rb_space(map, entry);
372 struct vm_map_entry *tmp;
373
374 entry->ownspace = entry->space = space;
375 tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
376 #ifdef DIAGNOSTIC
377 if (tmp != NULL)
378 panic("uvm_rb_insert: duplicate entry?");
379 #endif
380 uvm_rb_fixup(map, entry);
381 if (entry->prev != &map->header)
382 uvm_rb_fixup(map, entry->prev);
383 }
384
385 static void
386 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
387 {
388 struct vm_map_entry *parent;
389
390 parent = RB_PARENT(entry, rb_entry);
391 RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
392 if (entry->prev != &map->header)
393 uvm_rb_fixup(map, entry->prev);
394 if (parent)
395 uvm_rb_fixup(map, parent);
396 }
397
398 #if defined(DEBUG)
399 int uvm_debug_check_map = 0;
400 int uvm_debug_check_rbtree = 0;
401 #define uvm_map_check(map, name) \
402 _uvm_map_check((map), (name), __FILE__, __LINE__)
403 static void
404 _uvm_map_check(struct vm_map *map, const char *name,
405 const char *file, int line)
406 {
407
408 if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
409 (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
410 panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
411 name, map, file, line);
412 }
413 }
414 #else /* defined(DEBUG) */
415 #define uvm_map_check(map, name) /* nothing */
416 #endif /* defined(DEBUG) */
417
418 #if defined(DEBUG) || defined(DDB)
419 int
420 _uvm_map_sanity(struct vm_map *map)
421 {
422 bool first_free_found = false;
423 bool hint_found = false;
424 const struct vm_map_entry *e;
425
426 e = &map->header;
427 for (;;) {
428 if (map->first_free == e) {
429 first_free_found = true;
430 } else if (!first_free_found && e->next->start > e->end) {
431 printf("first_free %p should be %p\n",
432 map->first_free, e);
433 return -1;
434 }
435 if (map->hint == e) {
436 hint_found = true;
437 }
438
439 e = e->next;
440 if (e == &map->header) {
441 break;
442 }
443 }
444 if (!first_free_found) {
445 printf("stale first_free\n");
446 return -1;
447 }
448 if (!hint_found) {
449 printf("stale hint\n");
450 return -1;
451 }
452 return 0;
453 }
454
455 int
456 _uvm_tree_sanity(struct vm_map *map)
457 {
458 struct vm_map_entry *tmp, *trtmp;
459 int n = 0, i = 1;
460
461 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
462 if (tmp->ownspace != uvm_rb_space(map, tmp)) {
463 printf("%d/%d ownspace %lx != %lx %s\n",
464 n + 1, map->nentries,
465 (ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp),
466 tmp->next == &map->header ? "(last)" : "");
467 goto error;
468 }
469 }
470 trtmp = NULL;
471 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
472 if (tmp->space != uvm_rb_subtree_space(tmp)) {
473 printf("space %lx != %lx\n",
474 (ulong)tmp->space,
475 (ulong)uvm_rb_subtree_space(tmp));
476 goto error;
477 }
478 if (trtmp != NULL && trtmp->start >= tmp->start) {
479 printf("corrupt: 0x%lx >= 0x%lx\n",
480 trtmp->start, tmp->start);
481 goto error;
482 }
483 n++;
484
485 trtmp = tmp;
486 }
487
488 if (n != map->nentries) {
489 printf("nentries: %d vs %d\n", n, map->nentries);
490 goto error;
491 }
492
493 for (tmp = map->header.next; tmp && tmp != &map->header;
494 tmp = tmp->next, i++) {
495 trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
496 if (trtmp != tmp) {
497 printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
498 RB_PARENT(tmp, rb_entry));
499 goto error;
500 }
501 }
502
503 return (0);
504 error:
505 return (-1);
506 }
507 #endif /* defined(DEBUG) || defined(DDB) */
508
509 #ifdef DIAGNOSTIC
510 static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
511 #endif
512
513 /*
514 * vm_map_lock: acquire an exclusive (write) lock on a map.
515 *
516 * => Note that "intrsafe" maps use only exclusive, spin locks.
517 *
518 * => The locking protocol provides for guaranteed upgrade from shared ->
519 * exclusive by whichever thread currently has the map marked busy.
520 * See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among
521 * other problems, it defeats any fairness guarantees provided by RW
522 * locks.
523 */
524
525 void
526 vm_map_lock(struct vm_map *map)
527 {
528
529 if ((map->flags & VM_MAP_INTRSAFE) != 0) {
530 mutex_spin_enter(&map->mutex);
531 return;
532 }
533
534 for (;;) {
535 rw_enter(&map->lock, RW_WRITER);
536 if (map->busy == NULL)
537 break;
538 KASSERT(map->busy != curlwp);
539 mutex_enter(&map->misc_lock);
540 rw_exit(&map->lock);
541 cv_wait(&map->cv, &map->misc_lock);
542 mutex_exit(&map->misc_lock);
543 }
544
545 map->timestamp++;
546 }
547
548 /*
549 * vm_map_lock_try: try to lock a map, failing if it is already locked.
550 */
551
552 bool
553 vm_map_lock_try(struct vm_map *map)
554 {
555
556 if ((map->flags & VM_MAP_INTRSAFE) != 0)
557 return mutex_tryenter(&map->mutex);
558 if (!rw_tryenter(&map->lock, RW_WRITER))
559 return false;
560 if (map->busy != NULL) {
561 rw_exit(&map->lock);
562 return false;
563 }
564
565 map->timestamp++;
566 return true;
567 }
568
569 /*
570 * vm_map_unlock: release an exclusive lock on a map.
571 */
572
573 void
574 vm_map_unlock(struct vm_map *map)
575 {
576
577 if ((map->flags & VM_MAP_INTRSAFE) != 0)
578 mutex_spin_exit(&map->mutex);
579 else {
580 KASSERT(rw_write_held(&map->lock));
581 rw_exit(&map->lock);
582 }
583 }
584
585 /*
586 * vm_map_upgrade: upgrade a shared lock to an exclusive lock.
587 *
588 * => the caller must hold the map busy
589 */
590
591 void
592 vm_map_upgrade(struct vm_map *map)
593 {
594
595 KASSERT(rw_read_held(&map->lock));
596 KASSERT(map->busy == curlwp);
597
598 if (rw_tryupgrade(&map->lock))
599 return;
600
601 rw_exit(&map->lock);
602 rw_enter(&map->lock, RW_WRITER);
603 }
604
605 /*
606 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
607 * want an exclusive lock.
608 */
609
610 void
611 vm_map_unbusy(struct vm_map *map)
612 {
613
614 KASSERT(rw_lock_held(&map->lock));
615 KASSERT(map->busy == curlwp);
616
617 /*
618 * Safe to clear 'busy' and 'waiters' with only a read lock held:
619 *
620 * o they can only be set with a write lock held
621 * o writers are blocked out with a read or write hold
622 * o at any time, only one thread owns the set of values
623 */
624 map->busy = NULL;
625 mutex_enter(&map->misc_lock);
626 cv_broadcast(&map->cv);
627 mutex_exit(&map->misc_lock);
628 }
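/*
 * Illustrative sketch of the busy/upgrade protocol described above
 * vm_map_lock() (not part of the original source).  The caller is assumed
 * to already hold a read lock and to have the map marked busy
 * (map->busy == curlwp):
 *
 *	vm_map_upgrade(map);	// cannot block forever: we hold the map busy
 *	... modify the map ...
 *	vm_map_unbusy(map);	// wake threads waiting in vm_map_lock()
 *	vm_map_unlock(map);
 */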
629
630 /*
631 * uvm_mapent_alloc: allocate a map entry
632 */
633
634 static struct vm_map_entry *
635 uvm_mapent_alloc(struct vm_map *map, int flags)
636 {
637 struct vm_map_entry *me;
638 int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
639 UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
640
641 if (VM_MAP_USE_KMAPENT(map)) {
642 me = uvm_kmapent_alloc(map, flags);
643 } else {
644 me = pool_get(&uvm_map_entry_pool, pflags);
645 if (__predict_false(me == NULL))
646 return NULL;
647 me->flags = 0;
648 }
649
650 UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
651 ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
652 return (me);
653 }
654
655 /*
656 * uvm_mapent_alloc_split: allocate a map entry for clipping.
657 */
658
659 static struct vm_map_entry *
660 uvm_mapent_alloc_split(struct vm_map *map,
661 const struct vm_map_entry *old_entry, int flags,
662 struct uvm_mapent_reservation *umr)
663 {
664 struct vm_map_entry *me;
665
666 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
667 (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
668
669 if (old_entry->flags & UVM_MAP_QUANTUM) {
670 struct vm_map_kernel *vmk = vm_map_to_kernel(map);
671
672 mutex_spin_enter(&uvm_kentry_lock);
673 me = vmk->vmk_merged_entries;
674 KASSERT(me);
675 vmk->vmk_merged_entries = me->next;
676 mutex_spin_exit(&uvm_kentry_lock);
677 KASSERT(me->flags & UVM_MAP_QUANTUM);
678 } else {
679 me = uvm_mapent_alloc(map, flags);
680 }
681
682 return me;
683 }
684
685 /*
686 * uvm_mapent_free: free map entry
687 */
688
689 static void
690 uvm_mapent_free(struct vm_map_entry *me)
691 {
692 UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
693
694 UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
695 me, me->flags, 0, 0);
696 if (me->flags & UVM_MAP_KERNEL) {
697 uvm_kmapent_free(me);
698 } else {
699 pool_put(&uvm_map_entry_pool, me);
700 }
701 }
702
703 /*
704 * uvm_mapent_free_merged: free merged map entry
705 *
706 * => keep the entry if needed.
707 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
708 */
709
710 static void
711 uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
712 {
713
714 KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);
715
716 if (me->flags & UVM_MAP_QUANTUM) {
717 /*
718 * keep this entry for later splitting.
719 */
720 struct vm_map_kernel *vmk;
721
722 KASSERT(VM_MAP_IS_KERNEL(map));
723 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
724 (me->flags & UVM_MAP_KERNEL));
725
726 vmk = vm_map_to_kernel(map);
727 mutex_spin_enter(&uvm_kentry_lock);
728 me->next = vmk->vmk_merged_entries;
729 vmk->vmk_merged_entries = me;
730 mutex_spin_exit(&uvm_kentry_lock);
731 } else {
732 uvm_mapent_free(me);
733 }
734 }
735
736 /*
737 * uvm_mapent_copy: copy a map entry, preserving flags
738 */
739
740 static inline void
741 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
742 {
743
744 memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
745 ((char *)src));
746 }
747
748 /*
749 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
750 * map entries.
751 *
752 * => size and flags are the same as uvm_km_suballoc's ones.
753 */
754
755 vsize_t
756 uvm_mapent_overhead(vsize_t size, int flags)
757 {
758
759 if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
760 return uvm_kmapent_overhead(size);
761 }
762 return 0;
763 }
764
765 #if defined(DEBUG)
766 static void
767 _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
768 {
769
770 if (entry->start >= entry->end) {
771 goto bad;
772 }
773 if (UVM_ET_ISOBJ(entry)) {
774 if (entry->object.uvm_obj == NULL) {
775 goto bad;
776 }
777 } else if (UVM_ET_ISSUBMAP(entry)) {
778 if (entry->object.sub_map == NULL) {
779 goto bad;
780 }
781 } else {
782 if (entry->object.uvm_obj != NULL ||
783 entry->object.sub_map != NULL) {
784 goto bad;
785 }
786 }
787 if (!UVM_ET_ISOBJ(entry)) {
788 if (entry->offset != 0) {
789 goto bad;
790 }
791 }
792
793 return;
794
795 bad:
796 panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
797 }
798 #endif /* defined(DEBUG) */
799
800 /*
801 * uvm_map_entry_unwire: unwire a map entry
802 *
803 * => map should be locked by caller
804 */
805
806 static inline void
807 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
808 {
809
810 entry->wired_count = 0;
811 uvm_fault_unwire_locked(map, entry->start, entry->end);
812 }
813
814
815 /*
816 * wrapper for calling amap_ref()
817 */
818 static inline void
819 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
820 {
821
822 amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
823 (entry->end - entry->start) >> PAGE_SHIFT, flags);
824 }
825
826
827 /*
828 * wrapper for calling amap_unref()
829 */
830 static inline void
831 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
832 {
833
834 amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
835 (entry->end - entry->start) >> PAGE_SHIFT, flags);
836 }
837
838
839 /*
840 * uvm_map_init: init mapping system at boot time. note that we allocate
841 * and init the static pool of struct vm_map_entry *'s for the kernel here.
842 */
843
844 void
845 uvm_map_init(void)
846 {
847 #if defined(UVMHIST)
848 static struct uvm_history_ent maphistbuf[100];
849 static struct uvm_history_ent pdhistbuf[100];
850 #endif
851
852 /*
853 * first, init logging system.
854 */
855
856 UVMHIST_FUNC("uvm_map_init");
857 UVMHIST_INIT_STATIC(maphist, maphistbuf);
858 UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
859 UVMHIST_CALLED(maphist);
860 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
861
862 /*
863 * initialize the global lock for kernel map entry.
864 */
865
866 mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
867 }
868
869 /*
870 * clippers
871 */
872
873 /*
874 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
875 */
876
877 static void
878 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
879 vaddr_t splitat)
880 {
881 vaddr_t adj;
882
883 KASSERT(entry1->start < splitat);
884 KASSERT(splitat < entry1->end);
885
886 adj = splitat - entry1->start;
887 entry1->end = entry2->start = splitat;
888
889 if (entry1->aref.ar_amap) {
890 amap_splitref(&entry1->aref, &entry2->aref, adj);
891 }
892 if (UVM_ET_ISSUBMAP(entry1)) {
893 /* ... unlikely to happen, but play it safe */
894 uvm_map_reference(entry1->object.sub_map);
895 } else if (UVM_ET_ISOBJ(entry1)) {
896 KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
897 entry2->offset += adj;
898 if (entry1->object.uvm_obj->pgops &&
899 entry1->object.uvm_obj->pgops->pgo_reference)
900 entry1->object.uvm_obj->pgops->pgo_reference(
901 entry1->object.uvm_obj);
902 }
903 }
904
905 /*
 906  * uvm_map_clip_start: ensure that the entry begins at or after
 907  * the starting address; if it doesn't, we split the entry.
908 *
909 * => caller should use UVM_MAP_CLIP_START macro rather than calling
910 * this directly
911 * => map must be locked by caller
912 */
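/*
 * Example (illustrative sketch, not part of the original source): callers
 * trim an entry through the wrapper macro rather than calling
 * uvm_map_clip_start() directly, as uvm_unmap_remove() does later in this
 * file:
 *
 *	UVM_MAP_CLIP_START(map, entry, start, umr);
 *	// on return, entry->start == start
 */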
913
914 void
915 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
916 vaddr_t start, struct uvm_mapent_reservation *umr)
917 {
918 struct vm_map_entry *new_entry;
919
920 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
921
922 uvm_map_check(map, "clip_start entry");
923 uvm_mapent_check(entry);
924
925 /*
926 * Split off the front portion. note that we must insert the new
927 * entry BEFORE this one, so that this entry has the specified
928 * starting address.
929 */
930 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
931 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
932 uvm_mapent_splitadj(new_entry, entry, start);
933 uvm_map_entry_link(map, entry->prev, new_entry);
934
935 uvm_map_check(map, "clip_start leave");
936 }
937
938 /*
 939  * uvm_map_clip_end: ensure that the entry ends at or before
 940  * the ending address; if it doesn't, we split the entry.
941 *
942 * => caller should use UVM_MAP_CLIP_END macro rather than calling
943 * this directly
944 * => map must be locked by caller
945 */
946
947 void
948 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
949 struct uvm_mapent_reservation *umr)
950 {
951 struct vm_map_entry *new_entry;
952
953 uvm_map_check(map, "clip_end entry");
954 uvm_mapent_check(entry);
955
956 /*
957 * Create a new entry and insert it
958 * AFTER the specified entry
959 */
960 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
961 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
962 uvm_mapent_splitadj(entry, new_entry, end);
963 uvm_map_entry_link(map, entry, new_entry);
964
965 uvm_map_check(map, "clip_end leave");
966 }
967
968 static void
969 vm_map_drain(struct vm_map *map, uvm_flag_t flags)
970 {
971
972 if (!VM_MAP_IS_KERNEL(map)) {
973 return;
974 }
975
976 uvm_km_va_drain(map, flags);
977 }
978
979 /*
980 * M A P - m a i n e n t r y p o i n t
981 */
982 /*
983 * uvm_map: establish a valid mapping in a map
984 *
985 * => assume startp is page aligned.
986 * => assume size is a multiple of PAGE_SIZE.
987 * => assume sys_mmap provides enough of a "hint" to have us skip
988 * over text/data/bss area.
989 * => map must be unlocked (we will lock it)
990 * => <uobj,uoffset> value meanings (4 cases):
991 * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
992 * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
993 * [3] <uobj,uoffset> == normal mapping
994 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
995 *
996 * case [4] is for kernel mappings where we don't know the offset until
997 * we've found a virtual address. note that kernel object offsets are
998 * always relative to vm_map_min(kernel_map).
999 *
1000 * => if `align' is non-zero, we align the virtual address to the specified
1001 * alignment.
1002 * this is provided as a mechanism for large pages.
1003 *
1004 * => XXXCDC: need way to map in external amap?
1005 */
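/*
 * Example of a typical case [3] call (illustrative sketch only, not part
 * of the original source; "uobj", "len", "hint" and the locals are
 * hypothetical).  The flag word is built with the standard UVM_MAPFLAG()
 * macro:
 *
 *	vaddr_t va = hint;	// page-aligned hint; chosen VA returned here
 *	int error;
 *
 *	error = uvm_map(map, &va, round_page(len), uobj, 0, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0));
 *	if (error)
 *		return error;	// "va" is valid only on success
 */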
1006
1007 int
1008 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1009 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1010 {
1011 struct uvm_map_args args;
1012 struct vm_map_entry *new_entry;
1013 int error;
1014
1015 KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
1016 KASSERT((size & PAGE_MASK) == 0);
1017
1018 /*
1019 * for pager_map, allocate the new entry first to avoid sleeping
1020 * for memory while we have the map locked.
1021 *
1022  * besides, because we allocate entries for in-kernel maps
1023 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
1024 * allocate them before locking the map.
1025 */
1026
1027 new_entry = NULL;
1028 if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
1029 map == pager_map) {
1030 new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1031 if (__predict_false(new_entry == NULL))
1032 return ENOMEM;
1033 if (flags & UVM_FLAG_QUANTUM)
1034 new_entry->flags |= UVM_MAP_QUANTUM;
1035 }
1036 if (map == pager_map)
1037 flags |= UVM_FLAG_NOMERGE;
1038
1039 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1040 flags, &args);
1041 if (!error) {
1042 error = uvm_map_enter(map, &args, new_entry);
1043 *startp = args.uma_start;
1044 } else if (new_entry) {
1045 uvm_mapent_free(new_entry);
1046 }
1047
1048 #if defined(DEBUG)
1049 if (!error && VM_MAP_IS_KERNEL(map)) {
1050 uvm_km_check_empty(*startp, *startp + size,
1051 (map->flags & VM_MAP_INTRSAFE) != 0);
1052 }
1053 #endif /* defined(DEBUG) */
1054
1055 return error;
1056 }
1057
1058 int
1059 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1060 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1061 struct uvm_map_args *args)
1062 {
1063 struct vm_map_entry *prev_entry;
1064 vm_prot_t prot = UVM_PROTECTION(flags);
1065 vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1066
1067 UVMHIST_FUNC("uvm_map_prepare");
1068 UVMHIST_CALLED(maphist);
1069
1070 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1071 map, start, size, flags);
1072 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1073
1074 /*
1075 * detect a popular device driver bug.
1076 */
1077
1078 KASSERT(doing_shutdown || curlwp != NULL ||
1079 (map->flags & VM_MAP_INTRSAFE));
1080
1081 /*
1082 * zero-sized mapping doesn't make any sense.
1083 */
1084 KASSERT(size > 0);
1085
1086 KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1087
1088 uvm_map_check(map, "map entry");
1089
1090 /*
1091 * check sanity of protection code
1092 */
1093
1094 if ((prot & maxprot) != prot) {
1095 UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
1096 prot, maxprot,0,0);
1097 return EACCES;
1098 }
1099
1100 /*
1101 * figure out where to put new VM range
1102 */
1103
1104 retry:
1105 if (vm_map_lock_try(map) == false) {
1106 if (flags & UVM_FLAG_TRYLOCK) {
1107 return EAGAIN;
1108 }
1109 vm_map_lock(map); /* could sleep here */
1110 }
1111 prev_entry = uvm_map_findspace(map, start, size, &start,
1112 uobj, uoffset, align, flags);
1113 if (prev_entry == NULL) {
1114 unsigned int timestamp;
1115
1116 timestamp = map->timestamp;
1117 UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
1118 timestamp,0,0,0);
1119 map->flags |= VM_MAP_WANTVA;
1120 vm_map_unlock(map);
1121
1122 /*
1123 * try to reclaim kva and wait until someone does unmap.
1124 * fragile locking here, so we awaken every second to
1125 * recheck the condition.
1126 */
1127
1128 vm_map_drain(map, flags);
1129
1130 mutex_enter(&map->misc_lock);
1131 while ((map->flags & VM_MAP_WANTVA) != 0 &&
1132 map->timestamp == timestamp) {
1133 if ((flags & UVM_FLAG_WAITVA) == 0) {
1134 mutex_exit(&map->misc_lock);
1135 UVMHIST_LOG(maphist,
1136 "<- uvm_map_findspace failed!", 0,0,0,0);
1137 return ENOMEM;
1138 } else {
1139 cv_timedwait(&map->cv, &map->misc_lock, hz);
1140 }
1141 }
1142 mutex_exit(&map->misc_lock);
1143 goto retry;
1144 }
1145
1146 #ifdef PMAP_GROWKERNEL
1147 /*
1148 * If the kernel pmap can't map the requested space,
1149 * then allocate more resources for it.
1150 */
1151 if (map == kernel_map && uvm_maxkaddr < (start + size))
1152 uvm_maxkaddr = pmap_growkernel(start + size);
1153 #endif
1154
1155 UVMMAP_EVCNT_INCR(map_call);
1156
1157 /*
1158 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1159 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
1160 * either case we want to zero it before storing it in the map entry
1161 * (because it looks strange and confusing when debugging...)
1162 *
1163 * if uobj is not null
1164 * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1165 * and we do not need to change uoffset.
1166 * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1167 * now (based on the starting address of the map). this case is
1168 * for kernel object mappings where we don't know the offset until
1169 * the virtual address is found (with uvm_map_findspace). the
1170 * offset is the distance we are from the start of the map.
1171 */
1172
1173 if (uobj == NULL) {
1174 uoffset = 0;
1175 } else {
1176 if (uoffset == UVM_UNKNOWN_OFFSET) {
1177 KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1178 uoffset = start - vm_map_min(kernel_map);
1179 }
1180 }
1181
1182 args->uma_flags = flags;
1183 args->uma_prev = prev_entry;
1184 args->uma_start = start;
1185 args->uma_size = size;
1186 args->uma_uobj = uobj;
1187 args->uma_uoffset = uoffset;
1188
1189 return 0;
1190 }
1191
1192 int
1193 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1194 struct vm_map_entry *new_entry)
1195 {
1196 struct vm_map_entry *prev_entry = args->uma_prev;
1197 struct vm_map_entry *dead = NULL;
1198
1199 const uvm_flag_t flags = args->uma_flags;
1200 const vm_prot_t prot = UVM_PROTECTION(flags);
1201 const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1202 const vm_inherit_t inherit = UVM_INHERIT(flags);
1203 const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1204 AMAP_EXTEND_NOWAIT : 0;
1205 const int advice = UVM_ADVICE(flags);
1206 const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
1207 UVM_MAP_QUANTUM : 0;
1208
1209 vaddr_t start = args->uma_start;
1210 vsize_t size = args->uma_size;
1211 struct uvm_object *uobj = args->uma_uobj;
1212 voff_t uoffset = args->uma_uoffset;
1213
1214 const int kmap = (vm_map_pmap(map) == pmap_kernel());
1215 int merged = 0;
1216 int error;
1217 int newetype;
1218
1219 UVMHIST_FUNC("uvm_map_enter");
1220 UVMHIST_CALLED(maphist);
1221
1222 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1223 map, start, size, flags);
1224 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1225
1226 KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1227
1228 if (flags & UVM_FLAG_QUANTUM) {
1229 KASSERT(new_entry);
1230 KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
1231 }
1232
1233 if (uobj)
1234 newetype = UVM_ET_OBJ;
1235 else
1236 newetype = 0;
1237
1238 if (flags & UVM_FLAG_COPYONW) {
1239 newetype |= UVM_ET_COPYONWRITE;
1240 if ((flags & UVM_FLAG_OVERLAY) == 0)
1241 newetype |= UVM_ET_NEEDSCOPY;
1242 }
1243
1244 /*
1245 * try and insert in map by extending previous entry, if possible.
1246 * XXX: we don't try and pull back the next entry. might be useful
1247 * for a stack, but we are currently allocating our stack in advance.
1248 */
1249
1250 if (flags & UVM_FLAG_NOMERGE)
1251 goto nomerge;
1252
1253 if (prev_entry->end == start &&
1254 prev_entry != &map->header &&
1255 UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
1256 prot, maxprot, inherit, advice, 0)) {
1257
1258 if (uobj && prev_entry->offset +
1259 (prev_entry->end - prev_entry->start) != uoffset)
1260 goto forwardmerge;
1261
1262 /*
1263 * can't extend a shared amap. note: no need to lock amap to
1264 * look at refs since we don't care about its exact value.
1265  * if it is one (i.e. we have the only reference) it will stay there
1266 */
1267
1268 if (prev_entry->aref.ar_amap &&
1269 amap_refs(prev_entry->aref.ar_amap) != 1) {
1270 goto forwardmerge;
1271 }
1272
1273 if (prev_entry->aref.ar_amap) {
1274 error = amap_extend(prev_entry, size,
1275 amapwaitflag | AMAP_EXTEND_FORWARDS);
1276 if (error)
1277 goto nomerge;
1278 }
1279
1280 if (kmap)
1281 UVMMAP_EVCNT_INCR(kbackmerge);
1282 else
1283 UVMMAP_EVCNT_INCR(ubackmerge);
1284 UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
1285
1286 /*
1287 * drop our reference to uobj since we are extending a reference
1288 * that we already have (the ref count can not drop to zero).
1289 */
1290
1291 if (uobj && uobj->pgops->pgo_detach)
1292 uobj->pgops->pgo_detach(uobj);
1293
1294 prev_entry->end += size;
1295 uvm_rb_fixup(map, prev_entry);
1296
1297 uvm_map_check(map, "map backmerged");
1298
1299 UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1300 merged++;
1301 }
1302
1303 forwardmerge:
1304 if (prev_entry->next->start == (start + size) &&
1305 prev_entry->next != &map->header &&
1306 UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
1307 prot, maxprot, inherit, advice, 0)) {
1308
1309 if (uobj && prev_entry->next->offset != uoffset + size)
1310 goto nomerge;
1311
1312 /*
1313 * can't extend a shared amap. note: no need to lock amap to
1314 * look at refs since we don't care about its exact value.
1315  * if it is one (i.e. we have the only reference) it will stay there.
1316 *
1317 * note that we also can't merge two amaps, so if we
1318 * merged with the previous entry which has an amap,
1319 * and the next entry also has an amap, we give up.
1320 *
1321 * Interesting cases:
1322 * amap, new, amap -> give up second merge (single fwd extend)
1323 * amap, new, none -> double forward extend (extend again here)
1324 * none, new, amap -> double backward extend (done here)
1325 * uobj, new, amap -> single backward extend (done here)
1326 *
1327 * XXX should we attempt to deal with someone refilling
1328 * the deallocated region between two entries that are
1329 * backed by the same amap (ie, arefs is 2, "prev" and
1330 * "next" refer to it, and adding this allocation will
1331 * close the hole, thus restoring arefs to 1 and
1332 * deallocating the "next" vm_map_entry)? -- @@@
1333 */
1334
1335 if (prev_entry->next->aref.ar_amap &&
1336 (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1337 (merged && prev_entry->aref.ar_amap))) {
1338 goto nomerge;
1339 }
1340
1341 if (merged) {
1342 /*
1343 * Try to extend the amap of the previous entry to
1344 * cover the next entry as well. If it doesn't work
1345 * just skip on, don't actually give up, since we've
1346 * already completed the back merge.
1347 */
1348 if (prev_entry->aref.ar_amap) {
1349 if (amap_extend(prev_entry,
1350 prev_entry->next->end -
1351 prev_entry->next->start,
1352 amapwaitflag | AMAP_EXTEND_FORWARDS))
1353 goto nomerge;
1354 }
1355
1356 /*
1357 * Try to extend the amap of the *next* entry
1358 * back to cover the new allocation *and* the
1359 * previous entry as well (the previous merge
1360 * didn't have an amap already otherwise we
1361 * wouldn't be checking here for an amap). If
1362 * it doesn't work just skip on, again, don't
1363 * actually give up, since we've already
1364 * completed the back merge.
1365 */
1366 else if (prev_entry->next->aref.ar_amap) {
1367 if (amap_extend(prev_entry->next,
1368 prev_entry->end -
1369 prev_entry->start,
1370 amapwaitflag | AMAP_EXTEND_BACKWARDS))
1371 goto nomerge;
1372 }
1373 } else {
1374 /*
1375 * Pull the next entry's amap backwards to cover this
1376 * new allocation.
1377 */
1378 if (prev_entry->next->aref.ar_amap) {
1379 error = amap_extend(prev_entry->next, size,
1380 amapwaitflag | AMAP_EXTEND_BACKWARDS);
1381 if (error)
1382 goto nomerge;
1383 }
1384 }
1385
1386 if (merged) {
1387 if (kmap) {
1388 UVMMAP_EVCNT_DECR(kbackmerge);
1389 UVMMAP_EVCNT_INCR(kbimerge);
1390 } else {
1391 UVMMAP_EVCNT_DECR(ubackmerge);
1392 UVMMAP_EVCNT_INCR(ubimerge);
1393 }
1394 } else {
1395 if (kmap)
1396 UVMMAP_EVCNT_INCR(kforwmerge);
1397 else
1398 UVMMAP_EVCNT_INCR(uforwmerge);
1399 }
1400 UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);
1401
1402 /*
1403 * drop our reference to uobj since we are extending a reference
1404 * that we already have (the ref count can not drop to zero).
1405 * (if merged, we've already detached)
1406 */
1407 if (uobj && uobj->pgops->pgo_detach && !merged)
1408 uobj->pgops->pgo_detach(uobj);
1409
1410 if (merged) {
1411 dead = prev_entry->next;
1412 prev_entry->end = dead->end;
1413 uvm_map_entry_unlink(map, dead);
1414 if (dead->aref.ar_amap != NULL) {
1415 prev_entry->aref = dead->aref;
1416 dead->aref.ar_amap = NULL;
1417 }
1418 } else {
1419 prev_entry->next->start -= size;
1420 if (prev_entry != &map->header)
1421 uvm_rb_fixup(map, prev_entry);
1422 if (uobj)
1423 prev_entry->next->offset = uoffset;
1424 }
1425
1426 uvm_map_check(map, "map forwardmerged");
1427
1428 UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1429 merged++;
1430 }
1431
1432 nomerge:
1433 if (!merged) {
1434 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1435 if (kmap)
1436 UVMMAP_EVCNT_INCR(knomerge);
1437 else
1438 UVMMAP_EVCNT_INCR(unomerge);
1439
1440 /*
1441 * allocate new entry and link it in.
1442 */
1443
1444 if (new_entry == NULL) {
1445 new_entry = uvm_mapent_alloc(map,
1446 (flags & UVM_FLAG_NOWAIT));
1447 if (__predict_false(new_entry == NULL)) {
1448 error = ENOMEM;
1449 goto done;
1450 }
1451 }
1452 new_entry->start = start;
1453 new_entry->end = new_entry->start + size;
1454 new_entry->object.uvm_obj = uobj;
1455 new_entry->offset = uoffset;
1456
1457 new_entry->etype = newetype;
1458
1459 if (flags & UVM_FLAG_NOMERGE) {
1460 new_entry->flags |= UVM_MAP_NOMERGE;
1461 }
1462
1463 new_entry->protection = prot;
1464 new_entry->max_protection = maxprot;
1465 new_entry->inheritance = inherit;
1466 new_entry->wired_count = 0;
1467 new_entry->advice = advice;
1468 if (flags & UVM_FLAG_OVERLAY) {
1469
1470 /*
1471 * to_add: for BSS we overallocate a little since we
1472 * are likely to extend
1473 */
1474
1475 vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1476 UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1477 struct vm_amap *amap = amap_alloc(size, to_add,
1478 (flags & UVM_FLAG_NOWAIT));
1479 if (__predict_false(amap == NULL)) {
1480 error = ENOMEM;
1481 goto done;
1482 }
1483 new_entry->aref.ar_pageoff = 0;
1484 new_entry->aref.ar_amap = amap;
1485 } else {
1486 new_entry->aref.ar_pageoff = 0;
1487 new_entry->aref.ar_amap = NULL;
1488 }
1489 uvm_map_entry_link(map, prev_entry, new_entry);
1490
1491 /*
1492 * Update the free space hint
1493 */
1494
1495 if ((map->first_free == prev_entry) &&
1496 (prev_entry->end >= new_entry->start))
1497 map->first_free = new_entry;
1498
1499 new_entry = NULL;
1500 }
1501
1502 map->size += size;
1503
1504 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1505
1506 error = 0;
1507 done:
1508 vm_map_unlock(map);
1509 if (new_entry) {
1510 if (error == 0) {
1511 KDASSERT(merged);
1512 uvm_mapent_free_merged(map, new_entry);
1513 } else {
1514 uvm_mapent_free(new_entry);
1515 }
1516 }
1517 if (dead) {
1518 KDASSERT(merged);
1519 uvm_mapent_free_merged(map, dead);
1520 }
1521 return error;
1522 }
1523
1524 /*
1525 * uvm_map_lookup_entry: find map entry at or before an address
1526 *
1527 * => map must at least be read-locked by caller
1528 * => entry is returned in "entry"
1529 * => return value is true if address is in the returned entry
1530 */
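/*
 * Example use (sketch only, not part of the original source; "va" is a
 * hypothetical address, and the map is at least read-locked):
 *
 *	struct vm_map_entry *entry;
 *
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		// va lies inside "entry"
 *	} else {
 *		// va falls in the gap after "entry"
 *		// (entry may be &map->header if va precedes all entries)
 *	}
 */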
1531
1532 bool
1533 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1534 struct vm_map_entry **entry /* OUT */)
1535 {
1536 struct vm_map_entry *cur;
1537 bool use_tree = false;
1538 UVMHIST_FUNC("uvm_map_lookup_entry");
1539 UVMHIST_CALLED(maphist);
1540
1541 UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1542 map, address, entry, 0);
1543
1544 /*
1545 * start looking either from the head of the
1546 * list, or from the hint.
1547 */
1548
1549 mutex_enter(&map->hint_lock);
1550 cur = map->hint;
1551 mutex_exit(&map->hint_lock);
1552
1553 if (cur == &map->header)
1554 cur = cur->next;
1555
1556 UVMMAP_EVCNT_INCR(mlk_call);
1557 if (address >= cur->start) {
1558
1559 /*
1560 * go from hint to end of list.
1561 *
1562 * but first, make a quick check to see if
1563 * we are already looking at the entry we
1564 * want (which is usually the case).
1565 * note also that we don't need to save the hint
1566 * here... it is the same hint (unless we are
1567 * at the header, in which case the hint didn't
1568 * buy us anything anyway).
1569 */
1570
1571 if (cur != &map->header && cur->end > address) {
1572 UVMMAP_EVCNT_INCR(mlk_hint);
1573 *entry = cur;
1574 UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1575 cur, 0, 0, 0);
1576 uvm_mapent_check(*entry);
1577 return (true);
1578 }
1579
1580 if (map->nentries > 30)
1581 use_tree = true;
1582 } else {
1583
1584 /*
1585 * invalid hint. use tree.
1586 */
1587 use_tree = true;
1588 }
1589
1590 uvm_map_check(map, __func__);
1591
1592 if (use_tree) {
1593 struct vm_map_entry *prev = &map->header;
1594 cur = RB_ROOT(&map->rbhead);
1595
1596 /*
1597 * Simple lookup in the tree. Happens when the hint is
1598  * invalid, or nentries reaches a threshold.
1599 */
1600 while (cur) {
1601 if (address >= cur->start) {
1602 if (address < cur->end) {
1603 *entry = cur;
1604 goto got;
1605 }
1606 prev = cur;
1607 cur = RB_RIGHT(cur, rb_entry);
1608 } else
1609 cur = RB_LEFT(cur, rb_entry);
1610 }
1611 *entry = prev;
1612 goto failed;
1613 }
1614
1615 /*
1616 * search linearly
1617 */
1618
1619 while (cur != &map->header) {
1620 if (cur->end > address) {
1621 if (address >= cur->start) {
1622 /*
1623 * save this lookup for future
1624 * hints, and return
1625 */
1626
1627 *entry = cur;
1628 got:
1629 SAVE_HINT(map, map->hint, *entry);
1630 UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1631 cur, 0, 0, 0);
1632 KDASSERT((*entry)->start <= address);
1633 KDASSERT(address < (*entry)->end);
1634 uvm_mapent_check(*entry);
1635 return (true);
1636 }
1637 break;
1638 }
1639 cur = cur->next;
1640 }
1641 *entry = cur->prev;
1642 failed:
1643 SAVE_HINT(map, map->hint, *entry);
1644 UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1645 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1646 KDASSERT((*entry)->next == &map->header ||
1647 address < (*entry)->next->start);
1648 return (false);
1649 }
1650
1651 /*
1652  * See if the range between start and start + length fits in the gap
1653  * between entry->end and entry->next->start.  Returns 1 if it fits,
1654  * 0 if it doesn't fit, and -1 if the address wraps around.
1655 */
1656 static int
1657 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1658 vsize_t align, int topdown, struct vm_map_entry *entry)
1659 {
1660 vaddr_t end;
1661
1662 #ifdef PMAP_PREFER
1663 /*
1664 * push start address forward as needed to avoid VAC alias problems.
1665 * we only do this if a valid offset is specified.
1666 */
1667
1668 if (uoffset != UVM_UNKNOWN_OFFSET)
1669 PMAP_PREFER(uoffset, start, length, topdown);
1670 #endif
1671 if (align != 0) {
1672 if ((*start & (align - 1)) != 0) {
1673 if (topdown)
1674 *start &= ~(align - 1);
1675 else
1676 *start = roundup(*start, align);
1677 }
1678 /*
1679 * XXX Should we PMAP_PREFER() here again?
1680 * eh...i think we're okay
1681 */
1682 }
1683
1684 /*
1685 * Find the end of the proposed new region. Be sure we didn't
1686 * wrap around the address; if so, we lose. Otherwise, if the
1687 * proposed new region fits before the next entry, we win.
1688 */
1689
1690 end = *start + length;
1691 if (end < *start)
1692 return (-1);
1693
1694 if (entry->next->start >= end && *start >= entry->end)
1695 return (1);
1696
1697 return (0);
1698 }
1699
1700 /*
1701 * uvm_map_findspace: find "length" sized space in "map".
1702 *
1703 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1704 * set in "flags" (in which case we insist on using "hint").
1705 * => "result" is VA returned
1706 * => uobj/uoffset are to be used to handle VAC alignment, if required
1707 * => if "align" is non-zero, we attempt to align to that value.
1708 * => caller must at least have read-locked map
1709 * => returns NULL on failure, or pointer to prev. map entry if success
1710 * => note this is a cross between the old vm_map_findspace and vm_map_find
1711 */
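/*
 * Example call (sketch only, not part of the original source; "va" is a
 * hypothetical output variable, and the caller holds at least a read lock):
 *
 *	struct vm_map_entry *prev;
 *	vaddr_t va;
 *
 *	prev = uvm_map_findspace(map, vm_map_min(map), PAGE_SIZE, &va,
 *	    NULL, UVM_UNKNOWN_OFFSET, 0, 0);
 *	if (prev == NULL)
 *		return ENOMEM;	// no gap large enough
 *	// "va" now holds a candidate address; "prev" is the entry before it
 */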
1712
1713 struct vm_map_entry *
1714 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1715 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1716 vsize_t align, int flags)
1717 {
1718 struct vm_map_entry *entry;
1719 struct vm_map_entry *child, *prev, *tmp;
1720 vaddr_t orig_hint;
1721 const int topdown = map->flags & VM_MAP_TOPDOWN;
1722 UVMHIST_FUNC("uvm_map_findspace");
1723 UVMHIST_CALLED(maphist);
1724
1725 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1726 map, hint, length, flags);
1727 KASSERT((align & (align - 1)) == 0);
1728 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1729
1730 uvm_map_check(map, "map_findspace entry");
1731
1732 /*
1733 * remember the original hint. if we are aligning, then we
1734 * may have to try again with no alignment constraint if
1735 * we fail the first time.
1736 */
1737
1738 orig_hint = hint;
1739 if (hint < vm_map_min(map)) { /* check ranges ... */
1740 if (flags & UVM_FLAG_FIXED) {
1741 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1742 return (NULL);
1743 }
1744 hint = vm_map_min(map);
1745 }
1746 if (hint > vm_map_max(map)) {
1747 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1748 hint, vm_map_min(map), vm_map_max(map), 0);
1749 return (NULL);
1750 }
1751
1752 /*
1753 * Look for the first possible address; if there's already
1754 * something at this address, we have to start after it.
1755 */
1756
1757 /*
1758 * @@@: there are four, no, eight cases to consider.
1759 *
1760 * 0: found, fixed, bottom up -> fail
1761 * 1: found, fixed, top down -> fail
1762 * 2: found, not fixed, bottom up -> start after entry->end,
1763 * loop up
1764 * 3: found, not fixed, top down -> start before entry->start,
1765 * loop down
1766 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1767 * 5: not found, fixed, top down -> check entry->next->start, fail
1768 * 6: not found, not fixed, bottom up -> check entry->next->start,
1769 * loop up
1770 * 7: not found, not fixed, top down -> check entry->next->start,
1771 * loop down
1772 *
1773  * as you can see, it reduces to roughly five cases; adding
1774  * top down mapping only adds one unique case (without it,
1775  * there would be four cases).
1776 */
1777
1778 if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1779 entry = map->first_free;
1780 } else {
1781 if (uvm_map_lookup_entry(map, hint, &entry)) {
1782 /* "hint" address already in use ... */
1783 if (flags & UVM_FLAG_FIXED) {
1784 UVMHIST_LOG(maphist, "<- fixed & VA in use",
1785 0, 0, 0, 0);
1786 return (NULL);
1787 }
1788 if (topdown)
1789 /* Start from lower gap. */
1790 entry = entry->prev;
1791 } else if (flags & UVM_FLAG_FIXED) {
1792 if (entry->next->start >= hint + length &&
1793 hint + length > hint)
1794 goto found;
1795
1796 /* "hint" address is gap but too small */
1797 UVMHIST_LOG(maphist, "<- fixed mapping failed",
1798 0, 0, 0, 0);
1799 return (NULL); /* only one shot at it ... */
1800 } else {
1801 /*
1802 * See if given hint fits in this gap.
1803 */
1804 switch (uvm_map_space_avail(&hint, length,
1805 uoffset, align, topdown, entry)) {
1806 case 1:
1807 goto found;
1808 case -1:
1809 goto wraparound;
1810 }
1811
1812 if (topdown) {
1813 /*
1814 * Still there is a chance to fit
1815 * if hint > entry->end.
1816 */
1817 } else {
1818 /* Start from higher gap. */
1819 entry = entry->next;
1820 if (entry == &map->header)
1821 goto notfound;
1822 goto nextgap;
1823 }
1824 }
1825 }
1826
1827 /*
1828  * Note that the UVM_FLAG_FIXED case has already been handled above.
1829 */
1830 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1831
1832 /* Try to find the space in the red-black tree */
1833
1834 /* Check slot before any entry */
1835 hint = topdown ? entry->next->start - length : entry->end;
1836 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1837 topdown, entry)) {
1838 case 1:
1839 goto found;
1840 case -1:
1841 goto wraparound;
1842 }
1843
1844 nextgap:
1845 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1846 /* If there is not enough space in the whole tree, we fail */
1847 tmp = RB_ROOT(&map->rbhead);
1848 if (tmp == NULL || tmp->space < length)
1849 goto notfound;
1850
1851 prev = NULL; /* previous candidate */
1852
1853 /* Find an entry close to hint that has enough space */
1854 for (; tmp;) {
1855 KASSERT(tmp->next->start == tmp->end + tmp->ownspace);
1856 if (topdown) {
1857 if (tmp->next->start < hint + length &&
1858 (prev == NULL || tmp->end > prev->end)) {
1859 if (tmp->ownspace >= length)
1860 prev = tmp;
1861 else if ((child = RB_LEFT(tmp, rb_entry))
1862 != NULL && child->space >= length)
1863 prev = tmp;
1864 }
1865 } else {
1866 if (tmp->end >= hint &&
1867 (prev == NULL || tmp->end < prev->end)) {
1868 if (tmp->ownspace >= length)
1869 prev = tmp;
1870 else if ((child = RB_RIGHT(tmp, rb_entry))
1871 != NULL && child->space >= length)
1872 prev = tmp;
1873 }
1874 }
1875 if (tmp->next->start < hint + length)
1876 child = RB_RIGHT(tmp, rb_entry);
1877 else if (tmp->end > hint)
1878 child = RB_LEFT(tmp, rb_entry);
1879 else {
1880 if (tmp->ownspace >= length)
1881 break;
1882 if (topdown)
1883 child = RB_LEFT(tmp, rb_entry);
1884 else
1885 child = RB_RIGHT(tmp, rb_entry);
1886 }
1887 if (child == NULL || child->space < length)
1888 break;
1889 tmp = child;
1890 }
1891
1892 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1893 /*
1894  * Check if the entry that we found satisfies the
1895 * space requirement
1896 */
1897 if (topdown) {
1898 if (hint > tmp->next->start - length)
1899 hint = tmp->next->start - length;
1900 } else {
1901 if (hint < tmp->end)
1902 hint = tmp->end;
1903 }
1904 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1905 topdown, tmp)) {
1906 case 1:
1907 entry = tmp;
1908 goto found;
1909 case -1:
1910 goto wraparound;
1911 }
1912 if (tmp->ownspace >= length)
1913 goto listsearch;
1914 }
1915 if (prev == NULL)
1916 goto notfound;
1917
1918 if (topdown) {
1919 KASSERT(orig_hint >= prev->next->start - length ||
1920 prev->next->start - length > prev->next->start);
1921 hint = prev->next->start - length;
1922 } else {
1923 KASSERT(orig_hint <= prev->end);
1924 hint = prev->end;
1925 }
1926 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1927 topdown, prev)) {
1928 case 1:
1929 entry = prev;
1930 goto found;
1931 case -1:
1932 goto wraparound;
1933 }
1934 if (prev->ownspace >= length)
1935 goto listsearch;
1936
1937 if (topdown)
1938 tmp = RB_LEFT(prev, rb_entry);
1939 else
1940 tmp = RB_RIGHT(prev, rb_entry);
1941 for (;;) {
1942 KASSERT(tmp && tmp->space >= length);
1943 if (topdown)
1944 child = RB_RIGHT(tmp, rb_entry);
1945 else
1946 child = RB_LEFT(tmp, rb_entry);
1947 if (child && child->space >= length) {
1948 tmp = child;
1949 continue;
1950 }
1951 if (tmp->ownspace >= length)
1952 break;
1953 if (topdown)
1954 tmp = RB_LEFT(tmp, rb_entry);
1955 else
1956 tmp = RB_RIGHT(tmp, rb_entry);
1957 }
1958
1959 if (topdown) {
1960 KASSERT(orig_hint >= tmp->next->start - length ||
1961 tmp->next->start - length > tmp->next->start);
1962 hint = tmp->next->start - length;
1963 } else {
1964 KASSERT(orig_hint <= tmp->end);
1965 hint = tmp->end;
1966 }
1967 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1968 topdown, tmp)) {
1969 case 1:
1970 entry = tmp;
1971 goto found;
1972 case -1:
1973 goto wraparound;
1974 }
1975
1976 /*
1977 * The tree fails to find an entry because of offset or alignment
1978 * restrictions. Search the list instead.
1979 */
1980 listsearch:
1981 /*
1982 * Look through the rest of the map, trying to fit a new region in
1983 * the gap between existing regions, or after the very last region.
1984 * note: entry->end = base VA of current gap,
1985 * entry->next->start = VA of end of current gap
1986 */
1987
1988 for (;;) {
1989 /* Update hint for current gap. */
1990 hint = topdown ? entry->next->start - length : entry->end;
1991
1992 /* See if it fits. */
1993 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1994 topdown, entry)) {
1995 case 1:
1996 goto found;
1997 case -1:
1998 goto wraparound;
1999 }
2000
2001 /* Advance to next/previous gap */
2002 if (topdown) {
2003 if (entry == &map->header) {
2004 UVMHIST_LOG(maphist, "<- failed (off start)",
2005 0,0,0,0);
2006 goto notfound;
2007 }
2008 entry = entry->prev;
2009 } else {
2010 entry = entry->next;
2011 if (entry == &map->header) {
2012 UVMHIST_LOG(maphist, "<- failed (off end)",
2013 0,0,0,0);
2014 goto notfound;
2015 }
2016 }
2017 }
2018
2019 found:
2020 SAVE_HINT(map, map->hint, entry);
2021 *result = hint;
2022 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2023 	KASSERT(topdown || hint >= orig_hint);
2024 KASSERT(!topdown || hint <= orig_hint);
2025 KASSERT(entry->end <= hint);
2026 KASSERT(hint + length <= entry->next->start);
2027 return (entry);
2028
2029 wraparound:
2030 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2031
2032 return (NULL);
2033
2034 notfound:
2035 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2036
2037 return (NULL);
2038 }
2039
2040 /*
2041 * U N M A P - m a i n h e l p e r f u n c t i o n s
2042 */
2043
2044 /*
2045  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
2046 *
2047 * => caller must check alignment and size
2048 * => map must be locked by caller
2049  * => we return a list of map entries that we've removed from the map
2050 * in "entry_list"
2051 */
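/*
 * A minimal caller sketch (illustrative only; compare uvm_unmap() and
 * the map-dying path in uvmspace_free() below): unmap with the map
 * write-locked, then drop the references with the map unlocked:
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */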
2052
2053 void
2054 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2055 struct vm_map_entry **entry_list /* OUT */,
2056 struct uvm_mapent_reservation *umr, int flags)
2057 {
2058 struct vm_map_entry *entry, *first_entry, *next;
2059 vaddr_t len;
2060 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2061
2062 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2063 map, start, end, 0);
2064 VM_MAP_RANGE_CHECK(map, start, end);
2065
2066 uvm_map_check(map, "unmap_remove entry");
2067
2068 /*
2069 * find first entry
2070 */
2071
2072 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2073 /* clip and go... */
2074 entry = first_entry;
2075 UVM_MAP_CLIP_START(map, entry, start, umr);
2076 /* critical! prevents stale hint */
2077 SAVE_HINT(map, entry, entry->prev);
2078 } else {
2079 entry = first_entry->next;
2080 }
2081
2082 /*
2083 * Save the free space hint
2084 */
2085
2086 if (map->first_free != &map->header && map->first_free->start >= start)
2087 map->first_free = entry->prev;
2088
2089 /*
2090 * note: we now re-use first_entry for a different task. we remove
2091 * a number of map entries from the map and save them in a linked
2092 * list headed by "first_entry". once we remove them from the map
2093 * the caller should unlock the map and drop the references to the
2094 * backing objects [c.f. uvm_unmap_detach]. the object is to
2095 * separate unmapping from reference dropping. why?
2096 * [1] the map has to be locked for unmapping
2097 * [2] the map need not be locked for reference dropping
2098 * [3] dropping references may trigger pager I/O, and if we hit
2099 * a pager that does synchronous I/O we may have to wait for it.
2100 * [4] we would like all waiting for I/O to occur with maps unlocked
2101 * so that we don't block other threads.
2102 */
2103
2104 first_entry = NULL;
2105 *entry_list = NULL;
2106
2107 /*
2108 * break up the area into map entry sized regions and unmap. note
2109 * that all mappings have to be removed before we can even consider
2110 * dropping references to amaps or VM objects (otherwise we could end
2111 * up with a mapping to a page on the free list which would be very bad)
2112 */
2113
2114 while ((entry != &map->header) && (entry->start < end)) {
2115 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2116
2117 UVM_MAP_CLIP_END(map, entry, end, umr);
2118 next = entry->next;
2119 len = entry->end - entry->start;
2120
2121 /*
2122 * unwire before removing addresses from the pmap; otherwise
2123 * unwiring will put the entries back into the pmap (XXX).
2124 */
2125
2126 if (VM_MAPENT_ISWIRED(entry)) {
2127 uvm_map_entry_unwire(map, entry);
2128 }
2129 if (flags & UVM_FLAG_VAONLY) {
2130
2131 /* nothing */
2132
2133 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2134
2135 /*
2136 * if the map is non-pageable, any pages mapped there
2137 * must be wired and entered with pmap_kenter_pa(),
2138 * and we should free any such pages immediately.
2139 * this is mostly used for kmem_map and mb_map.
2140 */
2141
2142 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2143 uvm_km_pgremove_intrsafe(entry->start,
2144 entry->end);
2145 pmap_kremove(entry->start, len);
2146 }
2147 } else if (UVM_ET_ISOBJ(entry) &&
2148 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2149 KASSERT(vm_map_pmap(map) == pmap_kernel());
2150
2151 /*
2152 * note: kernel object mappings are currently used in
2153 * two ways:
2154 * [1] "normal" mappings of pages in the kernel object
2155 * [2] uvm_km_valloc'd allocations in which we
2156 * pmap_enter in some non-kernel-object page
2157 * (e.g. vmapbuf).
2158 *
2159 * for case [1], we need to remove the mapping from
2160 * the pmap and then remove the page from the kernel
2161 * object (because, once pages in a kernel object are
2162 * unmapped they are no longer needed, unlike, say,
2163 * a vnode where you might want the data to persist
2164 * until flushed out of a queue).
2165 *
2166 * for case [2], we need to remove the mapping from
2167 * the pmap. there shouldn't be any pages at the
2168 * specified offset in the kernel object [but it
2169 * doesn't hurt to call uvm_km_pgremove just to be
2170 * safe?]
2171 *
2172 * uvm_km_pgremove currently does the following:
2173 * for pages in the kernel object in range:
2174 * - drops the swap slot
2175 * - uvm_pagefree the page
2176 */
2177
2178 /*
2179 * remove mappings from pmap and drop the pages
2180 * from the object. offsets are always relative
2181 * to vm_map_min(kernel_map).
2182 */
2183
2184 pmap_remove(pmap_kernel(), entry->start,
2185 entry->start + len);
2186 uvm_km_pgremove(entry->start, entry->end);
2187
2188 /*
2189 * null out kernel_object reference, we've just
2190 * dropped it
2191 */
2192
2193 entry->etype &= ~UVM_ET_OBJ;
2194 entry->object.uvm_obj = NULL;
2195 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2196
2197 /*
2198 * remove mappings the standard way.
2199 */
2200
2201 pmap_remove(map->pmap, entry->start, entry->end);
2202 }
2203
2204 #if defined(DEBUG)
2205 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2206
2207 /*
2208 			 * check whether any mapping remains here,
2209 			 * which would indicate a bug in the caller.
2210 */
2211
2212 vaddr_t va;
2213 for (va = entry->start; va < entry->end;
2214 va += PAGE_SIZE) {
2215 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2216 panic("uvm_unmap_remove: has mapping");
2217 }
2218 }
2219
2220 if (VM_MAP_IS_KERNEL(map)) {
2221 uvm_km_check_empty(entry->start, entry->end,
2222 (map->flags & VM_MAP_INTRSAFE) != 0);
2223 }
2224 }
2225 #endif /* defined(DEBUG) */
2226
2227 /*
2228 * remove entry from map and put it on our list of entries
2229 * that we've nuked. then go to next entry.
2230 */
2231
2232 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2233
2234 /* critical! prevents stale hint */
2235 SAVE_HINT(map, entry, entry->prev);
2236
2237 uvm_map_entry_unlink(map, entry);
2238 KASSERT(map->size >= len);
2239 map->size -= len;
2240 entry->prev = NULL;
2241 entry->next = first_entry;
2242 first_entry = entry;
2243 entry = next;
2244 }
2245 if ((map->flags & VM_MAP_DYING) == 0) {
2246 pmap_update(vm_map_pmap(map));
2247 }
2248
2249 uvm_map_check(map, "unmap_remove leave");
2250
2251 /*
2252 * now we've cleaned up the map and are ready for the caller to drop
2253 * references to the mapped objects.
2254 */
2255
2256 *entry_list = first_entry;
2257 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2258
2259 if (map->flags & VM_MAP_WANTVA) {
2260 mutex_enter(&map->misc_lock);
2261 map->flags &= ~VM_MAP_WANTVA;
2262 cv_broadcast(&map->cv);
2263 mutex_exit(&map->misc_lock);
2264 }
2265 }
2266
2267 /*
2268 * uvm_unmap_detach: drop references in a chain of map entries
2269 *
2270 * => we will free the map entries as we traverse the list.
2271 */
2272
2273 void
2274 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2275 {
2276 struct vm_map_entry *next_entry;
2277 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2278
2279 while (first_entry) {
2280 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2281 UVMHIST_LOG(maphist,
2282 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2283 first_entry, first_entry->aref.ar_amap,
2284 first_entry->object.uvm_obj,
2285 UVM_ET_ISSUBMAP(first_entry));
2286
2287 /*
2288 * drop reference to amap, if we've got one
2289 */
2290
2291 if (first_entry->aref.ar_amap)
2292 uvm_map_unreference_amap(first_entry, flags);
2293
2294 /*
2295 * drop reference to our backing object, if we've got one
2296 */
2297
2298 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2299 if (UVM_ET_ISOBJ(first_entry) &&
2300 first_entry->object.uvm_obj->pgops->pgo_detach) {
2301 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2302 (first_entry->object.uvm_obj);
2303 }
2304 next_entry = first_entry->next;
2305 uvm_mapent_free(first_entry);
2306 first_entry = next_entry;
2307 }
2308 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2309 }
2310
2311 /*
2312 * E X T R A C T I O N F U N C T I O N S
2313 */
2314
2315 /*
2316 * uvm_map_reserve: reserve space in a vm_map for future use.
2317 *
2318 * => we reserve space in a map by putting a dummy map entry in the
2319 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2320 * => map should be unlocked (we will write lock it)
2321 * => we return true if we were able to reserve space
2322 * => XXXCDC: should be inline?
2323 */
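/*
 * Illustrative use (this is essentially step 1 of uvm_map_extract()
 * below): reserve "len" bytes anywhere in dstmap and get the chosen
 * address back in dstaddr:
 *
 *	vaddr_t dstaddr = vm_map_min(dstmap);
 *
 *	if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
 *		return ENOMEM;
 */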
2324
2325 int
2326 uvm_map_reserve(struct vm_map *map, vsize_t size,
2327 vaddr_t offset /* hint for pmap_prefer */,
2328 vsize_t align /* alignment */,
2329 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2330 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2331 {
2332 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2333
2334 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2335 map,size,offset,raddr);
2336
2337 size = round_page(size);
2338
2339 /*
2340 * reserve some virtual space.
2341 */
2342
2343 if (uvm_map(map, raddr, size, NULL, offset, align,
2344 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2345 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2346 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2347 return (false);
2348 }
2349
2350 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2351 return (true);
2352 }
2353
2354 /*
2355 * uvm_map_replace: replace a reserved (blank) area of memory with
2356 * real mappings.
2357 *
2358 * => caller must WRITE-LOCK the map
2359 * => we return true if replacement was a success
2360  * => we expect the newents chain to have nnewents entries on it and
2361 * we expect newents->prev to point to the last entry on the list
2362 * => note newents is allowed to be NULL
2363 */
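/*
 * Shape of the newents chain, illustrated for nnewents == 3 (the
 * DIAGNOSTIC block below checks exactly this):
 *
 *	newents -> e2 -> e3 -> NULL	(via ->next)
 *	newents->prev == e3		(shortcut to the last entry)
 *	e2->prev == newents, e3->prev == e2
 *
 * uvm_map_extract() builds its chain in this format (see step 4 there).
 */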
2364
2365 int
2366 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2367 struct vm_map_entry *newents, int nnewents)
2368 {
2369 struct vm_map_entry *oldent, *last;
2370
2371 uvm_map_check(map, "map_replace entry");
2372
2373 /*
2374 * first find the blank map entry at the specified address
2375 */
2376
2377 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2378 return (false);
2379 }
2380
2381 /*
2382 * check to make sure we have a proper blank entry
2383 */
2384
2385 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2386 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2387 }
2388 if (oldent->start != start || oldent->end != end ||
2389 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2390 return (false);
2391 }
2392
2393 #ifdef DIAGNOSTIC
2394
2395 /*
2396 * sanity check the newents chain
2397 */
2398
2399 {
2400 struct vm_map_entry *tmpent = newents;
2401 int nent = 0;
2402 vaddr_t cur = start;
2403
2404 while (tmpent) {
2405 nent++;
2406 if (tmpent->start < cur)
2407 panic("uvm_map_replace1");
2408 if (tmpent->start > tmpent->end || tmpent->end > end) {
2409 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2410 tmpent->start, tmpent->end, end);
2411 panic("uvm_map_replace2");
2412 }
2413 cur = tmpent->end;
2414 if (tmpent->next) {
2415 if (tmpent->next->prev != tmpent)
2416 panic("uvm_map_replace3");
2417 } else {
2418 if (newents->prev != tmpent)
2419 panic("uvm_map_replace4");
2420 }
2421 tmpent = tmpent->next;
2422 }
2423 if (nent != nnewents)
2424 panic("uvm_map_replace5");
2425 }
2426 #endif
2427
2428 /*
2429 * map entry is a valid blank! replace it. (this does all the
2430 * work of map entry link/unlink...).
2431 */
2432
2433 if (newents) {
2434 last = newents->prev;
2435
2436 /* critical: flush stale hints out of map */
2437 SAVE_HINT(map, map->hint, newents);
2438 if (map->first_free == oldent)
2439 map->first_free = last;
2440
2441 last->next = oldent->next;
2442 last->next->prev = last;
2443
2444 /* Fix RB tree */
2445 uvm_rb_remove(map, oldent);
2446
2447 newents->prev = oldent->prev;
2448 newents->prev->next = newents;
2449 map->nentries = map->nentries + (nnewents - 1);
2450
2451 /* Fixup the RB tree */
2452 {
2453 int i;
2454 struct vm_map_entry *tmp;
2455
2456 tmp = newents;
2457 for (i = 0; i < nnewents && tmp; i++) {
2458 uvm_rb_insert(map, tmp);
2459 tmp = tmp->next;
2460 }
2461 }
2462 } else {
2463 /* NULL list of new entries: just remove the old one */
2464 clear_hints(map, oldent);
2465 uvm_map_entry_unlink(map, oldent);
2466 }
2467
2468 uvm_map_check(map, "map_replace leave");
2469
2470 /*
2471 * now we can free the old blank entry and return.
2472 */
2473
2474 uvm_mapent_free(oldent);
2475 return (true);
2476 }
2477
2478 /*
2479 * uvm_map_extract: extract a mapping from a map and put it somewhere
2480 * (maybe removing the old mapping)
2481 *
2482 * => maps should be unlocked (we will write lock them)
2483 * => returns 0 on success, error code otherwise
2484 * => start must be page aligned
2485  * => len must be a multiple of PAGE_SIZE
2486 * => flags:
2487 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2488 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2489 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2490 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2491 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2492 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2493 * be used from within the kernel in a kernel level map <<<
2494 */
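/*
 * Illustrative sketch of a temporary extraction into the kernel map
 * (roughly the kind of call made by uvm_io.c; exact arguments vary):
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcva, chunksz, kernel_map, &kva,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
 *
 * the temporary kernel mapping is later removed from kernel_map again,
 * taking the QREF path when the references are dropped.
 */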
2495
2496 int
2497 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2498 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2499 {
2500 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2501 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2502 *deadentry, *oldentry;
2503 vsize_t elen;
2504 int nchain, error, copy_ok;
2505 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2506
2507 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2508 len,0);
2509 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2510
2511 uvm_map_check(srcmap, "map_extract src enter");
2512 uvm_map_check(dstmap, "map_extract dst enter");
2513
2514 /*
2515 * step 0: sanity check: start must be on a page boundary, length
2516 * must be page sized. can't ask for CONTIG/QREF if you asked for
2517 * REMOVE.
2518 */
2519
2520 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2521 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2522 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2523
2524 /*
2525 * step 1: reserve space in the target map for the extracted area
2526 */
2527
2528 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2529 dstaddr = vm_map_min(dstmap);
2530 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2531 return (ENOMEM);
2532 *dstaddrp = dstaddr; /* pass address back to caller */
2533 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2534 } else {
2535 dstaddr = *dstaddrp;
2536 }
2537
2538 /*
2539 * step 2: setup for the extraction process loop by init'ing the
2540 * map entry chain, locking src map, and looking up the first useful
2541 * entry in the map.
2542 */
2543
2544 end = start + len;
2545 newend = dstaddr + len;
2546 chain = endchain = NULL;
2547 nchain = 0;
2548 vm_map_lock(srcmap);
2549
2550 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2551
2552 /* "start" is within an entry */
2553 if (flags & UVM_EXTRACT_QREF) {
2554
2555 /*
2556 * for quick references we don't clip the entry, so
2557 * the entry may map space "before" the starting
2558 * virtual address... this is the "fudge" factor
2559 * (which can be non-zero only the first time
2560 * through the "while" loop in step 3).
2561 */
2562
2563 fudge = start - entry->start;
2564 } else {
2565
2566 /*
2567 * normal reference: we clip the map to fit (thus
2568 * fudge is zero)
2569 */
2570
2571 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2572 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2573 fudge = 0;
2574 }
2575 } else {
2576
2577 /* "start" is not within an entry ... skip to next entry */
2578 if (flags & UVM_EXTRACT_CONTIG) {
2579 error = EINVAL;
2580 goto bad; /* definite hole here ... */
2581 }
2582
2583 entry = entry->next;
2584 fudge = 0;
2585 }
2586
2587 /* save values from srcmap for step 6 */
2588 orig_entry = entry;
2589 orig_fudge = fudge;
2590
2591 /*
2592 * step 3: now start looping through the map entries, extracting
2593 * as we go.
2594 */
2595
2596 while (entry->start < end && entry != &srcmap->header) {
2597
2598 /* if we are not doing a quick reference, clip it */
2599 if ((flags & UVM_EXTRACT_QREF) == 0)
2600 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2601
2602 /* clear needs_copy (allow chunking) */
2603 if (UVM_ET_ISNEEDSCOPY(entry)) {
2604 amap_copy(srcmap, entry,
2605 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2606 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2607 error = ENOMEM;
2608 goto bad;
2609 }
2610
2611 /* amap_copy could clip (during chunk)! update fudge */
2612 if (fudge) {
2613 fudge = start - entry->start;
2614 orig_fudge = fudge;
2615 }
2616 }
2617
2618 /* calculate the offset of this from "start" */
2619 oldoffset = (entry->start + fudge) - start;
2620
2621 /* allocate a new map entry */
2622 newentry = uvm_mapent_alloc(dstmap, 0);
2623 if (newentry == NULL) {
2624 error = ENOMEM;
2625 goto bad;
2626 }
2627
2628 /* set up new map entry */
2629 newentry->next = NULL;
2630 newentry->prev = endchain;
2631 newentry->start = dstaddr + oldoffset;
2632 newentry->end =
2633 newentry->start + (entry->end - (entry->start + fudge));
2634 if (newentry->end > newend || newentry->end < newentry->start)
2635 newentry->end = newend;
2636 newentry->object.uvm_obj = entry->object.uvm_obj;
2637 if (newentry->object.uvm_obj) {
2638 if (newentry->object.uvm_obj->pgops->pgo_reference)
2639 newentry->object.uvm_obj->pgops->
2640 pgo_reference(newentry->object.uvm_obj);
2641 newentry->offset = entry->offset + fudge;
2642 } else {
2643 newentry->offset = 0;
2644 }
2645 newentry->etype = entry->etype;
2646 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2647 entry->max_protection : entry->protection;
2648 newentry->max_protection = entry->max_protection;
2649 newentry->inheritance = entry->inheritance;
2650 newentry->wired_count = 0;
2651 newentry->aref.ar_amap = entry->aref.ar_amap;
2652 if (newentry->aref.ar_amap) {
2653 newentry->aref.ar_pageoff =
2654 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2655 uvm_map_reference_amap(newentry, AMAP_SHARED |
2656 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2657 } else {
2658 newentry->aref.ar_pageoff = 0;
2659 }
2660 newentry->advice = entry->advice;
2661
2662 /* now link it on the chain */
2663 nchain++;
2664 if (endchain == NULL) {
2665 chain = endchain = newentry;
2666 } else {
2667 endchain->next = newentry;
2668 endchain = newentry;
2669 }
2670
2671 /* end of 'while' loop! */
2672 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2673 (entry->next == &srcmap->header ||
2674 entry->next->start != entry->end)) {
2675 error = EINVAL;
2676 goto bad;
2677 }
2678 entry = entry->next;
2679 fudge = 0;
2680 }
2681
2682 /*
2683 * step 4: close off chain (in format expected by uvm_map_replace)
2684 */
2685
2686 if (chain)
2687 chain->prev = endchain;
2688
2689 /*
2690 * step 5: attempt to lock the dest map so we can pmap_copy.
2691 * note usage of copy_ok:
2692 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2693 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2694 */
2695
2696 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2697 copy_ok = 1;
2698 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2699 nchain)) {
2700 if (srcmap != dstmap)
2701 vm_map_unlock(dstmap);
2702 error = EIO;
2703 goto bad;
2704 }
2705 } else {
2706 copy_ok = 0;
2707 		/* replace deferred until step 7 */
2708 }
2709
2710 /*
2711 * step 6: traverse the srcmap a second time to do the following:
2712 * - if we got a lock on the dstmap do pmap_copy
2713 * - if UVM_EXTRACT_REMOVE remove the entries
2714 * we make use of orig_entry and orig_fudge (saved in step 2)
2715 */
2716
2717 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2718
2719 /* purge possible stale hints from srcmap */
2720 if (flags & UVM_EXTRACT_REMOVE) {
2721 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2722 if (srcmap->first_free != &srcmap->header &&
2723 srcmap->first_free->start >= start)
2724 srcmap->first_free = orig_entry->prev;
2725 }
2726
2727 entry = orig_entry;
2728 fudge = orig_fudge;
2729 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2730
2731 while (entry->start < end && entry != &srcmap->header) {
2732 if (copy_ok) {
2733 oldoffset = (entry->start + fudge) - start;
2734 elen = MIN(end, entry->end) -
2735 (entry->start + fudge);
2736 pmap_copy(dstmap->pmap, srcmap->pmap,
2737 dstaddr + oldoffset, elen,
2738 entry->start + fudge);
2739 }
2740
2741 /* we advance "entry" in the following if statement */
2742 if (flags & UVM_EXTRACT_REMOVE) {
2743 pmap_remove(srcmap->pmap, entry->start,
2744 entry->end);
2745 oldentry = entry; /* save entry */
2746 entry = entry->next; /* advance */
2747 uvm_map_entry_unlink(srcmap, oldentry);
2748 /* add to dead list */
2749 oldentry->next = deadentry;
2750 deadentry = oldentry;
2751 } else {
2752 entry = entry->next; /* advance */
2753 }
2754
2755 /* end of 'while' loop */
2756 fudge = 0;
2757 }
2758 pmap_update(srcmap->pmap);
2759
2760 /*
2761 * unlock dstmap. we will dispose of deadentry in
2762 * step 7 if needed
2763 */
2764
2765 if (copy_ok && srcmap != dstmap)
2766 vm_map_unlock(dstmap);
2767
2768 } else {
2769 deadentry = NULL;
2770 }
2771
2772 /*
2773 * step 7: we are done with the source map, unlock. if copy_ok
2774 * is 0 then we have not replaced the dummy mapping in dstmap yet
2775 * and we need to do so now.
2776 */
2777
2778 vm_map_unlock(srcmap);
2779 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2780 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2781
2782 /* now do the replacement if we didn't do it in step 5 */
2783 if (copy_ok == 0) {
2784 vm_map_lock(dstmap);
2785 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2786 nchain);
2787 vm_map_unlock(dstmap);
2788
2789 if (error == false) {
2790 error = EIO;
2791 goto bad2;
2792 }
2793 }
2794
2795 uvm_map_check(srcmap, "map_extract src leave");
2796 uvm_map_check(dstmap, "map_extract dst leave");
2797
2798 return (0);
2799
2800 /*
2801 * bad: failure recovery
2802 */
2803 bad:
2804 vm_map_unlock(srcmap);
2805 bad2: /* src already unlocked */
2806 if (chain)
2807 uvm_unmap_detach(chain,
2808 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2809
2810 uvm_map_check(srcmap, "map_extract src err leave");
2811 uvm_map_check(dstmap, "map_extract dst err leave");
2812
2813 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2814 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2815 }
2816 return (error);
2817 }
2818
2819 /* end of extraction functions */
2820
2821 /*
2822 * uvm_map_submap: punch down part of a map into a submap
2823 *
2824 * => only the kernel_map is allowed to be submapped
2825 * => the purpose of submapping is to break up the locking granularity
2826 * of a larger map
2827 * => the range specified must have been mapped previously with a uvm_map()
2828 * call [with uobj==NULL] to create a blank map entry in the main map.
2829 * [And it had better still be blank!]
2830 * => maps which contain submaps should never be copied or forked.
2831 * => to remove a submap, use uvm_unmap() on the main map
2832 * and then uvm_map_deallocate() the submap.
2833 * => main map must be unlocked.
2834 * => submap must have been init'd and have a zero reference count.
2835 * [need not be locked as we don't actually reference it]
2836 */
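/*
 * Rough illustration of the sequence described above (cf.
 * uvm_km_suballoc(); exact arguments and setup vary):
 *
 *	(1) create the blank entry in the main map:
 *		uvm_map(kernel_map, &addr, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *		    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
 *	(2) set up "submap" for [addr, addr + size) with the same pmap and
 *	    a zero reference count, then punch it down:
 *		uvm_map_submap(kernel_map, addr, addr + size, submap);
 */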
2837
2838 int
2839 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2840 struct vm_map *submap)
2841 {
2842 struct vm_map_entry *entry;
2843 struct uvm_mapent_reservation umr;
2844 int error;
2845
2846 uvm_mapent_reserve(map, &umr, 2, 0);
2847
2848 vm_map_lock(map);
2849 VM_MAP_RANGE_CHECK(map, start, end);
2850
2851 if (uvm_map_lookup_entry(map, start, &entry)) {
2852 UVM_MAP_CLIP_START(map, entry, start, &umr);
2853 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
2854 } else {
2855 entry = NULL;
2856 }
2857
2858 if (entry != NULL &&
2859 entry->start == start && entry->end == end &&
2860 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2861 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2862 entry->etype |= UVM_ET_SUBMAP;
2863 entry->object.sub_map = submap;
2864 entry->offset = 0;
2865 uvm_map_reference(submap);
2866 error = 0;
2867 } else {
2868 error = EINVAL;
2869 }
2870 vm_map_unlock(map);
2871
2872 uvm_mapent_unreserve(map, &umr);
2873
2874 return error;
2875 }
2876
2877 /*
2878 * uvm_map_setup_kernel: init in-kernel map
2879 *
2880 * => map must not be in service yet.
2881 */
2882
2883 void
2884 uvm_map_setup_kernel(struct vm_map_kernel *map,
2885 vaddr_t vmin, vaddr_t vmax, int flags)
2886 {
2887
2888 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
2889 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
2890 LIST_INIT(&map->vmk_kentry_free);
2891 map->vmk_merged_entries = NULL;
2892 }
2893
2894
2895 /*
2896 * uvm_map_protect: change map protection
2897 *
2898 * => set_max means set max_protection.
2899 * => map must be unlocked.
2900 */
2901
2902 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
2903 ~VM_PROT_WRITE : VM_PROT_ALL)
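/*
 * Worked example of MASK() (illustrative): for a copy-on-write entry,
 * MASK() strips VM_PROT_WRITE, so a protection of
 * VM_PROT_READ|VM_PROT_WRITE reaches pmap_protect() as VM_PROT_READ
 * only; write access is granted later by the copy-on-write fault.
 * For all other entries MASK() is VM_PROT_ALL and changes nothing.
 */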
2904
2905 int
2906 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2907 vm_prot_t new_prot, bool set_max)
2908 {
2909 struct vm_map_entry *current, *entry;
2910 int error = 0;
2911 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2912 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
2913 map, start, end, new_prot);
2914
2915 vm_map_lock(map);
2916 VM_MAP_RANGE_CHECK(map, start, end);
2917 if (uvm_map_lookup_entry(map, start, &entry)) {
2918 UVM_MAP_CLIP_START(map, entry, start, NULL);
2919 } else {
2920 entry = entry->next;
2921 }
2922
2923 /*
2924 * make a first pass to check for protection violations.
2925 */
2926
2927 current = entry;
2928 while ((current != &map->header) && (current->start < end)) {
2929 if (UVM_ET_ISSUBMAP(current)) {
2930 error = EINVAL;
2931 goto out;
2932 }
2933 if ((new_prot & current->max_protection) != new_prot) {
2934 error = EACCES;
2935 goto out;
2936 }
2937 /*
2938 * Don't allow VM_PROT_EXECUTE to be set on entries that
2939 * point to vnodes that are associated with a NOEXEC file
2940 * system.
2941 */
2942 if (UVM_ET_ISOBJ(current) &&
2943 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
2944 struct vnode *vp =
2945 (struct vnode *) current->object.uvm_obj;
2946
2947 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
2948 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
2949 error = EACCES;
2950 goto out;
2951 }
2952 }
2953
2954 current = current->next;
2955 }
2956
2957 /* go back and fix up protections (no need to clip this time). */
2958
2959 current = entry;
2960 while ((current != &map->header) && (current->start < end)) {
2961 vm_prot_t old_prot;
2962
2963 UVM_MAP_CLIP_END(map, current, end, NULL);
2964 old_prot = current->protection;
2965 if (set_max)
2966 current->protection =
2967 (current->max_protection = new_prot) & old_prot;
2968 else
2969 current->protection = new_prot;
2970
2971 /*
2972 * update physical map if necessary. worry about copy-on-write
2973 * here -- CHECK THIS XXX
2974 */
2975
2976 if (current->protection != old_prot) {
2977 /* update pmap! */
2978 pmap_protect(map->pmap, current->start, current->end,
2979 current->protection & MASK(entry));
2980
2981 /*
2982 * If this entry points at a vnode, and the
2983 * protection includes VM_PROT_EXECUTE, mark
2984 * the vnode as VEXECMAP.
2985 */
2986 if (UVM_ET_ISOBJ(current)) {
2987 struct uvm_object *uobj =
2988 current->object.uvm_obj;
2989
2990 if (UVM_OBJ_IS_VNODE(uobj) &&
2991 (current->protection & VM_PROT_EXECUTE)) {
2992 simple_lock(&uobj->vmobjlock);
2993 vn_markexec((struct vnode *) uobj);
2994 simple_unlock(&uobj->vmobjlock);
2995 }
2996 }
2997 }
2998
2999 /*
3000 * If the map is configured to lock any future mappings,
3001 * wire this entry now if the old protection was VM_PROT_NONE
3002 * and the new protection is not VM_PROT_NONE.
3003 */
3004
3005 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3006 VM_MAPENT_ISWIRED(entry) == 0 &&
3007 old_prot == VM_PROT_NONE &&
3008 new_prot != VM_PROT_NONE) {
3009 if (uvm_map_pageable(map, entry->start,
3010 entry->end, false,
3011 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3012
3013 /*
3014 * If locking the entry fails, remember the
3015 * error if it's the first one. Note we
3016 * still continue setting the protection in
3017 * the map, but will return the error
3018 * condition regardless.
3019 *
3020 * XXX Ignore what the actual error is,
3021 * XXX just call it a resource shortage
3022 * XXX so that it doesn't get confused
3023 * XXX what uvm_map_protect() itself would
3024 * XXX normally return.
3025 */
3026
3027 error = ENOMEM;
3028 }
3029 }
3030 current = current->next;
3031 }
3032 pmap_update(map->pmap);
3033
3034 out:
3035 vm_map_unlock(map);
3036
3037 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3038 return error;
3039 }
3040
3041 #undef MASK
3042
3043 /*
3044 * uvm_map_inherit: set inheritance code for range of addrs in map.
3045 *
3046 * => map must be unlocked
3047 * => note that the inherit code is used during a "fork". see fork
3048 * code for details.
3049 */
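/*
 * Illustrative caller (cf. sys_minherit(); argument checks omitted):
 *
 *	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
 *	    inherit);
 */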
3050
3051 int
3052 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3053 vm_inherit_t new_inheritance)
3054 {
3055 struct vm_map_entry *entry, *temp_entry;
3056 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3057 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3058 map, start, end, new_inheritance);
3059
3060 switch (new_inheritance) {
3061 case MAP_INHERIT_NONE:
3062 case MAP_INHERIT_COPY:
3063 case MAP_INHERIT_SHARE:
3064 break;
3065 default:
3066 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3067 return EINVAL;
3068 }
3069
3070 vm_map_lock(map);
3071 VM_MAP_RANGE_CHECK(map, start, end);
3072 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3073 entry = temp_entry;
3074 UVM_MAP_CLIP_START(map, entry, start, NULL);
3075 } else {
3076 entry = temp_entry->next;
3077 }
3078 while ((entry != &map->header) && (entry->start < end)) {
3079 UVM_MAP_CLIP_END(map, entry, end, NULL);
3080 entry->inheritance = new_inheritance;
3081 entry = entry->next;
3082 }
3083 vm_map_unlock(map);
3084 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3085 return 0;
3086 }
3087
3088 /*
3089 * uvm_map_advice: set advice code for range of addrs in map.
3090 *
3091 * => map must be unlocked
3092 */
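/*
 * Illustrative caller (cf. sys_madvise() for MADV_NORMAL, MADV_RANDOM
 * and MADV_SEQUENTIAL; MADV_DONTNEED and MADV_FREE go to
 * uvm_map_clean() instead):
 *
 *	error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
 *	    advice);
 */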
3093
3094 int
3095 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3096 {
3097 struct vm_map_entry *entry, *temp_entry;
3098 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3099 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3100 map, start, end, new_advice);
3101
3102 vm_map_lock(map);
3103 VM_MAP_RANGE_CHECK(map, start, end);
3104 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3105 entry = temp_entry;
3106 UVM_MAP_CLIP_START(map, entry, start, NULL);
3107 } else {
3108 entry = temp_entry->next;
3109 }
3110
3111 /*
3112 * XXXJRT: disallow holes?
3113 */
3114
3115 while ((entry != &map->header) && (entry->start < end)) {
3116 UVM_MAP_CLIP_END(map, entry, end, NULL);
3117
3118 switch (new_advice) {
3119 case MADV_NORMAL:
3120 case MADV_RANDOM:
3121 case MADV_SEQUENTIAL:
3122 /* nothing special here */
3123 break;
3124
3125 default:
3126 vm_map_unlock(map);
3127 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3128 return EINVAL;
3129 }
3130 entry->advice = new_advice;
3131 entry = entry->next;
3132 }
3133
3134 vm_map_unlock(map);
3135 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3136 return 0;
3137 }
3138
3139 /*
3140 * uvm_map_pageable: sets the pageability of a range in a map.
3141 *
3142 * => wires map entries. should not be used for transient page locking.
3143 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3144 * => regions specified as not pageable require lock-down (wired) memory
3145 * and page tables.
3146 * => map must never be read-locked
3147 * => if islocked is true, map is already write-locked
3148 * => we always unlock the map, since we must downgrade to a read-lock
3149 * to call uvm_fault_wire()
3150 * => XXXCDC: check this and try and clean it up.
3151 */
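/*
 * Illustrative callers (cf. sys_mlock()/sys_munlock(); argument
 * validation omitted): wiring a range is
 *
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start,
 *	    start + size, false, 0);
 *
 * and unwiring it passes true for new_pageable instead.
 */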
3152
3153 int
3154 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3155 bool new_pageable, int lockflags)
3156 {
3157 struct vm_map_entry *entry, *start_entry, *failed_entry;
3158 int rv;
3159 #ifdef DIAGNOSTIC
3160 u_int timestamp_save;
3161 #endif
3162 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3163 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3164 map, start, end, new_pageable);
3165 KASSERT(map->flags & VM_MAP_PAGEABLE);
3166
3167 if ((lockflags & UVM_LK_ENTER) == 0)
3168 vm_map_lock(map);
3169 VM_MAP_RANGE_CHECK(map, start, end);
3170
3171 /*
3172 * only one pageability change may take place at one time, since
3173 * uvm_fault_wire assumes it will be called only once for each
3174 * wiring/unwiring. therefore, we have to make sure we're actually
3175 * changing the pageability for the entire region. we do so before
3176 * making any changes.
3177 */
3178
3179 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3180 if ((lockflags & UVM_LK_EXIT) == 0)
3181 vm_map_unlock(map);
3182
3183 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3184 return EFAULT;
3185 }
3186 entry = start_entry;
3187
3188 /*
3189 * handle wiring and unwiring separately.
3190 */
3191
3192 if (new_pageable) { /* unwire */
3193 UVM_MAP_CLIP_START(map, entry, start, NULL);
3194
3195 /*
3196 * unwiring. first ensure that the range to be unwired is
3197 * really wired down and that there are no holes.
3198 */
3199
3200 while ((entry != &map->header) && (entry->start < end)) {
3201 if (entry->wired_count == 0 ||
3202 (entry->end < end &&
3203 (entry->next == &map->header ||
3204 entry->next->start > entry->end))) {
3205 if ((lockflags & UVM_LK_EXIT) == 0)
3206 vm_map_unlock(map);
3207 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3208 return EINVAL;
3209 }
3210 entry = entry->next;
3211 }
3212
3213 /*
3214 * POSIX 1003.1b - a single munlock call unlocks a region,
3215 * regardless of the number of mlock calls made on that
3216 * region.
3217 */
3218
3219 entry = start_entry;
3220 while ((entry != &map->header) && (entry->start < end)) {
3221 UVM_MAP_CLIP_END(map, entry, end, NULL);
3222 if (VM_MAPENT_ISWIRED(entry))
3223 uvm_map_entry_unwire(map, entry);
3224 entry = entry->next;
3225 }
3226 if ((lockflags & UVM_LK_EXIT) == 0)
3227 vm_map_unlock(map);
3228 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3229 return 0;
3230 }
3231
3232 /*
3233 * wire case: in two passes [XXXCDC: ugly block of code here]
3234 *
3235 * 1: holding the write lock, we create any anonymous maps that need
3236 * to be created. then we clip each map entry to the region to
3237 * be wired and increment its wiring count.
3238 *
3239 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3240 * in the pages for any newly wired area (wired_count == 1).
3241 *
3242 * downgrading to a read lock for uvm_fault_wire avoids a possible
3243 * deadlock with another thread that may have faulted on one of
3244 * the pages to be wired (it would mark the page busy, blocking
3245 * us, then in turn block on the map lock that we hold). because
3246 * of problems in the recursive lock package, we cannot upgrade
3247 * to a write lock in vm_map_lookup. thus, any actions that
3248 * require the write lock must be done beforehand. because we
3249 * keep the read lock on the map, the copy-on-write status of the
3250 * entries we modify here cannot change.
3251 */
3252
3253 while ((entry != &map->header) && (entry->start < end)) {
3254 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3255
3256 /*
3257 * perform actions of vm_map_lookup that need the
3258 * write lock on the map: create an anonymous map
3259 * for a copy-on-write region, or an anonymous map
3260 * for a zero-fill region. (XXXCDC: submap case
3261 * ok?)
3262 */
3263
3264 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3265 if (UVM_ET_ISNEEDSCOPY(entry) &&
3266 ((entry->max_protection & VM_PROT_WRITE) ||
3267 (entry->object.uvm_obj == NULL))) {
3268 amap_copy(map, entry, 0, start, end);
3269 /* XXXCDC: wait OK? */
3270 }
3271 }
3272 }
3273 UVM_MAP_CLIP_START(map, entry, start, NULL);
3274 UVM_MAP_CLIP_END(map, entry, end, NULL);
3275 entry->wired_count++;
3276
3277 /*
3278 * Check for holes
3279 */
3280
3281 if (entry->protection == VM_PROT_NONE ||
3282 (entry->end < end &&
3283 (entry->next == &map->header ||
3284 entry->next->start > entry->end))) {
3285
3286 /*
3287 * found one. amap creation actions do not need to
3288 * be undone, but the wired counts need to be restored.
3289 */
3290
3291 while (entry != &map->header && entry->end > start) {
3292 entry->wired_count--;
3293 entry = entry->prev;
3294 }
3295 if ((lockflags & UVM_LK_EXIT) == 0)
3296 vm_map_unlock(map);
3297 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3298 return EINVAL;
3299 }
3300 entry = entry->next;
3301 }
3302
3303 /*
3304 * Pass 2.
3305 */
3306
3307 #ifdef DIAGNOSTIC
3308 timestamp_save = map->timestamp;
3309 #endif
3310 vm_map_busy(map);
3311 vm_map_downgrade(map);
3312
3313 rv = 0;
3314 entry = start_entry;
3315 while (entry != &map->header && entry->start < end) {
3316 if (entry->wired_count == 1) {
3317 rv = uvm_fault_wire(map, entry->start, entry->end,
3318 entry->max_protection, 1);
3319 if (rv) {
3320
3321 /*
3322 * wiring failed. break out of the loop.
3323 * we'll clean up the map below, once we
3324 * have a write lock again.
3325 */
3326
3327 break;
3328 }
3329 }
3330 entry = entry->next;
3331 }
3332
3333 if (rv) { /* failed? */
3334
3335 /*
3336 * Get back to an exclusive (write) lock.
3337 */
3338
3339 vm_map_upgrade(map);
3340 vm_map_unbusy(map);
3341
3342 #ifdef DIAGNOSTIC
3343 if (timestamp_save != map->timestamp)
3344 panic("uvm_map_pageable: stale map");
3345 #endif
3346
3347 /*
3348 * first drop the wiring count on all the entries
3349 * which haven't actually been wired yet.
3350 */
3351
3352 failed_entry = entry;
3353 while (entry != &map->header && entry->start < end) {
3354 entry->wired_count--;
3355 entry = entry->next;
3356 }
3357
3358 /*
3359 * now, unwire all the entries that were successfully
3360 * wired above.
3361 */
3362
3363 entry = start_entry;
3364 while (entry != failed_entry) {
3365 entry->wired_count--;
3366 if (VM_MAPENT_ISWIRED(entry) == 0)
3367 uvm_map_entry_unwire(map, entry);
3368 entry = entry->next;
3369 }
3370 if ((lockflags & UVM_LK_EXIT) == 0)
3371 vm_map_unlock(map);
3372 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3373 return (rv);
3374 }
3375
3376 /* We are holding a read lock here. */
3377 if ((lockflags & UVM_LK_EXIT) == 0) {
3378 vm_map_unbusy(map);
3379 vm_map_unlock_read(map);
3380 } else {
3381
3382 /*
3383 * Get back to an exclusive (write) lock.
3384 */
3385
3386 vm_map_upgrade(map);
3387 vm_map_unbusy(map);
3388 }
3389
3390 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3391 return 0;
3392 }
3393
3394 /*
3395 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3396 * all mapped regions.
3397 *
3398 * => map must not be locked.
3399 * => if no flags are specified, all regions are unwired.
3400 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3401 */
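/*
 * Illustrative caller (cf. sys_mlockall()/sys_munlockall(); flags and
 * the limit come from the caller):
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * munlockall simply passes flags == 0, which unwires everything and
 * clears VM_MAP_WIREFUTURE.
 */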
3402
3403 int
3404 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3405 {
3406 struct vm_map_entry *entry, *failed_entry;
3407 vsize_t size;
3408 int rv;
3409 #ifdef DIAGNOSTIC
3410 u_int timestamp_save;
3411 #endif
3412 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3413 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3414
3415 KASSERT(map->flags & VM_MAP_PAGEABLE);
3416
3417 vm_map_lock(map);
3418
3419 /*
3420 * handle wiring and unwiring separately.
3421 */
3422
3423 if (flags == 0) { /* unwire */
3424
3425 /*
3426 * POSIX 1003.1b -- munlockall unlocks all regions,
3427 * regardless of how many times mlockall has been called.
3428 */
3429
3430 for (entry = map->header.next; entry != &map->header;
3431 entry = entry->next) {
3432 if (VM_MAPENT_ISWIRED(entry))
3433 uvm_map_entry_unwire(map, entry);
3434 }
3435 map->flags &= ~VM_MAP_WIREFUTURE;
3436 vm_map_unlock(map);
3437 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3438 return 0;
3439 }
3440
3441 if (flags & MCL_FUTURE) {
3442
3443 /*
3444 * must wire all future mappings; remember this.
3445 */
3446
3447 map->flags |= VM_MAP_WIREFUTURE;
3448 }
3449
3450 if ((flags & MCL_CURRENT) == 0) {
3451
3452 /*
3453 * no more work to do!
3454 */
3455
3456 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3457 vm_map_unlock(map);
3458 return 0;
3459 }
3460
3461 /*
3462 * wire case: in three passes [XXXCDC: ugly block of code here]
3463 *
3464 * 1: holding the write lock, count all pages mapped by non-wired
3465 * entries. if this would cause us to go over our limit, we fail.
3466 *
3467 * 2: still holding the write lock, we create any anonymous maps that
3468  *    need to be created. then we increment each entry's wiring count.
3469 *
3470 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3471 * in the pages for any newly wired area (wired_count == 1).
3472 *
3473 * downgrading to a read lock for uvm_fault_wire avoids a possible
3474 * deadlock with another thread that may have faulted on one of
3475 * the pages to be wired (it would mark the page busy, blocking
3476 * us, then in turn block on the map lock that we hold). because
3477 * of problems in the recursive lock package, we cannot upgrade
3478 * to a write lock in vm_map_lookup. thus, any actions that
3479 * require the write lock must be done beforehand. because we
3480 * keep the read lock on the map, the copy-on-write status of the
3481 * entries we modify here cannot change.
3482 */
3483
3484 for (size = 0, entry = map->header.next; entry != &map->header;
3485 entry = entry->next) {
3486 if (entry->protection != VM_PROT_NONE &&
3487 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3488 size += entry->end - entry->start;
3489 }
3490 }
3491
3492 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3493 vm_map_unlock(map);
3494 return ENOMEM;
3495 }
3496
3497 if (limit != 0 &&
3498 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3499 vm_map_unlock(map);
3500 return ENOMEM;
3501 }
3502
3503 /*
3504 * Pass 2.
3505 */
3506
3507 for (entry = map->header.next; entry != &map->header;
3508 entry = entry->next) {
3509 if (entry->protection == VM_PROT_NONE)
3510 continue;
3511 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3512
3513 /*
3514 * perform actions of vm_map_lookup that need the
3515 * write lock on the map: create an anonymous map
3516 * for a copy-on-write region, or an anonymous map
3517 * for a zero-fill region. (XXXCDC: submap case
3518 * ok?)
3519 */
3520
3521 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3522 if (UVM_ET_ISNEEDSCOPY(entry) &&
3523 ((entry->max_protection & VM_PROT_WRITE) ||
3524 (entry->object.uvm_obj == NULL))) {
3525 amap_copy(map, entry, 0, entry->start,
3526 entry->end);
3527 /* XXXCDC: wait OK? */
3528 }
3529 }
3530 }
3531 entry->wired_count++;
3532 }
3533
3534 /*
3535 * Pass 3.
3536 */
3537
3538 #ifdef DIAGNOSTIC
3539 timestamp_save = map->timestamp;
3540 #endif
3541 vm_map_busy(map);
3542 vm_map_downgrade(map);
3543
3544 rv = 0;
3545 for (entry = map->header.next; entry != &map->header;
3546 entry = entry->next) {
3547 if (entry->wired_count == 1) {
3548 rv = uvm_fault_wire(map, entry->start, entry->end,
3549 entry->max_protection, 1);
3550 if (rv) {
3551
3552 /*
3553 * wiring failed. break out of the loop.
3554 * we'll clean up the map below, once we
3555 * have a write lock again.
3556 */
3557
3558 break;
3559 }
3560 }
3561 }
3562
3563 if (rv) {
3564
3565 /*
3566 * Get back an exclusive (write) lock.
3567 */
3568
3569 vm_map_upgrade(map);
3570 vm_map_unbusy(map);
3571
3572 #ifdef DIAGNOSTIC
3573 if (timestamp_save != map->timestamp)
3574 panic("uvm_map_pageable_all: stale map");
3575 #endif
3576
3577 /*
3578 * first drop the wiring count on all the entries
3579 * which haven't actually been wired yet.
3580 *
3581 * Skip VM_PROT_NONE entries like we did above.
3582 */
3583
3584 failed_entry = entry;
3585 for (/* nothing */; entry != &map->header;
3586 entry = entry->next) {
3587 if (entry->protection == VM_PROT_NONE)
3588 continue;
3589 entry->wired_count--;
3590 }
3591
3592 /*
3593 * now, unwire all the entries that were successfully
3594 * wired above.
3595 *
3596 * Skip VM_PROT_NONE entries like we did above.
3597 */
3598
3599 for (entry = map->header.next; entry != failed_entry;
3600 entry = entry->next) {
3601 if (entry->protection == VM_PROT_NONE)
3602 continue;
3603 entry->wired_count--;
3604 if (VM_MAPENT_ISWIRED(entry))
3605 uvm_map_entry_unwire(map, entry);
3606 }
3607 vm_map_unlock(map);
3608 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3609 return (rv);
3610 }
3611
3612 /* We are holding a read lock here. */
3613 vm_map_unbusy(map);
3614 vm_map_unlock_read(map);
3615
3616 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3617 return 0;
3618 }
3619
3620 /*
3621 * uvm_map_clean: clean out a map range
3622 *
3623 * => valid flags:
3624 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3625 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3626 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3627 * if (flags & PGO_FREE): any cached pages are freed after clean
3628 * => returns an error if any part of the specified range isn't mapped
3629 * => never a need to flush amap layer since the anonymous memory has
3630 * no permanent home, but may deactivate pages there
3631 * => called from sys_msync() and sys_madvise()
3632 * => caller must not write-lock map (read OK).
3633 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3634 */
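/*
 * Illustrative flag usage (roughly what sys_msync() and sys_madvise()
 * pass; see those functions for the exact mappings): msync() passes
 * PGO_CLEANIT, adding PGO_SYNCIO for MS_SYNC and PGO_FREE for
 * MS_INVALIDATE, e.g.
 *
 *	error = uvm_map_clean(map, addr, addr + size,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *
 * while madvise() uses PGO_DEACTIVATE for MADV_DONTNEED and PGO_FREE
 * for MADV_FREE.
 */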
3635
3636 int
3637 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3638 {
3639 struct vm_map_entry *current, *entry;
3640 struct uvm_object *uobj;
3641 struct vm_amap *amap;
3642 struct vm_anon *anon;
3643 struct vm_page *pg;
3644 vaddr_t offset;
3645 vsize_t size;
3646 voff_t uoff;
3647 int error, refs;
3648 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3649
3650 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3651 map, start, end, flags);
3652 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3653 (PGO_FREE|PGO_DEACTIVATE));
3654
3655 vm_map_lock_read(map);
3656 VM_MAP_RANGE_CHECK(map, start, end);
3657 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3658 vm_map_unlock_read(map);
3659 return EFAULT;
3660 }
3661
3662 /*
3663 * Make a first pass to check for holes and wiring problems.
3664 */
3665
3666 for (current = entry; current->start < end; current = current->next) {
3667 if (UVM_ET_ISSUBMAP(current)) {
3668 vm_map_unlock_read(map);
3669 return EINVAL;
3670 }
3671 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3672 vm_map_unlock_read(map);
3673 return EBUSY;
3674 }
3675 if (end <= current->end) {
3676 break;
3677 }
3678 if (current->end != current->next->start) {
3679 vm_map_unlock_read(map);
3680 return EFAULT;
3681 }
3682 }
3683
3684 error = 0;
3685 for (current = entry; start < end; current = current->next) {
3686 amap = current->aref.ar_amap; /* top layer */
3687 uobj = current->object.uvm_obj; /* bottom layer */
3688 KASSERT(start >= current->start);
3689
3690 /*
3691 * No amap cleaning necessary if:
3692 *
3693 * (1) There's no amap.
3694 *
3695 * (2) We're not deactivating or freeing pages.
3696 */
3697
3698 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3699 goto flush_object;
3700
3701 amap_lock(amap);
3702 offset = start - current->start;
3703 size = MIN(end, current->end) - start;
3704 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3705 			anon = amap_lookup(&current->aref, offset);
3706 if (anon == NULL)
3707 continue;
3708
3709 simple_lock(&anon->an_lock);
3710 pg = anon->an_page;
3711 if (pg == NULL) {
3712 simple_unlock(&anon->an_lock);
3713 continue;
3714 }
3715
3716 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3717
3718 /*
3719 * In these first 3 cases, we just deactivate the page.
3720 */
3721
3722 case PGO_CLEANIT|PGO_FREE:
3723 case PGO_CLEANIT|PGO_DEACTIVATE:
3724 case PGO_DEACTIVATE:
3725 deactivate_it:
3726 /*
3727 * skip the page if it's loaned or wired,
3728 * since it shouldn't be on a paging queue
3729 * at all in these cases.
3730 */
3731
3732 uvm_lock_pageq();
3733 if (pg->loan_count != 0 ||
3734 pg->wire_count != 0) {
3735 uvm_unlock_pageq();
3736 simple_unlock(&anon->an_lock);
3737 continue;
3738 }
3739 KASSERT(pg->uanon == anon);
3740 pmap_clear_reference(pg);
3741 uvm_pagedeactivate(pg);
3742 uvm_unlock_pageq();
3743 simple_unlock(&anon->an_lock);
3744 continue;
3745
3746 case PGO_FREE:
3747
3748 /*
3749 * If there are multiple references to
3750 * the amap, just deactivate the page.
3751 */
3752
3753 if (amap_refs(amap) > 1)
3754 goto deactivate_it;
3755
3756 /* skip the page if it's wired */
3757 if (pg->wire_count != 0) {
3758 simple_unlock(&anon->an_lock);
3759 continue;
3760 }
3761 				amap_unadd(&current->aref, offset);
3762 refs = --anon->an_ref;
3763 simple_unlock(&anon->an_lock);
3764 if (refs == 0)
3765 uvm_anfree(anon);
3766 continue;
3767 }
3768 }
3769 amap_unlock(amap);
3770
3771 flush_object:
3772 /*
3773 * flush pages if we've got a valid backing object.
3774 * note that we must always clean object pages before
3775 * freeing them since otherwise we could reveal stale
3776 * data from files.
3777 */
3778
3779 uoff = current->offset + (start - current->start);
3780 size = MIN(end, current->end) - start;
3781 if (uobj != NULL) {
3782 simple_lock(&uobj->vmobjlock);
3783 if (uobj->pgops->pgo_put != NULL)
3784 error = (uobj->pgops->pgo_put)(uobj, uoff,
3785 uoff + size, flags | PGO_CLEANIT);
3786 else
3787 error = 0;
3788 }
3789 start += size;
3790 }
3791 vm_map_unlock_read(map);
3792 return (error);
3793 }
3794
3795
3796 /*
3797 * uvm_map_checkprot: check protection in map
3798 *
3799 * => must allow specified protection in a fully allocated region.
3800 * => map must be read or write locked by caller.
3801 */
3802
3803 bool
3804 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3805 vm_prot_t protection)
3806 {
3807 struct vm_map_entry *entry;
3808 struct vm_map_entry *tmp_entry;
3809
3810 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3811 return (false);
3812 }
3813 entry = tmp_entry;
3814 while (start < end) {
3815 if (entry == &map->header) {
3816 return (false);
3817 }
3818
3819 /*
3820 * no holes allowed
3821 */
3822
3823 if (start < entry->start) {
3824 return (false);
3825 }
3826
3827 /*
3828 * check protection associated with entry
3829 */
3830
3831 if ((entry->protection & protection) != protection) {
3832 return (false);
3833 }
3834 start = entry->end;
3835 entry = entry->next;
3836 }
3837 return (true);
3838 }
3839
3840 /*
3841 * uvmspace_alloc: allocate a vmspace structure.
3842 *
3843 * - structure includes vm_map and pmap
3844 * - XXX: no locking on this structure
3845 * - refcnt set to 1, rest must be init'd by caller
3846 */
3847 struct vmspace *
3848 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
3849 {
3850 struct vmspace *vm;
3851 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3852
3853 vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
3854 uvmspace_init(vm, NULL, vmin, vmax);
3855 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3856 return (vm);
3857 }
3858
3859 /*
3860 * uvmspace_init: initialize a vmspace structure.
3861 *
3862 * - XXX: no locking on this structure
3863 * - refcnt set to 1, rest must be init'd by caller
3864 */
3865 void
3866 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
3867 {
3868 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3869
3870 memset(vm, 0, sizeof(*vm));
3871 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
3872 #ifdef __USING_TOPDOWN_VM
3873 | VM_MAP_TOPDOWN
3874 #endif
3875 );
3876 if (pmap)
3877 pmap_reference(pmap);
3878 else
3879 pmap = pmap_create();
3880 vm->vm_map.pmap = pmap;
3881 vm->vm_refcnt = 1;
3882 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3883 }
3884
3885 /*
3886 * uvmspace_share: share a vmspace between two processes
3887 *
3888 * - used for vfork, threads(?)
3889 */
3890
3891 void
3892 uvmspace_share(struct proc *p1, struct proc *p2)
3893 {
3894
3895 uvmspace_addref(p1->p_vmspace);
3896 p2->p_vmspace = p1->p_vmspace;
3897 }
3898
3899 /*
3900 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
3901 *
3902 * - XXX: no locking on vmspace
3903 */
3904
3905 void
3906 uvmspace_unshare(struct lwp *l)
3907 {
3908 struct proc *p = l->l_proc;
3909 struct vmspace *nvm, *ovm = p->p_vmspace;
3910
3911 if (ovm->vm_refcnt == 1)
3912 /* nothing to do: vmspace isn't shared in the first place */
3913 return;
3914
3915 /* make a new vmspace, still holding old one */
3916 nvm = uvmspace_fork(ovm);
3917
3918 pmap_deactivate(l); /* unbind old vmspace */
3919 p->p_vmspace = nvm;
3920 pmap_activate(l); /* switch to new vmspace */
3921
3922 uvmspace_free(ovm); /* drop reference to old vmspace */
3923 }
3924
3925 /*
3926 * uvmspace_exec: the process wants to exec a new program
3927 */
3928
3929 void
3930 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
3931 {
3932 struct proc *p = l->l_proc;
3933 struct vmspace *nvm, *ovm = p->p_vmspace;
3934 struct vm_map *map = &ovm->vm_map;
3935
3936 #ifdef __sparc__
3937 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
3938 kill_user_windows(l); /* before stack addresses go away */
3939 #endif
3940
3941 /*
3942 * see if more than one process is using this vmspace...
3943 */
3944
3945 if (ovm->vm_refcnt == 1) {
3946
3947 /*
3948 * if p is the only process using its vmspace then we can safely
3949 * recycle that vmspace for the program that is being exec'd.
3950 */
3951
3952 #ifdef SYSVSHM
3953 /*
3954 * SYSV SHM semantics require us to kill all segments on an exec
3955 */
3956
3957 if (ovm->vm_shm)
3958 shmexit(ovm);
3959 #endif
3960
3961 /*
3962 * POSIX 1003.1b -- "lock future mappings" is revoked
3963 * when a process execs another program image.
3964 */
3965
3966 map->flags &= ~VM_MAP_WIREFUTURE;
3967
3968 /*
3969 * now unmap the old program
3970 */
3971
3972 pmap_remove_all(map->pmap);
3973 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
3974 KASSERT(map->header.prev == &map->header);
3975 KASSERT(map->nentries == 0);
3976
3977 /*
3978 * resize the map
3979 */
3980
3981 vm_map_setmin(map, start);
3982 vm_map_setmax(map, end);
3983 } else {
3984
3985 /*
3986 * p's vmspace is being shared, so we can't reuse it for p since
3987 * it is still being used for others. allocate a new vmspace
3988 * for p
3989 */
3990
3991 nvm = uvmspace_alloc(start, end);
3992
3993 /*
3994 * install new vmspace and drop our ref to the old one.
3995 */
3996
3997 pmap_deactivate(l);
3998 p->p_vmspace = nvm;
3999 pmap_activate(l);
4000
4001 uvmspace_free(ovm);
4002 }
4003 }
4004
4005 /*
4006  * uvmspace_addref: add a reference to a vmspace.
4007 */
4008
4009 void
4010 uvmspace_addref(struct vmspace *vm)
4011 {
4012 struct vm_map *map = &vm->vm_map;
4013
4014 KASSERT((map->flags & VM_MAP_DYING) == 0);
4015
4016 mutex_enter(&map->misc_lock);
4017 KASSERT(vm->vm_refcnt > 0);
4018 vm->vm_refcnt++;
4019 mutex_exit(&map->misc_lock);
4020 }
4021
4022 /*
4023 * uvmspace_free: free a vmspace data structure
4024 */
4025
4026 void
4027 uvmspace_free(struct vmspace *vm)
4028 {
4029 struct vm_map_entry *dead_entries;
4030 struct vm_map *map = &vm->vm_map;
4031 int n;
4032
4033 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4034
4035 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4036 mutex_enter(&map->misc_lock);
4037 n = --vm->vm_refcnt;
4038 mutex_exit(&map->misc_lock);
4039 if (n > 0)
4040 return;
4041
4042 /*
4043 * at this point, there should be no other references to the map.
4044 * delete all of the mappings, then destroy the pmap.
4045 */
4046
4047 map->flags |= VM_MAP_DYING;
4048 pmap_remove_all(map->pmap);
4049 #ifdef SYSVSHM
4050 /* Get rid of any SYSV shared memory segments. */
4051 if (vm->vm_shm != NULL)
4052 shmexit(vm);
4053 #endif
4054 if (map->nentries) {
4055 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4056 &dead_entries, NULL, 0);
4057 if (dead_entries != NULL)
4058 uvm_unmap_detach(dead_entries, 0);
4059 }
4060 KASSERT(map->nentries == 0);
4061 KASSERT(map->size == 0);
4062 mutex_destroy(&map->misc_lock);
4063 mutex_destroy(&map->hint_lock);
4064 mutex_destroy(&map->mutex);
4065 rw_destroy(&map->lock);
4066 pmap_destroy(map->pmap);
4067 pool_put(&uvm_vmspace_pool, vm);
4068 }
4069
4070 /*
4071 * F O R K - m a i n e n t r y p o i n t
4072 */
4073 /*
4074 * uvmspace_fork: fork a process' main map
4075 *
4076 * => create a new vmspace for child process from parent.
4077 * => parent's map must not be locked.
4078 */
4079
4080 struct vmspace *
4081 uvmspace_fork(struct vmspace *vm1)
4082 {
4083 struct vmspace *vm2;
4084 struct vm_map *old_map = &vm1->vm_map;
4085 struct vm_map *new_map;
4086 struct vm_map_entry *old_entry;
4087 struct vm_map_entry *new_entry;
4088 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4089
4090 vm_map_lock(old_map);
4091
4092 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4093 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4094 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4095 new_map = &vm2->vm_map; /* XXX */
4096
4097 old_entry = old_map->header.next;
4098 new_map->size = old_map->size;
4099
4100 /*
4101 * go entry-by-entry
4102 */
4103
4104 while (old_entry != &old_map->header) {
4105
4106 /*
4107 * first, some sanity checks on the old entry
4108 */
4109
4110 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4111 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4112 !UVM_ET_ISNEEDSCOPY(old_entry));
4113
4114 switch (old_entry->inheritance) {
4115 case MAP_INHERIT_NONE:
4116
4117 /*
4118 * drop the mapping, modify size
4119 */
4120 new_map->size -= old_entry->end - old_entry->start;
4121 break;
4122
4123 case MAP_INHERIT_SHARE:
4124
4125 /*
4126 * share the mapping: this means we want the old and
4127 * new entries to share amaps and backing objects.
4128 */
4129 /*
4130			 * if the old_entry needs a new amap (due to a previous fork)
4131 * then we need to allocate it now so that we have
4132 * something we own to share with the new_entry. [in
4133 * other words, we need to clear needs_copy]
4134 */
4135
4136 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4137 /* get our own amap, clears needs_copy */
4138 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4139 0, 0);
4140 /* XXXCDC: WAITOK??? */
4141 }
4142
4143 new_entry = uvm_mapent_alloc(new_map, 0);
4144 /* old_entry -> new_entry */
4145 uvm_mapent_copy(old_entry, new_entry);
4146
4147 /* new pmap has nothing wired in it */
4148 new_entry->wired_count = 0;
4149
4150 /*
4151 * gain reference to object backing the map (can't
4152 * be a submap, already checked this case).
4153 */
4154
4155 if (new_entry->aref.ar_amap)
4156 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4157
4158 if (new_entry->object.uvm_obj &&
4159 new_entry->object.uvm_obj->pgops->pgo_reference)
4160 new_entry->object.uvm_obj->
4161 pgops->pgo_reference(
4162 new_entry->object.uvm_obj);
4163
4164 /* insert entry at end of new_map's entry list */
4165 uvm_map_entry_link(new_map, new_map->header.prev,
4166 new_entry);
4167
4168 break;
4169
4170 case MAP_INHERIT_COPY:
4171
4172 /*
4173 * copy-on-write the mapping (using mmap's
4174 * MAP_PRIVATE semantics)
4175 *
4176 * allocate new_entry, adjust reference counts.
4177 * (note that new references are read-only).
4178 */
4179
4180 new_entry = uvm_mapent_alloc(new_map, 0);
4181 /* old_entry -> new_entry */
4182 uvm_mapent_copy(old_entry, new_entry);
4183
4184 if (new_entry->aref.ar_amap)
4185 uvm_map_reference_amap(new_entry, 0);
4186
4187 if (new_entry->object.uvm_obj &&
4188 new_entry->object.uvm_obj->pgops->pgo_reference)
4189 new_entry->object.uvm_obj->pgops->pgo_reference
4190 (new_entry->object.uvm_obj);
4191
4192 /* new pmap has nothing wired in it */
4193 new_entry->wired_count = 0;
4194
4195 new_entry->etype |=
4196 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4197 uvm_map_entry_link(new_map, new_map->header.prev,
4198 new_entry);
4199
4200 /*
4201 * the new entry will need an amap. it will either
4202 * need to be copied from the old entry or created
4203 * from scratch (if the old entry does not have an
4204 * amap). can we defer this process until later
4205 * (by setting "needs_copy") or do we need to copy
4206 * the amap now?
4207 *
4208 * we must copy the amap now if any of the following
4209 * conditions hold:
4210 * 1. the old entry has an amap and that amap is
4211 * being shared. this means that the old (parent)
4212 * process is sharing the amap with another
4213 * process. if we do not clear needs_copy here
4214 * we will end up in a situation where both the
4215			 *    parent and child processes are referring to the
4216 * same amap with "needs_copy" set. if the
4217 * parent write-faults, the fault routine will
4218 * clear "needs_copy" in the parent by allocating
4219 * a new amap. this is wrong because the
4220 * parent is supposed to be sharing the old amap
4221 * and the new amap will break that.
4222 *
4223 * 2. if the old entry has an amap and a non-zero
4224 * wire count then we are going to have to call
4225 * amap_cow_now to avoid page faults in the
4226 * parent process. since amap_cow_now requires
4227 * "needs_copy" to be clear we might as well
4228 * clear it here as well.
4229 *
4230 */
4231
4232 if (old_entry->aref.ar_amap != NULL) {
4233 if ((amap_flags(old_entry->aref.ar_amap) &
4234 AMAP_SHARED) != 0 ||
4235 VM_MAPENT_ISWIRED(old_entry)) {
4236
4237 amap_copy(new_map, new_entry,
4238 AMAP_COPY_NOCHUNK, 0, 0);
4239 /* XXXCDC: M_WAITOK ... ok? */
4240 }
4241 }
4242
4243 /*
4244 * if the parent's entry is wired down, then the
4245 * parent process does not want page faults on
4246 * access to that memory. this means that we
4247 * cannot do copy-on-write because we can't write
4248 * protect the old entry. in this case we
4249 * resolve all copy-on-write faults now, using
4250 * amap_cow_now. note that we have already
4251 * allocated any needed amap (above).
4252 */
4253
4254 if (VM_MAPENT_ISWIRED(old_entry)) {
4255
4256 /*
4257 * resolve all copy-on-write faults now
4258 * (note that there is nothing to do if
4259 * the old mapping does not have an amap).
4260 */
4261 if (old_entry->aref.ar_amap)
4262 amap_cow_now(new_map, new_entry);
4263
4264 } else {
4265
4266 /*
4267 * setup mappings to trigger copy-on-write faults
4268 * we must write-protect the parent if it has
4269 * an amap and it is not already "needs_copy"...
4270 * if it is already "needs_copy" then the parent
4271 * has already been write-protected by a previous
4272 * fork operation.
4273 */
4274
4275 if (old_entry->aref.ar_amap &&
4276 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4277 if (old_entry->max_protection & VM_PROT_WRITE) {
4278 pmap_protect(old_map->pmap,
4279 old_entry->start,
4280 old_entry->end,
4281 old_entry->protection &
4282 ~VM_PROT_WRITE);
4283 pmap_update(old_map->pmap);
4284 }
4285 old_entry->etype |= UVM_ET_NEEDSCOPY;
4286 }
4287 }
4288 break;
4289 } /* end of switch statement */
4290 old_entry = old_entry->next;
4291 }
4292
4293 vm_map_unlock(old_map);
4294
4295 #ifdef SYSVSHM
4296 if (vm1->vm_shm)
4297 shmfork(vm1, vm2);
4298 #endif
4299
4300 #ifdef PMAP_FORK
4301 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4302 #endif
4303
4304 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4305 return (vm2);
4306 }
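
/*
 * Summary of the inheritance handling above (descriptive only):
 *
 *	MAP_INHERIT_NONE   entry is not copied to the child; the new
 *			   map's size is reduced accordingly.
 *	MAP_INHERIT_SHARE  the child gets its own map entry that shares
 *			   the parent's amap and backing object.
 *	MAP_INHERIT_COPY   the child gets a copy-on-write view; wired
 *			   parent entries with an amap are resolved
 *			   immediately with amap_cow_now(), otherwise
 *			   the parent is write-protected if needed and
 *			   the copy is deferred via needs_copy.
 */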
4307
4308
4309 /*
4310 * in-kernel map entry allocation.
4311 */
4312
4313 struct uvm_kmapent_hdr {
4314 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4315 int ukh_nused;
4316 struct vm_map_entry *ukh_freelist;
4317 struct vm_map *ukh_map;
4318 struct vm_map_entry ukh_entries[0];
4319 };
4320
4321 #define UVM_KMAPENT_CHUNK \
4322 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4323 / sizeof(struct vm_map_entry))
4324
4325 #define UVM_KHDR_FIND(entry) \
4326 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
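
/*
 * Worked example with illustrative sizes only (the real values depend
 * on the port): with a 4096-byte page, a 32-byte uvm_kmapent_hdr and
 * an 80-byte vm_map_entry, UVM_KMAPENT_CHUNK would be
 * (4096 - 32) / 80 = 50 entries per page.  because each chunk occupies
 * exactly one page-aligned page, UVM_KHDR_FIND() recovers the header
 * from any entry in the chunk by masking off the page offset, e.g. an
 * entry at 0xc2345e70 yields the header at 0xc2345000.
 */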
4327
4328
4329 #ifdef DIAGNOSTIC
4330 static struct vm_map *
4331 uvm_kmapent_map(struct vm_map_entry *entry)
4332 {
4333 const struct uvm_kmapent_hdr *ukh;
4334
4335 ukh = UVM_KHDR_FIND(entry);
4336 return ukh->ukh_map;
4337 }
4338 #endif
4339
4340 static inline struct vm_map_entry *
4341 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4342 {
4343 struct vm_map_entry *entry;
4344
4345 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4346 KASSERT(ukh->ukh_nused >= 0);
4347
4348 entry = ukh->ukh_freelist;
4349 if (entry) {
4350 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4351 == UVM_MAP_KERNEL);
4352 ukh->ukh_freelist = entry->next;
4353 ukh->ukh_nused++;
4354 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4355 } else {
4356 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4357 }
4358
4359 return entry;
4360 }
4361
4362 static inline void
4363 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4364 {
4365
4366 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4367 == UVM_MAP_KERNEL);
4368 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4369 KASSERT(ukh->ukh_nused > 0);
4370 KASSERT(ukh->ukh_freelist != NULL ||
4371 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4372 KASSERT(ukh->ukh_freelist == NULL ||
4373 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4374
4375 ukh->ukh_nused--;
4376 entry->next = ukh->ukh_freelist;
4377 ukh->ukh_freelist = entry;
4378 }
4379
4380 /*
4381  * uvm_kmapent_alloc: allocate a map entry for an in-kernel map
4382 */
4383
4384 static struct vm_map_entry *
4385 uvm_kmapent_alloc(struct vm_map *map, int flags)
4386 {
4387 struct vm_page *pg;
4388 struct uvm_map_args args;
4389 struct uvm_kmapent_hdr *ukh;
4390 struct vm_map_entry *entry;
4391 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4392 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4393 vaddr_t va;
4394 int error;
4395 int i;
4396
4397 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4398 KDASSERT(kernel_map != NULL);
4399 KASSERT(vm_map_pmap(map) == pmap_kernel());
4400
4401 UVMMAP_EVCNT_INCR(uke_alloc);
4402 entry = NULL;
4403 again:
4404 /*
4405 * try to grab an entry from freelist.
4406 */
4407 mutex_spin_enter(&uvm_kentry_lock);
4408 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4409 if (ukh) {
4410 entry = uvm_kmapent_get(ukh);
4411 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4412 LIST_REMOVE(ukh, ukh_listq);
4413 }
4414 mutex_spin_exit(&uvm_kentry_lock);
4415
4416 if (entry)
4417 return entry;
4418
4419 /*
4420 	 * there's no free entry for this vm_map.
4421 	 * we need to allocate some vm_map_entries;
4422 	 * for simplicity, always allocate a one-page chunk of them at once.
4423 */
4424
4425 pg = uvm_pagealloc(NULL, 0, NULL, 0);
4426 if (__predict_false(pg == NULL)) {
4427 if (flags & UVM_FLAG_NOWAIT)
4428 return NULL;
4429 uvm_wait("kme_alloc");
4430 goto again;
4431 }
4432
4433 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4434 0, mapflags, &args);
4435 if (error) {
4436 uvm_pagefree(pg);
4437 return NULL;
4438 }
4439
4440 va = args.uma_start;
4441
4442 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
4443 pmap_update(vm_map_pmap(map));
4444
4445 ukh = (void *)va;
4446
4447 /*
4448 	 * use the first entry for the ukh itself.
4449 */
4450
4451 entry = &ukh->ukh_entries[0];
4452 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4453 error = uvm_map_enter(map, &args, entry);
4454 KASSERT(error == 0);
4455
4456 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4457 ukh->ukh_map = map;
4458 ukh->ukh_freelist = NULL;
4459 for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
4460 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4461
4462 xentry->flags = UVM_MAP_KERNEL;
4463 uvm_kmapent_put(ukh, xentry);
4464 }
4465 KASSERT(ukh->ukh_nused == 2);
4466
4467 mutex_spin_enter(&uvm_kentry_lock);
4468 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4469 ukh, ukh_listq);
4470 mutex_spin_exit(&uvm_kentry_lock);
4471
4472 /*
4473 	 * return the second entry.
4474 */
4475
4476 entry = &ukh->ukh_entries[1];
4477 entry->flags = UVM_MAP_KERNEL;
4478 UVMMAP_EVCNT_INCR(ukh_alloc);
4479 return entry;
4480 }
4481
4482 /*
4483  * uvm_kmapent_free: free a map entry for an in-kernel map
4484 */
4485
4486 static void
4487 uvm_kmapent_free(struct vm_map_entry *entry)
4488 {
4489 struct uvm_kmapent_hdr *ukh;
4490 struct vm_page *pg;
4491 struct vm_map *map;
4492 struct pmap *pmap;
4493 vaddr_t va;
4494 paddr_t pa;
4495 struct vm_map_entry *deadentry;
4496
4497 UVMMAP_EVCNT_INCR(uke_free);
4498 ukh = UVM_KHDR_FIND(entry);
4499 map = ukh->ukh_map;
4500
4501 mutex_spin_enter(&uvm_kentry_lock);
4502 uvm_kmapent_put(ukh, entry);
4503 if (ukh->ukh_nused > 1) {
4504 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4505 LIST_INSERT_HEAD(
4506 &vm_map_to_kernel(map)->vmk_kentry_free,
4507 ukh, ukh_listq);
4508 mutex_spin_exit(&uvm_kentry_lock);
4509 return;
4510 }
4511
4512 /*
4513 * now we can free this ukh.
4514 *
4515 * however, keep an empty ukh to avoid ping-pong.
4516 */
4517
4518 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4519 LIST_NEXT(ukh, ukh_listq) == NULL) {
4520 mutex_spin_exit(&uvm_kentry_lock);
4521 return;
4522 }
4523 LIST_REMOVE(ukh, ukh_listq);
4524 mutex_spin_exit(&uvm_kentry_lock);
4525
4526 KASSERT(ukh->ukh_nused == 1);
4527
4528 /*
4529 	 * remove the map entry for the ukh itself.
4530 */
4531
4532 va = (vaddr_t)ukh;
4533 KASSERT((va & PAGE_MASK) == 0);
4534 vm_map_lock(map);
4535 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4536 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4537 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4538 KASSERT(deadentry->next == NULL);
4539 KASSERT(deadentry == &ukh->ukh_entries[0]);
4540
4541 /*
4542 * unmap the page from pmap and free it.
4543 */
4544
4545 pmap = vm_map_pmap(map);
4546 KASSERT(pmap == pmap_kernel());
4547 if (!pmap_extract(pmap, va, &pa))
4548 panic("%s: no mapping", __func__);
4549 pmap_kremove(va, PAGE_SIZE);
4550 vm_map_unlock(map);
4551 pg = PHYS_TO_VM_PAGE(pa);
4552 uvm_pagefree(pg);
4553 UVMMAP_EVCNT_INCR(ukh_free);
4554 }
4555
4556 static vsize_t
4557 uvm_kmapent_overhead(vsize_t size)
4558 {
4559
4560 /*
4561 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4562 * as the min allocation unit is PAGE_SIZE.
4563 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4564 	 *   one of them is used to map the page itself.
4565 */
4566
4567 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4568 PAGE_SIZE;
4569 }
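
/*
 * Worked example (illustrative figures only): for size = 1 MiB with
 * 4096-byte pages there can be at most howmany(1 MiB, 4096) = 256
 * unmerged entries.  assuming UVM_KMAPENT_CHUNK = 50 (see the sketch
 * near UVM_KHDR_FIND above), each chunk page provides 49 usable
 * entries, so the overhead is howmany(256, 49) * PAGE_SIZE =
 * 6 * 4096 = 24576 bytes.
 */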
4570
4571 /*
4572 * map entry reservation
4573 */
4574
4575 /*
4576 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4577 *
4578 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4579 * => caller shouldn't hold map locked.
4580 */
4581 int
4582 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4583 int nentries, int flags)
4584 {
4585
4586 umr->umr_nentries = 0;
4587
4588 if ((flags & UVM_FLAG_QUANTUM) != 0)
4589 return 0;
4590
4591 if (!VM_MAP_USE_KMAPENT(map))
4592 return 0;
4593
4594 while (nentries--) {
4595 struct vm_map_entry *ent;
4596 ent = uvm_kmapent_alloc(map, flags);
4597 if (!ent) {
4598 uvm_mapent_unreserve(map, umr);
4599 return ENOMEM;
4600 }
4601 UMR_PUTENTRY(umr, ent);
4602 }
4603
4604 return 0;
4605 }
4606
4607 /*
4608 * uvm_mapent_unreserve:
4609 *
4610 * => caller shouldn't hold map locked.
4611  * => never fails or sleeps.
4612 */
4613 void
4614 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4615 {
4616
4617 while (!UMR_EMPTY(umr))
4618 uvm_kmapent_free(UMR_GETENTRY(umr));
4619 }
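
/*
 * Minimal sketch of the intended calling pattern (uvm_unmap1() below
 * follows this shape); error handling elided:
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	error = uvm_mapent_reserve(map, &umr, 2, flags);
 *	if (error)
 *		return error;
 *	vm_map_lock(map);
 *	... clipping/unmap work that may consume entries from &umr ...
 *	vm_map_unlock(map);
 *	uvm_mapent_unreserve(map, &umr);
 */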
4620
4621 /*
4622 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4623 *
4624 * => called with map locked.
4625  * => returns non-zero if successfully merged.
4626 */
4627
4628 int
4629 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4630 {
4631 struct uvm_object *uobj;
4632 struct vm_map_entry *next;
4633 struct vm_map_entry *prev;
4634 vsize_t size;
4635 int merged = 0;
4636 bool copying;
4637 int newetype;
4638
4639 if (VM_MAP_USE_KMAPENT(map)) {
4640 return 0;
4641 }
4642 if (entry->aref.ar_amap != NULL) {
4643 return 0;
4644 }
4645 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4646 return 0;
4647 }
4648
4649 uobj = entry->object.uvm_obj;
4650 size = entry->end - entry->start;
4651 copying = (flags & UVM_MERGE_COPYING) != 0;
4652 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4653
4654 next = entry->next;
4655 if (next != &map->header &&
4656 next->start == entry->end &&
4657 ((copying && next->aref.ar_amap != NULL &&
4658 amap_refs(next->aref.ar_amap) == 1) ||
4659 (!copying && next->aref.ar_amap == NULL)) &&
4660 UVM_ET_ISCOMPATIBLE(next, newetype,
4661 uobj, entry->flags, entry->protection,
4662 entry->max_protection, entry->inheritance, entry->advice,
4663 entry->wired_count) &&
4664 (uobj == NULL || entry->offset + size == next->offset)) {
4665 int error;
4666
4667 if (copying) {
4668 error = amap_extend(next, size,
4669 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4670 } else {
4671 error = 0;
4672 }
4673 if (error == 0) {
4674 if (uobj) {
4675 if (uobj->pgops->pgo_detach) {
4676 uobj->pgops->pgo_detach(uobj);
4677 }
4678 }
4679
4680 entry->end = next->end;
4681 clear_hints(map, next);
4682 uvm_map_entry_unlink(map, next);
4683 if (copying) {
4684 entry->aref = next->aref;
4685 entry->etype &= ~UVM_ET_NEEDSCOPY;
4686 }
4687 uvm_map_check(map, "trymerge forwardmerge");
4688 uvm_mapent_free_merged(map, next);
4689 merged++;
4690 }
4691 }
4692
4693 prev = entry->prev;
4694 if (prev != &map->header &&
4695 prev->end == entry->start &&
4696 ((copying && !merged && prev->aref.ar_amap != NULL &&
4697 amap_refs(prev->aref.ar_amap) == 1) ||
4698 (!copying && prev->aref.ar_amap == NULL)) &&
4699 UVM_ET_ISCOMPATIBLE(prev, newetype,
4700 uobj, entry->flags, entry->protection,
4701 entry->max_protection, entry->inheritance, entry->advice,
4702 entry->wired_count) &&
4703 (uobj == NULL ||
4704 prev->offset + prev->end - prev->start == entry->offset)) {
4705 int error;
4706
4707 if (copying) {
4708 error = amap_extend(prev, size,
4709 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4710 } else {
4711 error = 0;
4712 }
4713 if (error == 0) {
4714 if (uobj) {
4715 if (uobj->pgops->pgo_detach) {
4716 uobj->pgops->pgo_detach(uobj);
4717 }
4718 entry->offset = prev->offset;
4719 }
4720
4721 entry->start = prev->start;
4722 clear_hints(map, prev);
4723 uvm_map_entry_unlink(map, prev);
4724 if (copying) {
4725 entry->aref = prev->aref;
4726 entry->etype &= ~UVM_ET_NEEDSCOPY;
4727 }
4728 uvm_map_check(map, "trymerge backmerge");
4729 uvm_mapent_free_merged(map, prev);
4730 merged++;
4731 }
4732 }
4733
4734 return merged;
4735 }
4736
4737 #if defined(DDB)
4738
4739 /*
4740 * DDB hooks
4741 */
4742
4743 /*
4744 * uvm_map_printit: actually prints the map
4745 */
4746
4747 void
4748 uvm_map_printit(struct vm_map *map, bool full,
4749 void (*pr)(const char *, ...))
4750 {
4751 struct vm_map_entry *entry;
4752
4753 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
4754 vm_map_max(map));
4755 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
4756 map->nentries, map->size, map->ref_count, map->timestamp,
4757 map->flags);
4758 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4759 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4760 if (!full)
4761 return;
4762 for (entry = map->header.next; entry != &map->header;
4763 entry = entry->next) {
4764 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
4765 entry, entry->start, entry->end, entry->object.uvm_obj,
4766 (long long)entry->offset, entry->aref.ar_amap,
4767 entry->aref.ar_pageoff);
4768 (*pr)(
4769 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4770 "wc=%d, adv=%d\n",
4771 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4772 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4773 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4774 entry->protection, entry->max_protection,
4775 entry->inheritance, entry->wired_count, entry->advice);
4776 }
4777 }
4778
4779 /*
4780 * uvm_object_printit: actually prints the object
4781 */
4782
4783 void
4784 uvm_object_printit(struct uvm_object *uobj, bool full,
4785 void (*pr)(const char *, ...))
4786 {
4787 struct vm_page *pg;
4788 int cnt = 0;
4789
4790 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
4791 uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
4792 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
4793 (*pr)("refs=<SYSTEM>\n");
4794 else
4795 (*pr)("refs=%d\n", uobj->uo_refs);
4796
4797 if (!full) {
4798 return;
4799 }
4800 (*pr)(" PAGES <pg,offset>:\n ");
4801 TAILQ_FOREACH(pg, &uobj->memq, listq) {
4802 cnt++;
4803 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
4804 if ((cnt % 3) == 0) {
4805 (*pr)("\n ");
4806 }
4807 }
4808 if ((cnt % 3) != 0) {
4809 (*pr)("\n");
4810 }
4811 }
4812
4813 /*
4814 * uvm_page_printit: actually print the page
4815 */
4816
4817 static const char page_flagbits[] = UVM_PGFLAGBITS;
4818 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
4819
4820 void
4821 uvm_page_printit(struct vm_page *pg, bool full,
4822 void (*pr)(const char *, ...))
4823 {
4824 struct vm_page *tpg;
4825 struct uvm_object *uobj;
4826 struct pglist *pgl;
4827 char pgbuf[128];
4828 char pqbuf[128];
4829
4830 (*pr)("PAGE %p:\n", pg);
4831 bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf));
4832 bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf));
4833 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
4834 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
4835 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
4836 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
4837 #if defined(UVM_PAGE_TRKOWN)
4838 if (pg->flags & PG_BUSY)
4839 (*pr)(" owning process = %d, tag=%s\n",
4840 pg->owner, pg->owner_tag);
4841 else
4842 (*pr)(" page not busy, no owner\n");
4843 #else
4844 (*pr)(" [page ownership tracking disabled]\n");
4845 #endif
4846
4847 if (!full)
4848 return;
4849
4850 /* cross-verify object/anon */
4851 if ((pg->pqflags & PQ_FREE) == 0) {
4852 if (pg->pqflags & PQ_ANON) {
4853 if (pg->uanon == NULL || pg->uanon->an_page != pg)
4854 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
4855 (pg->uanon) ? pg->uanon->an_page : NULL);
4856 else
4857 (*pr)(" anon backpointer is OK\n");
4858 } else {
4859 uobj = pg->uobject;
4860 if (uobj) {
4861 (*pr)(" checking object list\n");
4862 TAILQ_FOREACH(tpg, &uobj->memq, listq) {
4863 if (tpg == pg) {
4864 break;
4865 }
4866 }
4867 if (tpg)
4868 (*pr)(" page found on object list\n");
4869 else
4870 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
4871 }
4872 }
4873 }
4874
4875 /* cross-verify page queue */
4876 if (pg->pqflags & PQ_FREE) {
4877 int fl = uvm_page_lookup_freelist(pg);
4878 int color = VM_PGCOLOR_BUCKET(pg);
4879 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
4880 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
4881 } else {
4882 pgl = NULL;
4883 }
4884
4885 if (pgl) {
4886 (*pr)(" checking pageq list\n");
4887 TAILQ_FOREACH(tpg, pgl, pageq) {
4888 if (tpg == pg) {
4889 break;
4890 }
4891 }
4892 if (tpg)
4893 (*pr)(" page found on pageq list\n");
4894 else
4895 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
4896 }
4897 }
4898
4899 /*
4900  * uvm_page_printall: print a summary of all managed pages
4901 */
4902
4903 void
4904 uvm_page_printall(void (*pr)(const char *, ...))
4905 {
4906 unsigned i;
4907 struct vm_page *pg;
4908
4909 (*pr)("%18s %4s %4s %18s %18s"
4910 #ifdef UVM_PAGE_TRKOWN
4911 " OWNER"
4912 #endif
4913 "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
4914 for (i = 0; i < vm_nphysseg; i++) {
4915 for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) {
4916 (*pr)("%18p %04x %04x %18p %18p",
4917 pg, pg->flags, pg->pqflags, pg->uobject,
4918 pg->uanon);
4919 #ifdef UVM_PAGE_TRKOWN
4920 if (pg->flags & PG_BUSY)
4921 (*pr)(" %d [%s]", pg->owner, pg->owner_tag);
4922 #endif
4923 (*pr)("\n");
4924 }
4925 }
4926 }
4927
4928 #endif
4929
4930 /*
4931 * uvm_map_create: create map
4932 */
4933
4934 struct vm_map *
4935 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
4936 {
4937 struct vm_map *result;
4938
4939 MALLOC(result, struct vm_map *, sizeof(struct vm_map),
4940 M_VMMAP, M_WAITOK);
4941 uvm_map_setup(result, vmin, vmax, flags);
4942 result->pmap = pmap;
4943 return(result);
4944 }
4945
4946 /*
4947 * uvm_map_setup: init map
4948 *
4949 * => map must not be in service yet.
4950 */
4951
4952 void
4953 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4954 {
4955 int ipl;
4956
4957 RB_INIT(&map->rbhead);
4958 map->header.next = map->header.prev = &map->header;
4959 map->nentries = 0;
4960 map->size = 0;
4961 map->ref_count = 1;
4962 vm_map_setmin(map, vmin);
4963 vm_map_setmax(map, vmax);
4964 map->flags = flags;
4965 map->first_free = &map->header;
4966 map->hint = &map->header;
4967 map->timestamp = 0;
4968 map->busy = NULL;
4969
4970 if ((flags & VM_MAP_INTRSAFE) != 0) {
4971 ipl = IPL_VM;
4972 } else {
4973 ipl = IPL_NONE;
4974 }
4975
4976 rw_init(&map->lock);
4977 cv_init(&map->cv, "vm_map");
4978 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
4979 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
4980
4981 /*
4982 * The hint lock can get acquired with the pagequeue
4983 * lock held, so must be at IPL_VM.
4984 */
4985 mutex_init(&map->hint_lock, MUTEX_DRIVER, IPL_VM);
4986 }
4987
4988
4989 /*
4990 * U N M A P - m a i n e n t r y p o i n t
4991 */
4992
4993 /*
4994 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "stop")
4995 *
4996 * => caller must check alignment and size
4997 * => map must be unlocked (we will lock it)
4998 * => flags is UVM_FLAG_QUANTUM or 0.
4999 */
5000
5001 void
5002 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5003 {
5004 struct vm_map_entry *dead_entries;
5005 struct uvm_mapent_reservation umr;
5006 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5007
5008 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5009 map, start, end, 0);
5010 if (map == kernel_map)
5011 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5012 /*
5013 * work now done by helper functions. wipe the pmap's and then
5014 * detach from the dead entries...
5015 */
5016 uvm_mapent_reserve(map, &umr, 2, flags);
5017 vm_map_lock(map);
5018 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5019 vm_map_unlock(map);
5020 uvm_mapent_unreserve(map, &umr);
5021
5022 if (dead_entries != NULL)
5023 uvm_unmap_detach(dead_entries, 0);
5024
5025 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5026 }
5027
5028
5029 /*
5030 * uvm_map_reference: add reference to a map
5031 *
5032 * => map need not be locked (we use misc_lock).
5033 */
5034
5035 void
5036 uvm_map_reference(struct vm_map *map)
5037 {
5038 mutex_enter(&map->misc_lock);
5039 map->ref_count++;
5040 mutex_exit(&map->misc_lock);
5041 }
5042
5043 struct vm_map_kernel *
5044 vm_map_to_kernel(struct vm_map *map)
5045 {
5046
5047 KASSERT(VM_MAP_IS_KERNEL(map));
5048
5049 return (struct vm_map_kernel *)map;
5050 }
5051
5052 bool
5053 vm_map_starved_p(struct vm_map *map)
5054 {
5055
5056 if ((map->flags & VM_MAP_WANTVA) != 0) {
5057 return true;
5058 }
5059 /* XXX */
5060 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5061 return true;
5062 }
5063 return false;
5064 }
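
/*
 * Example of the 15/16 heuristic above (illustrative numbers): for a
 * map spanning 256 MiB, the map is reported as starved once more than
 * 15/16 of that range, i.e. 240 MiB, is in use.
 */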
5065