/*	$NetBSD: uvm_map.c,v 1.129 2003/01/21 00:03:07 christos Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.129 2003/01/21 00:03:07 christos Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#define UVM_MAP
#include <uvm/uvm.h>

#ifdef DDB
#include <uvm/uvm_ddb.h>
#endif

extern struct vm_map *pager_map;

struct uvm_cnt map_ubackmerge, map_uforwmerge;
struct uvm_cnt map_ubimerge, map_unomerge;
struct uvm_cnt map_kbackmerge, map_kforwmerge;
struct uvm_cnt map_kbimerge, map_knomerge;
struct uvm_cnt uvm_map_call, uvm_mlk_call, uvm_mlk_hint;
const char vmmapbsy[] = "vmmapbsy";

/*
 * pool for vmspace structures.
 */

struct pool uvm_vmspace_pool;

/*
 * pool for dynamically-allocated map entries.
 */

struct pool uvm_map_entry_pool;
struct pool uvm_map_entry_kmem_pool;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
} while (/*CONSTCOND*/ 0)
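
/*
 * Illustrative sketch (comment only, not compiled): linking entry "e"
 * after entry "a" splices it into the circular doubly-linked entry list
 * between "a" and the old "a->next":
 *
 *	before:  a <-> b
 *	after:   a <-> e <-> b		(map->nentries is incremented)
 *
 * The map header acts as the list sentinel, so linking after
 * &map->header inserts at the front of the list.
 */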

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define SAVE_HINT(map,check,value) do { \
	simple_lock(&(map)->hint_lock); \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock); \
} while (/*CONSTCOND*/ 0)

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *, int));
static void uvm_mapent_copy __P((struct vm_map_entry *, struct vm_map_entry *));
static void uvm_mapent_free __P((struct vm_map_entry *));
static void uvm_map_entry_unwire __P((struct vm_map *, struct vm_map_entry *));
static void uvm_map_reference_amap __P((struct vm_map_entry *, int));
static void uvm_map_unreference_amap __P((struct vm_map_entry *, int));

/*
 * local inlines
 */

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static __inline struct vm_map_entry *
uvm_mapent_alloc(map, flags)
	struct vm_map *map;
	int flags;
{
	struct vm_map_entry *me;
	int s;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (map->flags & VM_MAP_INTRSAFE || cold) {
		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me = uvm.kentry_free;
		if (me) uvm.kentry_free = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		if (__predict_false(me == NULL)) {
			panic("uvm_mapent_alloc: out of static map entries, "
			    "check MAX_KMAPENT (currently %d)",
			    MAX_KMAPENT);
		}
		me->flags = UVM_MAP_STATIC;
	} else if (map == kernel_map) {
		me = pool_get(&uvm_map_entry_kmem_pool, pflags);
		if (__predict_false(me == NULL))
			return NULL;
		me->flags = UVM_MAP_KMEM;
	} else {
		me = pool_get(&uvm_map_entry_pool, pflags);
		if (__predict_false(me == NULL))
			return NULL;
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return(me);
}

/*
 * uvm_mapent_free: free map entry
 */

static __inline void
uvm_mapent_free(me)
	struct vm_map_entry *me;
{
	int s;
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_STATIC) {
		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me->next = uvm.kentry_free;
		uvm.kentry_free = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	} else if (me->flags & UVM_MAP_KMEM) {
		pool_put(&uvm_map_entry_kmem_pool, me);
	} else {
		pool_put(&uvm_map_entry_pool, me);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static __inline void
uvm_mapent_copy(src, dst)
	struct vm_map_entry *src;
	struct vm_map_entry *dst;
{
	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static __inline void
uvm_map_entry_unwire(map, entry)
	struct vm_map *map;
	struct vm_map_entry *entry;
{
	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static __inline void
uvm_map_reference_amap(entry, flags)
	struct vm_map_entry *entry;
	int flags;
{
	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static __inline void
uvm_map_unreference_amap(entry, flags)
	struct vm_map_entry *entry;
	int flags;
{
	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.   note that we allocate
 * and init the static pool of struct vm_map_entry *'s for the kernel here.
 */

void
uvm_map_init()
{
	static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif
	int lcv;

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
	UVMCNT_INIT(uvm_map_call, UVMCNT_CNT, 0,
	    "# uvm_map() successful calls", 0);

	UVMCNT_INIT(map_ubackmerge, UVMCNT_CNT, 0,
	    "# uvm_map() back umerges", 0);
	UVMCNT_INIT(map_uforwmerge, UVMCNT_CNT, 0,
	    "# uvm_map() forward umerges", 0);
	UVMCNT_INIT(map_ubimerge, UVMCNT_CNT, 0,
	    "# uvm_map() dual umerge", 0);
	UVMCNT_INIT(map_unomerge, UVMCNT_CNT, 0,
	    "# uvm_map() no umerge", 0);

	UVMCNT_INIT(map_kbackmerge, UVMCNT_CNT, 0,
	    "# uvm_map() back kmerges", 0);
	UVMCNT_INIT(map_kforwmerge, UVMCNT_CNT, 0,
	    "# uvm_map() forward kmerges", 0);
	UVMCNT_INIT(map_kbimerge, UVMCNT_CNT, 0,
	    "# uvm_map() dual kmerge", 0);
	UVMCNT_INIT(map_knomerge, UVMCNT_CNT, 0,
	    "# uvm_map() no kmerge", 0);

	UVMCNT_INIT(uvm_mlk_call, UVMCNT_CNT, 0, "# map lookup calls", 0);
	UVMCNT_INIT(uvm_mlk_hint, UVMCNT_CNT, 0, "# map lookup hint hits", 0);

	/*
	 * now set up static pool of kernel map entries ...
	 */

	simple_lock_init(&uvm.kentry_lock);
	uvm.kentry_free = NULL;
	for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
		kernel_map_entry[lcv].next = uvm.kentry_free;
		uvm.kentry_free = &kernel_map_entry[lcv];
	}

	/*
	 * initialize the map-related pools.
	 */
	pool_init(&uvm_vmspace_pool, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", &pool_allocator_nointr);
	pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", &pool_allocator_nointr);
	pool_init(&uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpekpl", NULL);
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(map, entry, start)
	struct vm_map *map;
	struct vm_map_entry *entry;
	vaddr_t start;
{
	struct vm_map_entry *new_entry;
	vaddr_t new_adj;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}
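
/*
 * Illustrative sketch (comment only, not compiled): clipping an entry
 * covering [0x1000, 0x4000) at start = 0x2000 leaves two adjacent
 * entries, with the new entry inserted before the original:
 *
 *	before:  [0x1000 .............. 0x4000)
 *	after:   [0x1000 .. 0x2000)[0x2000 .. 0x4000)
 *	          (new_entry)       (entry, now starting at "start")
 *
 * Object offsets and any amap reference are adjusted by new_adj so
 * both halves keep mapping the same backing pages.
 */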

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(map, entry, end)
	struct vm_map *map;
	struct vm_map_entry *entry;
	vaddr_t end;
{
	struct vm_map_entry *new_entry;
	vaddr_t new_adj; /* #bytes we move start forward */

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */

	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap)
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}


/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.   note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we try to align the virtual address to
 *	the specified alignment.  this is only a hint; if we can't
 *	do it, the address will be unaligned.  this is provided as
 *	a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */
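
/*
 * Illustrative call (sketch only, mirroring the anonymous-memory case
 * used by uvm_map_reserve() below): map "size" bytes of copy-on-write
 * anonymous memory anywhere in a map:
 *
 *	vaddr_t va = vm_map_min(map);
 *	error = uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW));
 *
 * On success "va" holds the chosen virtual address (case [2] above:
 * NULL uobj, unknown offset, no PMAP_PREFER).
 */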

int
uvm_map(map, startp, size, uobj, uoffset, align, flags)
	struct vm_map *map;
	vaddr_t *startp;	/* IN/OUT */
	vsize_t size;
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	uvm_flag_t flags;
{
	struct vm_map_entry *prev_entry, *new_entry;
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
	    UVM_MAXPROTECTION(flags);
	vm_inherit_t inherit = UVM_INHERIT(flags);
	int advice = UVM_ADVICE(flags);
	int error, merged = 0, kmap = (vm_map_pmap(map) == pmap_kernel());
	UVMHIST_FUNC("uvm_map");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
	    map, *startp, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL ||
	    (map->flags & VM_MAP_INTRSAFE));

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 */

	new_entry = NULL;
	if (map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
	}

	/*
	 * figure out where to put new VM range
	 */

	if (vm_map_lock_try(map) == FALSE) {
		if (flags & UVM_FLAG_TRYLOCK) {
			if (new_entry) {
				uvm_mapent_free(new_entry);
			}
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
	    uobj, uoffset, align, flags)) == NULL) {
		UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
		vm_map_unlock(map);
		if (new_entry) {
			uvm_mapent_free(new_entry);
		}
		return ENOMEM;
	}

#ifdef PMAP_GROWKERNEL
	{
		/*
		 * If the kernel pmap can't map the requested space,
		 * then allocate more resources for it.
		 */
		if (map == kernel_map && uvm_maxkaddr < (*startp + size))
			uvm_maxkaddr = pmap_growkernel(*startp + size);
	}
#endif

	UVMCNT_INCR(uvm_map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).   this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).   the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = *startp - vm_map_min(kernel_map);
		}
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.   might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == *startp &&
	    prev_entry != &map->header &&
	    prev_entry->object.uvm_obj == uobj) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		if (UVM_ET_ISSUBMAP(prev_entry))
			goto forwardmerge;

		if (prev_entry->protection != prot ||
		    prev_entry->max_protection != maxprot)
			goto forwardmerge;

		if (prev_entry->inheritance != inherit ||
		    prev_entry->advice != advice)
			goto forwardmerge;

		/* wiring status must match (new area is unwired) */
		if (VM_MAPENT_ISWIRED(prev_entry))
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay there
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error) {
				vm_map_unlock(map);
				if (new_entry) {
					uvm_mapent_free(new_entry);
				}
				return error;
			}
		}

		if (kmap)
			UVMCNT_INCR(map_kbackmerge);
		else
			UVMCNT_INCR(map_ubackmerge);
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		prev_entry->end += size;
		map->size += size;

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		if (new_entry) {
			uvm_mapent_free(new_entry);
			new_entry = NULL;
		}
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (*startp + size) &&
	    prev_entry->next != &map->header &&
	    prev_entry->next->object.uvm_obj == uobj) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		if (UVM_ET_ISSUBMAP(prev_entry->next))
			goto nomerge;

		if (prev_entry->next->protection != prot ||
		    prev_entry->next->max_protection != maxprot)
			goto nomerge;

		if (prev_entry->next->inheritance != inherit ||
		    prev_entry->next->advice != advice)
			goto nomerge;

		/* wiring status must match (new area is unwired) */
		if (VM_MAPENT_ISWIRED(prev_entry->next))
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)? -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		     (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start + size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error) {
					vm_map_unlock(map);
					if (new_entry) {
						uvm_mapent_free(new_entry);
					}
					return error;
				}
			}
		}

		if (merged) {
			if (kmap) {
				UVMCNT_DECR(map_kbackmerge);
				UVMCNT_INCR(map_kbimerge);
			} else {
				UVMCNT_DECR(map_ubackmerge);
				UVMCNT_INCR(map_ubimerge);
			}
		} else {
			if (kmap)
				UVMCNT_INCR(map_kforwmerge);
			else
				UVMCNT_INCR(map_uforwmerge);
		}
		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 * (if merged, we've already detached)
		 */
		if (uobj && uobj->pgops->pgo_detach && !merged)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			struct vm_map_entry *dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
			uvm_mapent_free(dead);
		} else {
			prev_entry->next->start -= size;
			map->size += size;
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		if (new_entry) {
			uvm_mapent_free(new_entry);
			new_entry = NULL;
		}
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
		if (kmap)
			UVMCNT_INCR(map_knomerge);
		else
			UVMCNT_INCR(map_unomerge);

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				vm_map_unlock(map);
				return ENOMEM;
			}
		}
		new_entry->start = *startp;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		if (uobj)
			new_entry->etype = UVM_ET_OBJ;
		else
			new_entry->etype = 0;

		if (flags & UVM_FLAG_COPYONW) {
			new_entry->etype |= UVM_ET_COPYONWRITE;
			if ((flags & UVM_FLAG_OVERLAY) == 0)
				new_entry->etype |= UVM_ET_NEEDSCOPY;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK);
			if (__predict_false(amap == NULL)) {
				vm_map_unlock(map);
				uvm_mapent_free(new_entry);
				return ENOMEM;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);
		map->size += size;

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;
	}

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
	vm_map_unlock(map);
	return 0;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

boolean_t
uvm_map_lookup_entry(map, address, entry)
	struct vm_map *map;
	vaddr_t address;
	struct vm_map_entry **entry;	/* OUT */
{
	struct vm_map_entry *cur;
	struct vm_map_entry *last;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	UVMCNT_INCR(uvm_mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			UVMCNT_INCR(uvm_mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			return (TRUE);
		}
	} else {

		/*
		 * go from start to hint, *inclusively*
		 */

		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, map->hint, cur);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	return (FALSE);
}
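
/*
 * Illustrative use (sketch only; the caller supplies the locking): the
 * usual pattern is to look up the entry covering an address and fall
 * back to the gap case:
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		... va lies in [entry->start, entry->end) ...
 *	} else {
 *		... va is in a hole; "entry" is the entry before it,
 *		    possibly &map->header ...
 *	}
 *	vm_map_unlock_read(map);
 */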

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if `align' is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

struct vm_map_entry *
uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
	struct vm_map *map;
	vaddr_t hint;
	vsize_t length;
	vaddr_t *result; /* OUT */
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	int flags;
{
	struct vm_map_entry *entry, *next, *tmp;
	vaddr_t end, orig_hint;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
	    map, hint, length, flags);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < map->min_offset) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return(NULL);
		}
		hint = map->min_offset;
	}
	if (hint > map->max_offset) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, map->min_offset, map->max_offset, 0);
		return(NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			hint = entry->end;
	} else {
		if (uvm_map_lookup_entry(map, hint, &tmp)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist,"<- fixed & VA in use",
				    0, 0, 0, 0);
				return(NULL);
			}
			hint = tmp->end;
		}
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end   = base VA of current gap,
	 *	 next->start  = VA of end of current gap
	 */

	for (;; hint = (entry = next)->end) {

		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */

#ifdef PMAP_PREFER
		/*
		 * push hint forward as needed to avoid VAC alias problems.
		 * we only do this if a valid offset is specified.
		 */

		if ((flags & UVM_FLAG_FIXED) == 0 &&
		    uoffset != UVM_UNKNOWN_OFFSET)
			PMAP_PREFER(uoffset, &hint);
#endif
		if (align != 0) {
			if ((hint & (align - 1)) != 0)
				hint = roundup(hint, align);
			/*
			 * XXX Should we PMAP_PREFER() here again?
			 */
		}
		end = hint + length;
		if (end > map->max_offset || end < hint) {
			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
			if (align != 0) {
				UVMHIST_LOG(maphist,
				    "calling recursively, no align",
				    0,0,0,0);
				return (uvm_map_findspace(map, orig_hint,
				    length, result, uobj, uoffset, 0, flags));
			}
			return (NULL);
		}
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
			return(NULL); /* only one shot at it ... */
		}
	}
	SAVE_HINT(map, map->hint, entry);
	*result = hint;
	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
	return (entry);
}

/*
 *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => we return a list of map entries that we've removed from the map
 *    in "entry_list"
 */
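
/*
 * Illustrative protocol (sketch only): removal is split from reference
 * dropping so that pager I/O never happens with the map locked (see the
 * comment inside the function body).  A caller typically does:
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */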

void
uvm_unmap_remove(map, start, end, entry_list)
	struct vm_map *map;
	vaddr_t start, end;
	struct vm_map_entry **entry_list;	/* OUT */
{
	struct vm_map_entry *entry, *first_entry, *next;
	vaddr_t len;
	UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
	    map, start, end, 0);
	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * find first entry
	 */

	if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
		/* clip and go... */
		entry = first_entry;
		UVM_MAP_CLIP_START(map, entry, start);
		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);
	} else {
		entry = first_entry->next;
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * note: we now re-use first_entry for a different task.  we remove
	 * a number of map entries from the map and save them in a linked
	 * list headed by "first_entry".  once we remove them from the map
	 * the caller should unlock the map and drop the references to the
	 * backing objects [c.f. uvm_unmap_detach].  the object is to
	 * separate unmapping from reference dropping.  why?
	 *   [1] the map has to be locked for unmapping
	 *   [2] the map need not be locked for reference dropping
	 *   [3] dropping references may trigger pager I/O, and if we hit
	 *       a pager that does synchronous I/O we may have to wait for it.
	 *   [4] we would like all waiting for I/O to occur with maps unlocked
	 *       so that we don't block other threads.
	 */

	first_entry = NULL;
	*entry_list = NULL;

	/*
	 * break up the area into map entry sized regions and unmap.  note
	 * that all mappings have to be removed before we can even consider
	 * dropping references to amaps or VM objects (otherwise we could end
	 * up with a mapping to a page on the free list which would be very bad)
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);
		next = entry->next;
		len = entry->end - entry->start;

		/*
		 * unwire before removing addresses from the pmap; otherwise
		 * unwiring will put the entries back into the pmap (XXX).
		 */

		if (VM_MAPENT_ISWIRED(entry)) {
			uvm_map_entry_unwire(map, entry);
		}
		if ((map->flags & VM_MAP_PAGEABLE) == 0) {

			/*
			 * if the map is non-pageable, any pages mapped there
			 * must be wired and entered with pmap_kenter_pa(),
			 * and we should free any such pages immediately.
			 * this is mostly used for kmem_map and mb_map.
			 */

			uvm_km_pgremove_intrsafe(entry->start, entry->end);
			pmap_kremove(entry->start, len);
		} else if (UVM_ET_ISOBJ(entry) &&
			   UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
			KASSERT(vm_map_pmap(map) == pmap_kernel());

			/*
			 * note: kernel object mappings are currently used in
			 * two ways:
			 *  [1] "normal" mappings of pages in the kernel object
			 *  [2] uvm_km_valloc'd allocations in which we
			 *      pmap_enter in some non-kernel-object page
			 *      (e.g. vmapbuf).
			 *
			 * for case [1], we need to remove the mapping from
			 * the pmap and then remove the page from the kernel
			 * object (because, once pages in a kernel object are
			 * unmapped they are no longer needed, unlike, say,
			 * a vnode where you might want the data to persist
			 * until flushed out of a queue).
			 *
			 * for case [2], we need to remove the mapping from
			 * the pmap.  there shouldn't be any pages at the
			 * specified offset in the kernel object [but it
			 * doesn't hurt to call uvm_km_pgremove just to be
			 * safe?]
			 *
			 * uvm_km_pgremove currently does the following:
			 *   for pages in the kernel object in range:
			 *     - drops the swap slot
			 *     - uvm_pagefree the page
			 */

			/*
			 * remove mappings from pmap and drop the pages
			 * from the object.  offsets are always relative
			 * to vm_map_min(kernel_map).
			 */

			pmap_remove(pmap_kernel(), entry->start,
			    entry->start + len);
			uvm_km_pgremove(entry->object.uvm_obj,
			    entry->start - vm_map_min(kernel_map),
			    entry->end - vm_map_min(kernel_map));

			/*
			 * null out kernel_object reference, we've just
			 * dropped it
			 */

			entry->etype &= ~UVM_ET_OBJ;
			entry->object.uvm_obj = NULL;
		} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {

			/*
			 * remove mappings the standard way.
			 */

			pmap_remove(map->pmap, entry->start, entry->end);
		}

		/*
		 * remove entry from map and put it on our list of entries
		 * that we've nuked.  then go to next entry.
		 */

		UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0,0);

		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);

		uvm_map_entry_unlink(map, entry);
		map->size -= len;
		entry->next = first_entry;
		first_entry = entry;
		entry = next;
	}
	if ((map->flags & VM_MAP_DYING) == 0) {
		pmap_update(vm_map_pmap(map));
	}

	/*
	 * now we've cleaned up the map and are ready for the caller to drop
	 * references to the mapped objects.
	 */

	*entry_list = first_entry;
	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
}

/*
 * uvm_unmap_detach: drop references in a chain of map entries
 *
 * => we will free the map entries as we traverse the list.
 */

void
uvm_unmap_detach(first_entry, flags)
	struct vm_map_entry *first_entry;
	int flags;
{
	struct vm_map_entry *next_entry;
	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);

	while (first_entry) {
		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
		UVMHIST_LOG(maphist,
		    "  detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
		    first_entry, first_entry->aref.ar_amap,
		    first_entry->object.uvm_obj,
		    UVM_ET_ISSUBMAP(first_entry));

		/*
		 * drop reference to amap, if we've got one
		 */

		if (first_entry->aref.ar_amap)
			uvm_map_unreference_amap(first_entry, flags);

		/*
		 * drop reference to our backing object, if we've got one
		 */

		KASSERT(!UVM_ET_ISSUBMAP(first_entry));
		if (UVM_ET_ISOBJ(first_entry) &&
		    first_entry->object.uvm_obj->pgops->pgo_detach) {
			(*first_entry->object.uvm_obj->pgops->pgo_detach)
			    (first_entry->object.uvm_obj);
		}
		next_entry = first_entry->next;
		uvm_mapent_free(first_entry);
		first_entry = next_entry;
	}
	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 *   E X T R A C T I O N   F U N C T I O N S
 */

/*
 * uvm_map_reserve: reserve space in a vm_map for future use.
 *
 * => we reserve space in a map by putting a dummy map entry in the
 *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
 * => map should be unlocked (we will write lock it)
 * => we return true if we were able to reserve space
 * => XXXCDC: should be inline?
 */

int
uvm_map_reserve(map, size, offset, align, raddr)
	struct vm_map *map;
	vsize_t size;
	vaddr_t offset;	/* hint for pmap_prefer */
	vsize_t align;	/* alignment hint */
	vaddr_t *raddr;	/* IN:hint, OUT: reserved VA */
{
	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
	    map,size,offset,raddr);

	size = round_page(size);
	if (*raddr < vm_map_min(map))
		*raddr = vm_map_min(map);	/* hint */

	/*
	 * reserve some virtual space.
	 */

	if (uvm_map(map, raddr, size, NULL, offset, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return (FALSE);
	}

	UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
	return (TRUE);
}
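
/*
 * Illustrative use (sketch only, mirroring step 1 of uvm_map_extract()
 * below): reserve "len" bytes in dstmap and remember where the blank
 * entry landed:
 *
 *	vaddr_t dstaddr = vm_map_min(dstmap);
 *	if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
 *		return (ENOMEM);
 *
 * The reservation is later filled in with real entries by
 * uvm_map_replace().
 */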

/*
 * uvm_map_replace: replace a reserved (blank) area of memory with
 * real mappings.
 *
 * => caller must WRITE-LOCK the map
 * => we return TRUE if replacement was a success
 * => we expect the newents chain to have nnewents entries on it and
 *    we expect newents->prev to point to the last entry on the list
 * => note newents is allowed to be NULL
 */
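
/*
 * Illustrative chain format (sketch only): for nnewents == 3 the caller
 * builds
 *
 *	newents -> e1 -> e2 -> e3 -> NULL	(via ->next)
 *	newents->prev == e3			(back pointer to the tail)
 *
 * which is exactly what the extraction loop in uvm_map_extract()
 * produces in steps 3 and 4.
 */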

int
uvm_map_replace(map, start, end, newents, nnewents)
	struct vm_map *map;
	vaddr_t start, end;
	struct vm_map_entry *newents;
	int nnewents;
{
	struct vm_map_entry *oldent, *last;

	/*
	 * first find the blank map entry at the specified address
	 */

	if (!uvm_map_lookup_entry(map, start, &oldent)) {
		return(FALSE);
	}

	/*
	 * check to make sure we have a proper blank entry
	 */

	if (oldent->start != start || oldent->end != end ||
	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
		return (FALSE);
	}

#ifdef DIAGNOSTIC

	/*
	 * sanity check the newents chain
	 */

	{
		struct vm_map_entry *tmpent = newents;
		int nent = 0;
		vaddr_t cur = start;

		while (tmpent) {
			nent++;
			if (tmpent->start < cur)
				panic("uvm_map_replace1");
			if (tmpent->start > tmpent->end || tmpent->end > end) {
				printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
				    tmpent->start, tmpent->end, end);
				panic("uvm_map_replace2");
			}
			cur = tmpent->end;
			if (tmpent->next) {
				if (tmpent->next->prev != tmpent)
					panic("uvm_map_replace3");
			} else {
				if (newents->prev != tmpent)
					panic("uvm_map_replace4");
			}
			tmpent = tmpent->next;
		}
		if (nent != nnewents)
			panic("uvm_map_replace5");
	}
#endif

	/*
	 * map entry is a valid blank!   replace it.   (this does all the
	 * work of map entry link/unlink...).
	 */

	if (newents) {
		last = newents->prev;

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, newents);
		if (map->first_free == oldent)
			map->first_free = last;

		last->next = oldent->next;
		last->next->prev = last;
		newents->prev = oldent->prev;
		newents->prev->next = newents;
		map->nentries = map->nentries + (nnewents - 1);

	} else {

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, oldent->prev);
		if (map->first_free == oldent)
			map->first_free = oldent->prev;

		/* NULL list of new entries: just remove the old one */
		uvm_map_entry_unlink(map, oldent);
	}


	/*
	 * now we can free the old blank entry, unlock the map and return.
	 */

	uvm_mapent_free(oldent);
	return(TRUE);
}

/*
 * uvm_map_extract: extract a mapping from a map and put it somewhere
 *	(maybe removing the old mapping)
 *
 * => maps should be unlocked (we will write lock them)
 * => returns 0 on success, error code otherwise
 * => start must be page aligned
 * => len must be page sized
 * => flags:
 *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
 *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
 *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
 *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
 *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
 *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
 *             be used from within the kernel in a kernel level map <<<
 */
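
/*
 * Illustrative call (sketch only; "p", "uva" and "len" are hypothetical
 * variables): copy a page-aligned range from a process map into the
 * kernel map, leaving the source mapping intact:
 *
 *	vaddr_t kva;
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, uva, len,
 *	    kernel_map, &kva, 0);
 *
 * On success "kva" is the kernel VA of the new mapping; the extracted
 * range shares its backing amaps/objects with the source.
 */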

int
uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
	struct vm_map *srcmap, *dstmap;
	vaddr_t start, *dstaddrp;
	vsize_t len;
	int flags;
{
	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
	    oldstart;
	struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
	    *deadentry, *oldentry;
	vsize_t elen;
	int nchain, error, copy_ok;
	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
	    len,0);
	UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);

	/*
	 * step 0: sanity check: start must be on a page boundary, length
	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
	 * REMOVE.
	 */

	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
		(flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);

	/*
	 * step 1: reserve space in the target map for the extracted area
	 */

	dstaddr = vm_map_min(dstmap);
	if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
		return(ENOMEM);
	*dstaddrp = dstaddr;	/* pass address back to caller */
	UVMHIST_LOG(maphist, "  dstaddr=0x%x", dstaddr,0,0,0);

	/*
	 * step 2: setup for the extraction process loop by init'ing the
	 * map entry chain, locking src map, and looking up the first useful
	 * entry in the map.
	 */

	end = start + len;
	newend = dstaddr + len;
	chain = endchain = NULL;
	nchain = 0;
	vm_map_lock(srcmap);

	if (uvm_map_lookup_entry(srcmap, start, &entry)) {

		/* "start" is within an entry */
		if (flags & UVM_EXTRACT_QREF) {

			/*
			 * for quick references we don't clip the entry, so
			 * the entry may map space "before" the starting
			 * virtual address... this is the "fudge" factor
			 * (which can be non-zero only the first time
			 * through the "while" loop in step 3).
			 */

			fudge = start - entry->start;
		} else {

			/*
			 * normal reference: we clip the map to fit (thus
			 * fudge is zero)
			 */

			UVM_MAP_CLIP_START(srcmap, entry, start);
			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
			fudge = 0;
		}
	} else {

		/* "start" is not within an entry ... skip to next entry */
		if (flags & UVM_EXTRACT_CONTIG) {
			error = EINVAL;
			goto bad;    /* definite hole here ... */
		}

		entry = entry->next;
		fudge = 0;
	}

	/* save values from srcmap for step 6 */
	orig_entry = entry;
	orig_fudge = fudge;

	/*
	 * step 3: now start looping through the map entries, extracting
	 * as we go.
	 */

	while (entry->start < end && entry != &srcmap->header) {

		/* if we are not doing a quick reference, clip it */
		if ((flags & UVM_EXTRACT_QREF) == 0)
			UVM_MAP_CLIP_END(srcmap, entry, end);

		/* clear needs_copy (allow chunking) */
		if (UVM_ET_ISNEEDSCOPY(entry)) {
			if (fudge)
				oldstart = entry->start;
			else
				oldstart = 0;	/* XXX: gcc */
			amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
				error = ENOMEM;
				goto bad;
			}

			/* amap_copy could clip (during chunk)!  update fudge */
			if (fudge) {
				fudge = fudge - (entry->start - oldstart);
				orig_fudge = fudge;
			}
		}

		/* calculate the offset of this from "start" */
		oldoffset = (entry->start + fudge) - start;

		/* allocate a new map entry */
		newentry = uvm_mapent_alloc(dstmap, 0);
		if (newentry == NULL) {
			error = ENOMEM;
			goto bad;
		}

		/* set up new map entry */
		newentry->next = NULL;
		newentry->prev = endchain;
		newentry->start = dstaddr + oldoffset;
		newentry->end =
		    newentry->start + (entry->end - (entry->start + fudge));
		if (newentry->end > newend || newentry->end < newentry->start)
			newentry->end = newend;
		newentry->object.uvm_obj = entry->object.uvm_obj;
		if (newentry->object.uvm_obj) {
			if (newentry->object.uvm_obj->pgops->pgo_reference)
				newentry->object.uvm_obj->pgops->
				    pgo_reference(newentry->object.uvm_obj);
			newentry->offset = entry->offset + fudge;
		} else {
			newentry->offset = 0;
		}
		newentry->etype = entry->etype;
		newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
		    entry->max_protection : entry->protection;
		newentry->max_protection = entry->max_protection;
		newentry->inheritance = entry->inheritance;
		newentry->wired_count = 0;
		newentry->aref.ar_amap = entry->aref.ar_amap;
		if (newentry->aref.ar_amap) {
			newentry->aref.ar_pageoff =
			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
			uvm_map_reference_amap(newentry, AMAP_SHARED |
			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
		} else {
			newentry->aref.ar_pageoff = 0;
		}
		newentry->advice = entry->advice;

		/* now link it on the chain */
		nchain++;
		if (endchain == NULL) {
			chain = endchain = newentry;
		} else {
			endchain->next = newentry;
			endchain = newentry;
		}

		/* end of 'while' loop! */
		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
		    (entry->next == &srcmap->header ||
		     entry->next->start != entry->end)) {
			error = EINVAL;
			goto bad;
		}
		entry = entry->next;
		fudge = 0;
	}

	/*
	 * step 4: close off chain (in format expected by uvm_map_replace)
	 */

	if (chain)
		chain->prev = endchain;

	/*
	 * step 5: attempt to lock the dest map so we can pmap_copy.
	 * note usage of copy_ok:
	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
	 */

	if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
		copy_ok = 1;
		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
		    nchain)) {
			if (srcmap != dstmap)
				vm_map_unlock(dstmap);
			error = EIO;
			goto bad;
		}
	} else {
		copy_ok = 0;
		/* replace deferred until step 7 */
	}

	/*
	 * step 6: traverse the srcmap a second time to do the following:
	 *  - if we got a lock on the dstmap do pmap_copy
	 *  - if UVM_EXTRACT_REMOVE remove the entries
	 * we make use of orig_entry and orig_fudge (saved in step 2)
	 */

	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {

		/* purge possible stale hints from srcmap */
		if (flags & UVM_EXTRACT_REMOVE) {
			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
			if (srcmap->first_free->start >= start)
				srcmap->first_free = orig_entry->prev;
		}

		entry = orig_entry;
		fudge = orig_fudge;
		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */

		while (entry->start < end && entry != &srcmap->header) {
			if (copy_ok) {
				oldoffset = (entry->start + fudge) - start;
				elen = MIN(end, entry->end) -
				    (entry->start + fudge);
				pmap_copy(dstmap->pmap, srcmap->pmap,
				    dstaddr + oldoffset, elen,
				    entry->start + fudge);
			}

			/* we advance "entry" in the following if statement */
			if (flags & UVM_EXTRACT_REMOVE) {
				pmap_remove(srcmap->pmap, entry->start,
				    entry->end);
				oldentry = entry;	/* save entry */
				entry = entry->next;	/* advance */
				uvm_map_entry_unlink(srcmap, oldentry);
				/* add to dead list */
				oldentry->next = deadentry;
				deadentry = oldentry;
			} else {
				entry = entry->next;	/* advance */
			}

			/* end of 'while' loop */
			fudge = 0;
		}
		pmap_update(srcmap->pmap);

		/*
		 * unlock dstmap.  we will dispose of deadentry in
		 * step 7 if needed
		 */

		if (copy_ok && srcmap != dstmap)
			vm_map_unlock(dstmap);

	} else {
		deadentry = NULL;
	}

	/*
	 * step 7: we are done with the source map, unlock.  if copy_ok
	 * is 0 then we have not replaced the dummy mapping in dstmap yet
	 * and we need to do so now.
	 */

	vm_map_unlock(srcmap);
	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
		uvm_unmap_detach(deadentry, 0);   /* dispose of old entries */

	/* now do the replacement if we didn't do it in step 5 */
	if (copy_ok == 0) {
		vm_map_lock(dstmap);
		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
		    nchain);
		vm_map_unlock(dstmap);

		if (error == FALSE) {
			error = EIO;
			goto bad2;
		}
	}
	return(0);

	/*
	 * bad: failure recovery
	 */
bad:
	vm_map_unlock(srcmap);
bad2:			/* src already unlocked */
	if (chain)
		uvm_unmap_detach(chain,
		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
	uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
	return(error);
}

/* end of extraction functions */

/*
 * uvm_map_submap: punch down part of a map into a submap
 *
 * => only the kernel_map is allowed to be submapped
 * => the purpose of submapping is to break up the locking granularity
 *	of a larger map
 * => the range specified must have been mapped previously with a uvm_map()
 *	call [with uobj==NULL] to create a blank map entry in the main map.
 *	[And it had better still be blank!]
 * => maps which contain submaps should never be copied or forked.
 * => to remove a submap, use uvm_unmap() on the main map
 *	and then uvm_map_deallocate() the submap.
 * => main map must be unlocked.
 * => submap must have been init'd and have a zero reference count.
 *	[need not be locked as we don't actually reference it]
 */
1926
1927 int
1928 uvm_map_submap(map, start, end, submap)
1929 struct vm_map *map, *submap;
1930 vaddr_t start, end;
1931 {
1932 struct vm_map_entry *entry;
1933 int error;
1934
1935 vm_map_lock(map);
1936 VM_MAP_RANGE_CHECK(map, start, end);
1937
1938 if (uvm_map_lookup_entry(map, start, &entry)) {
1939 UVM_MAP_CLIP_START(map, entry, start);
1940 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
1941 } else {
1942 entry = NULL;
1943 }
1944
1945 if (entry != NULL &&
1946 entry->start == start && entry->end == end &&
1947 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
1948 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
1949 entry->etype |= UVM_ET_SUBMAP;
1950 entry->object.sub_map = submap;
1951 entry->offset = 0;
1952 uvm_map_reference(submap);
1953 error = 0;
1954 } else {
1955 error = EINVAL;
1956 }
1957 vm_map_unlock(map);
1958 return error;
1959 }
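
/*
 * Example (illustrative sketch, not compiled): the life cycle described
 * in the header comment above.  In practice, creation is normally
 * wrapped up by uvm_km_suballoc(); the names below are hypothetical.
 */
#if 0
	/* the range sva..eva must already be a blank entry in kernel_map */
	if (uvm_map_submap(kernel_map, sva, eva, submap) != 0)
		panic("example: submap insertion failed");

	/* and, much later, removal per the header comment above: */
	uvm_unmap(kernel_map, sva, eva);	/* main map entry */
	uvm_map_deallocate(submap);		/* our submap reference */
#endif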
1960
1961
1962 /*
1963 * uvm_map_protect: change map protection
1964 *
1965 * => set_max means set max_protection.
1966 * => map must be unlocked.
1967 */
1968
1969 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
1970 ~VM_PROT_WRITE : VM_PROT_ALL)
1971
1972 int
1973 uvm_map_protect(map, start, end, new_prot, set_max)
1974 struct vm_map *map;
1975 vaddr_t start, end;
1976 vm_prot_t new_prot;
1977 boolean_t set_max;
1978 {
1979 struct vm_map_entry *current, *entry;
1980 int error = 0;
1981 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
1982 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
1983 map, start, end, new_prot);
1984
1985 vm_map_lock(map);
1986 VM_MAP_RANGE_CHECK(map, start, end);
1987 if (uvm_map_lookup_entry(map, start, &entry)) {
1988 UVM_MAP_CLIP_START(map, entry, start);
1989 } else {
1990 entry = entry->next;
1991 }
1992
1993 /*
1994 * make a first pass to check for protection violations.
1995 */
1996
1997 current = entry;
1998 while ((current != &map->header) && (current->start < end)) {
1999 if (UVM_ET_ISSUBMAP(current)) {
2000 error = EINVAL;
2001 goto out;
2002 }
2003 if ((new_prot & current->max_protection) != new_prot) {
2004 error = EACCES;
2005 goto out;
2006 }
2007 /*
2008 * Don't allow VM_PROT_EXECUTE to be set on entries that
2009 * point to vnodes that are associated with a NOEXEC file
2010 * system.
2011 */
2012 if (UVM_ET_ISOBJ(current) &&
2013 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
2014 struct vnode *vp =
2015 (struct vnode *) current->object.uvm_obj;
2016
2017 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
2018 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
2019 error = EACCES;
2020 goto out;
2021 }
2022 }
2023 current = current->next;
2024 }
2025
2026 /* go back and fix up protections (no need to clip this time). */
2027
2028 current = entry;
2029 while ((current != &map->header) && (current->start < end)) {
2030 vm_prot_t old_prot;
2031
2032 UVM_MAP_CLIP_END(map, current, end);
2033 old_prot = current->protection;
2034 if (set_max)
2035 current->protection =
2036 (current->max_protection = new_prot) & old_prot;
2037 else
2038 current->protection = new_prot;
2039
2040 		/*
2041 		 * update the physical map if necessary.  for copy-on-write
2042 		 * entries, MASK strips VM_PROT_WRITE so writes still fault.
2043 		 */
2044 
2045 		if (current->protection != old_prot) {
2046 			/* update pmap! */
2047 			pmap_protect(map->pmap, current->start, current->end,
2048 				current->protection & MASK(current));
2049
2050 /*
2051 * If this entry points at a vnode, and the
2052 * protection includes VM_PROT_EXECUTE, mark
2053 * the vnode as VEXECMAP.
2054 */
2055 if (UVM_ET_ISOBJ(current)) {
2056 struct uvm_object *uobj =
2057 current->object.uvm_obj;
2058
2059 if (UVM_OBJ_IS_VNODE(uobj) &&
2060 (current->protection & VM_PROT_EXECUTE))
2061 vn_markexec((struct vnode *) uobj);
2062 }
2063 }
2064
2065 /*
2066 * If the map is configured to lock any future mappings,
2067 * wire this entry now if the old protection was VM_PROT_NONE
2068 * and the new protection is not VM_PROT_NONE.
2069 */
2070
2071 		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
2072 		    VM_MAPENT_ISWIRED(current) == 0 &&
2073 		    old_prot == VM_PROT_NONE &&
2074 		    new_prot != VM_PROT_NONE) {
2075 			if (uvm_map_pageable(map, current->start,
2076 			    current->end, FALSE,
2077 			    UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
2078
2079 /*
2080 * If locking the entry fails, remember the
2081 * error if it's the first one. Note we
2082 * still continue setting the protection in
2083 * the map, but will return the error
2084 * condition regardless.
2085 *
2086 * XXX Ignore what the actual error is,
2087 * XXX just call it a resource shortage
2088 * XXX so that it doesn't get confused
2089 * XXX what uvm_map_protect() itself would
2090 * XXX normally return.
2091 */
2092
2093 error = ENOMEM;
2094 }
2095 }
2096 current = current->next;
2097 }
2098 pmap_update(map->pmap);
2099
2100 out:
2101 vm_map_unlock(map);
2102 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
2103 return error;
2104 }
2105
2106 #undef MASK
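
/*
 * Example (illustrative sketch, not compiled): an mprotect(2)-style
 * caller of uvm_map_protect().  The map must be unlocked on entry;
 * the function takes and drops its own lock.  Names are hypothetical.
 */
#if 0
static int
example_mprotect(struct proc *p, vaddr_t va, vsize_t len, vm_prot_t prot)
{
	/* FALSE: set the regular protection, leave max_protection alone */
	return (uvm_map_protect(&p->p_vmspace->vm_map, trunc_page(va),
	    round_page(va + len), prot, FALSE));
}
#endif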
2107
2108 /*
2109 * uvm_map_inherit: set inheritance code for range of addrs in map.
2110 *
2111 * => map must be unlocked
2112 * => note that the inherit code is used during a "fork". see fork
2113 * code for details.
2114 */
2115
2116 int
2117 uvm_map_inherit(map, start, end, new_inheritance)
2118 struct vm_map *map;
2119 vaddr_t start;
2120 vaddr_t end;
2121 vm_inherit_t new_inheritance;
2122 {
2123 struct vm_map_entry *entry, *temp_entry;
2124 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
2125 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
2126 map, start, end, new_inheritance);
2127
2128 switch (new_inheritance) {
2129 case MAP_INHERIT_NONE:
2130 case MAP_INHERIT_COPY:
2131 case MAP_INHERIT_SHARE:
2132 break;
2133 default:
2134 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2135 return EINVAL;
2136 }
2137
2138 vm_map_lock(map);
2139 VM_MAP_RANGE_CHECK(map, start, end);
2140 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2141 entry = temp_entry;
2142 UVM_MAP_CLIP_START(map, entry, start);
2143 } else {
2144 entry = temp_entry->next;
2145 }
2146 while ((entry != &map->header) && (entry->start < end)) {
2147 UVM_MAP_CLIP_END(map, entry, end);
2148 entry->inheritance = new_inheritance;
2149 entry = entry->next;
2150 }
2151 vm_map_unlock(map);
2152 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2153 return 0;
2154 }
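
/*
 * Example (illustrative sketch, not compiled): a minherit(2)-style call
 * marking a range to be shared with the child across fork; names are
 * hypothetical.  uvmspace_fork() below shows how the codes are consumed.
 */
#if 0
	error = uvm_map_inherit(&p->p_vmspace->vm_map, trunc_page(va),
	    round_page(va + len), MAP_INHERIT_SHARE);
#endif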
2155
2156 /*
2157 * uvm_map_advice: set advice code for range of addrs in map.
2158 *
2159 * => map must be unlocked
2160 */
2161
2162 int
2163 uvm_map_advice(map, start, end, new_advice)
2164 struct vm_map *map;
2165 vaddr_t start;
2166 vaddr_t end;
2167 int new_advice;
2168 {
2169 struct vm_map_entry *entry, *temp_entry;
2170 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
2171 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
2172 map, start, end, new_advice);
2173
2174 vm_map_lock(map);
2175 VM_MAP_RANGE_CHECK(map, start, end);
2176 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2177 entry = temp_entry;
2178 UVM_MAP_CLIP_START(map, entry, start);
2179 } else {
2180 entry = temp_entry->next;
2181 }
2182
2183 /*
2184 * XXXJRT: disallow holes?
2185 */
2186
2187 while ((entry != &map->header) && (entry->start < end)) {
2188 UVM_MAP_CLIP_END(map, entry, end);
2189
2190 switch (new_advice) {
2191 case MADV_NORMAL:
2192 case MADV_RANDOM:
2193 case MADV_SEQUENTIAL:
2194 /* nothing special here */
2195 break;
2196
2197 default:
2198 vm_map_unlock(map);
2199 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2200 return EINVAL;
2201 }
2202 entry->advice = new_advice;
2203 entry = entry->next;
2204 }
2205
2206 vm_map_unlock(map);
2207 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2208 return 0;
2209 }
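
/*
 * Example (illustrative sketch, not compiled): an madvise(2)-style call
 * hinting sequential access for a range; names are hypothetical.
 */
#if 0
	error = uvm_map_advice(&p->p_vmspace->vm_map, trunc_page(va),
	    round_page(va + len), MADV_SEQUENTIAL);
#endif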
2210
2211 /*
2212 * uvm_map_pageable: sets the pageability of a range in a map.
2213 *
2214 * => wires map entries. should not be used for transient page locking.
2215 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
2216  * => regions specified as not pageable require lock-down (wired) memory
2217 * and page tables.
2218 * => map must never be read-locked
2219 * => if islocked is TRUE, map is already write-locked
2220 * => we always unlock the map, since we must downgrade to a read-lock
2221 * to call uvm_fault_wire()
2222 * => XXXCDC: check this and try and clean it up.
2223 */
2224
2225 int
2226 uvm_map_pageable(map, start, end, new_pageable, lockflags)
2227 struct vm_map *map;
2228 vaddr_t start, end;
2229 boolean_t new_pageable;
2230 int lockflags;
2231 {
2232 struct vm_map_entry *entry, *start_entry, *failed_entry;
2233 int rv;
2234 #ifdef DIAGNOSTIC
2235 u_int timestamp_save;
2236 #endif
2237 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
2238 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
2239 map, start, end, new_pageable);
2240 KASSERT(map->flags & VM_MAP_PAGEABLE);
2241
2242 if ((lockflags & UVM_LK_ENTER) == 0)
2243 vm_map_lock(map);
2244 VM_MAP_RANGE_CHECK(map, start, end);
2245
2246 /*
2247 * only one pageability change may take place at one time, since
2248 * uvm_fault_wire assumes it will be called only once for each
2249 * wiring/unwiring. therefore, we have to make sure we're actually
2250 * changing the pageability for the entire region. we do so before
2251 * making any changes.
2252 */
2253
2254 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
2255 if ((lockflags & UVM_LK_EXIT) == 0)
2256 vm_map_unlock(map);
2257
2258 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
2259 return EFAULT;
2260 }
2261 entry = start_entry;
2262
2263 /*
2264 * handle wiring and unwiring separately.
2265 */
2266
2267 if (new_pageable) { /* unwire */
2268 UVM_MAP_CLIP_START(map, entry, start);
2269
2270 /*
2271 * unwiring. first ensure that the range to be unwired is
2272 * really wired down and that there are no holes.
2273 */
2274
2275 while ((entry != &map->header) && (entry->start < end)) {
2276 if (entry->wired_count == 0 ||
2277 (entry->end < end &&
2278 (entry->next == &map->header ||
2279 entry->next->start > entry->end))) {
2280 if ((lockflags & UVM_LK_EXIT) == 0)
2281 vm_map_unlock(map);
2282 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
2283 return EINVAL;
2284 }
2285 entry = entry->next;
2286 }
2287
2288 /*
2289 * POSIX 1003.1b - a single munlock call unlocks a region,
2290 * regardless of the number of mlock calls made on that
2291 * region.
2292 */
2293
2294 entry = start_entry;
2295 while ((entry != &map->header) && (entry->start < end)) {
2296 UVM_MAP_CLIP_END(map, entry, end);
2297 if (VM_MAPENT_ISWIRED(entry))
2298 uvm_map_entry_unwire(map, entry);
2299 entry = entry->next;
2300 }
2301 if ((lockflags & UVM_LK_EXIT) == 0)
2302 vm_map_unlock(map);
2303 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2304 return 0;
2305 }
2306
2307 /*
2308 * wire case: in two passes [XXXCDC: ugly block of code here]
2309 *
2310 * 1: holding the write lock, we create any anonymous maps that need
2311 * to be created. then we clip each map entry to the region to
2312 * be wired and increment its wiring count.
2313 *
2314 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
2315 * in the pages for any newly wired area (wired_count == 1).
2316 *
2317 * downgrading to a read lock for uvm_fault_wire avoids a possible
2318 * deadlock with another thread that may have faulted on one of
2319 * the pages to be wired (it would mark the page busy, blocking
2320 * us, then in turn block on the map lock that we hold). because
2321 * of problems in the recursive lock package, we cannot upgrade
2322 * to a write lock in vm_map_lookup. thus, any actions that
2323 * require the write lock must be done beforehand. because we
2324 * keep the read lock on the map, the copy-on-write status of the
2325 * entries we modify here cannot change.
2326 */
2327
2328 while ((entry != &map->header) && (entry->start < end)) {
2329 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
2330
2331 /*
2332 * perform actions of vm_map_lookup that need the
2333 * write lock on the map: create an anonymous map
2334 * for a copy-on-write region, or an anonymous map
2335 * for a zero-fill region. (XXXCDC: submap case
2336 * ok?)
2337 */
2338
2339 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
2340 if (UVM_ET_ISNEEDSCOPY(entry) &&
2341 ((entry->max_protection & VM_PROT_WRITE) ||
2342 (entry->object.uvm_obj == NULL))) {
2343 amap_copy(map, entry, M_WAITOK, TRUE,
2344 start, end);
2345 /* XXXCDC: wait OK? */
2346 }
2347 }
2348 }
2349 UVM_MAP_CLIP_START(map, entry, start);
2350 UVM_MAP_CLIP_END(map, entry, end);
2351 entry->wired_count++;
2352
2353 /*
2354 * Check for holes
2355 */
2356
2357 if (entry->protection == VM_PROT_NONE ||
2358 (entry->end < end &&
2359 (entry->next == &map->header ||
2360 entry->next->start > entry->end))) {
2361
2362 /*
2363 * found one. amap creation actions do not need to
2364 * be undone, but the wired counts need to be restored.
2365 */
2366
2367 while (entry != &map->header && entry->end > start) {
2368 entry->wired_count--;
2369 entry = entry->prev;
2370 }
2371 if ((lockflags & UVM_LK_EXIT) == 0)
2372 vm_map_unlock(map);
2373 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
2374 return EINVAL;
2375 }
2376 entry = entry->next;
2377 }
2378
2379 /*
2380 * Pass 2.
2381 */
2382
2383 #ifdef DIAGNOSTIC
2384 timestamp_save = map->timestamp;
2385 #endif
2386 vm_map_busy(map);
2387 vm_map_downgrade(map);
2388
2389 rv = 0;
2390 entry = start_entry;
2391 while (entry != &map->header && entry->start < end) {
2392 if (entry->wired_count == 1) {
2393 rv = uvm_fault_wire(map, entry->start, entry->end,
2394 VM_FAULT_WIREMAX, entry->max_protection);
2395 if (rv) {
2396
2397 /*
2398 * wiring failed. break out of the loop.
2399 * we'll clean up the map below, once we
2400 * have a write lock again.
2401 */
2402
2403 break;
2404 }
2405 }
2406 entry = entry->next;
2407 }
2408
2409 if (rv) { /* failed? */
2410
2411 /*
2412 * Get back to an exclusive (write) lock.
2413 */
2414
2415 vm_map_upgrade(map);
2416 vm_map_unbusy(map);
2417
2418 #ifdef DIAGNOSTIC
2419 if (timestamp_save != map->timestamp)
2420 panic("uvm_map_pageable: stale map");
2421 #endif
2422
2423 /*
2424 * first drop the wiring count on all the entries
2425 * which haven't actually been wired yet.
2426 */
2427
2428 failed_entry = entry;
2429 while (entry != &map->header && entry->start < end) {
2430 entry->wired_count--;
2431 entry = entry->next;
2432 }
2433
2434 /*
2435 * now, unwire all the entries that were successfully
2436 * wired above.
2437 */
2438
2439 entry = start_entry;
2440 while (entry != failed_entry) {
2441 entry->wired_count--;
2442 if (VM_MAPENT_ISWIRED(entry) == 0)
2443 uvm_map_entry_unwire(map, entry);
2444 entry = entry->next;
2445 }
2446 if ((lockflags & UVM_LK_EXIT) == 0)
2447 vm_map_unlock(map);
2448 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
2449 return(rv);
2450 }
2451
2452 /* We are holding a read lock here. */
2453 if ((lockflags & UVM_LK_EXIT) == 0) {
2454 vm_map_unbusy(map);
2455 vm_map_unlock_read(map);
2456 } else {
2457
2458 /*
2459 * Get back to an exclusive (write) lock.
2460 */
2461
2462 vm_map_upgrade(map);
2463 vm_map_unbusy(map);
2464 }
2465
2466 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
2467 return 0;
2468 }
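
/*
 * Example (illustrative sketch, not compiled): mlock(2)/munlock(2)-style
 * calls.  The caller holds no map lock, so no UVM_LK_* flags are passed;
 * names are hypothetical.
 */
#if 0
	/* wire the range down (new_pageable == FALSE) */
	error = uvm_map_pageable(&p->p_vmspace->vm_map, trunc_page(va),
	    round_page(va + len), FALSE, 0);

	/* ... later, unwire it (new_pageable == TRUE) */
	error = uvm_map_pageable(&p->p_vmspace->vm_map, trunc_page(va),
	    round_page(va + len), TRUE, 0);
#endif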
2469
2470 /*
2471 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
2472 * all mapped regions.
2473 *
2474 * => map must not be locked.
2475 * => if no flags are specified, all regions are unwired.
2476 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
2477 */
2478
2479 int
2480 uvm_map_pageable_all(map, flags, limit)
2481 struct vm_map *map;
2482 int flags;
2483 vsize_t limit;
2484 {
2485 struct vm_map_entry *entry, *failed_entry;
2486 vsize_t size;
2487 int rv;
2488 #ifdef DIAGNOSTIC
2489 u_int timestamp_save;
2490 #endif
2491 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
2492 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
2493
2494 KASSERT(map->flags & VM_MAP_PAGEABLE);
2495
2496 vm_map_lock(map);
2497
2498 /*
2499 * handle wiring and unwiring separately.
2500 */
2501
2502 if (flags == 0) { /* unwire */
2503
2504 /*
2505 * POSIX 1003.1b -- munlockall unlocks all regions,
2506 * regardless of how many times mlockall has been called.
2507 */
2508
2509 for (entry = map->header.next; entry != &map->header;
2510 entry = entry->next) {
2511 if (VM_MAPENT_ISWIRED(entry))
2512 uvm_map_entry_unwire(map, entry);
2513 }
2514 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
2515 vm_map_unlock(map);
2516 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2517 return 0;
2518 }
2519
2520 if (flags & MCL_FUTURE) {
2521
2522 /*
2523 * must wire all future mappings; remember this.
2524 */
2525
2526 vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
2527 }
2528
2529 if ((flags & MCL_CURRENT) == 0) {
2530
2531 /*
2532 * no more work to do!
2533 */
2534
2535 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
2536 vm_map_unlock(map);
2537 return 0;
2538 }
2539
2540 /*
2541 * wire case: in three passes [XXXCDC: ugly block of code here]
2542 *
2543 * 1: holding the write lock, count all pages mapped by non-wired
2544 * entries. if this would cause us to go over our limit, we fail.
2545 *
2546 * 2: still holding the write lock, we create any anonymous maps that
2547  *    need to be created. then we increment the wiring count of each entry.
2548 *
2549 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
2550 * in the pages for any newly wired area (wired_count == 1).
2551 *
2552 * downgrading to a read lock for uvm_fault_wire avoids a possible
2553 * deadlock with another thread that may have faulted on one of
2554 * the pages to be wired (it would mark the page busy, blocking
2555 * us, then in turn block on the map lock that we hold). because
2556 * of problems in the recursive lock package, we cannot upgrade
2557 * to a write lock in vm_map_lookup. thus, any actions that
2558 * require the write lock must be done beforehand. because we
2559 * keep the read lock on the map, the copy-on-write status of the
2560 * entries we modify here cannot change.
2561 */
2562
2563 for (size = 0, entry = map->header.next; entry != &map->header;
2564 entry = entry->next) {
2565 if (entry->protection != VM_PROT_NONE &&
2566 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
2567 size += entry->end - entry->start;
2568 }
2569 }
2570
2571 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
2572 vm_map_unlock(map);
2573 return ENOMEM;
2574 }
2575
2576 /* XXX non-pmap_wired_count case must be handled by caller */
2577 #ifdef pmap_wired_count
2578 if (limit != 0 &&
2579 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
2580 vm_map_unlock(map);
2581 return ENOMEM;
2582 }
2583 #endif
2584
2585 /*
2586 * Pass 2.
2587 */
2588
2589 for (entry = map->header.next; entry != &map->header;
2590 entry = entry->next) {
2591 if (entry->protection == VM_PROT_NONE)
2592 continue;
2593 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
2594
2595 /*
2596 * perform actions of vm_map_lookup that need the
2597 * write lock on the map: create an anonymous map
2598 * for a copy-on-write region, or an anonymous map
2599 * for a zero-fill region. (XXXCDC: submap case
2600 * ok?)
2601 */
2602
2603 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
2604 if (UVM_ET_ISNEEDSCOPY(entry) &&
2605 ((entry->max_protection & VM_PROT_WRITE) ||
2606 (entry->object.uvm_obj == NULL))) {
2607 amap_copy(map, entry, M_WAITOK, TRUE,
2608 entry->start, entry->end);
2609 /* XXXCDC: wait OK? */
2610 }
2611 }
2612 }
2613 entry->wired_count++;
2614 }
2615
2616 /*
2617 * Pass 3.
2618 */
2619
2620 #ifdef DIAGNOSTIC
2621 timestamp_save = map->timestamp;
2622 #endif
2623 vm_map_busy(map);
2624 vm_map_downgrade(map);
2625
2626 rv = 0;
2627 for (entry = map->header.next; entry != &map->header;
2628 entry = entry->next) {
2629 if (entry->wired_count == 1) {
2630 rv = uvm_fault_wire(map, entry->start, entry->end,
2631 VM_FAULT_WIREMAX, entry->max_protection);
2632 if (rv) {
2633
2634 /*
2635 * wiring failed. break out of the loop.
2636 * we'll clean up the map below, once we
2637 * have a write lock again.
2638 */
2639
2640 break;
2641 }
2642 }
2643 }
2644
2645 if (rv) {
2646
2647 /*
2648 * Get back an exclusive (write) lock.
2649 */
2650
2651 vm_map_upgrade(map);
2652 vm_map_unbusy(map);
2653
2654 #ifdef DIAGNOSTIC
2655 if (timestamp_save != map->timestamp)
2656 panic("uvm_map_pageable_all: stale map");
2657 #endif
2658
2659 /*
2660 * first drop the wiring count on all the entries
2661 * which haven't actually been wired yet.
2662 *
2663 * Skip VM_PROT_NONE entries like we did above.
2664 */
2665
2666 failed_entry = entry;
2667 for (/* nothing */; entry != &map->header;
2668 entry = entry->next) {
2669 if (entry->protection == VM_PROT_NONE)
2670 continue;
2671 entry->wired_count--;
2672 }
2673
2674 /*
2675 * now, unwire all the entries that were successfully
2676 * wired above.
2677 *
2678 * Skip VM_PROT_NONE entries like we did above.
2679 */
2680
2681 for (entry = map->header.next; entry != failed_entry;
2682 entry = entry->next) {
2683 if (entry->protection == VM_PROT_NONE)
2684 continue;
2685 entry->wired_count--;
2686 if (VM_MAPENT_ISWIRED(entry))
2687 uvm_map_entry_unwire(map, entry);
2688 }
2689 vm_map_unlock(map);
2690 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
2691 return (rv);
2692 }
2693
2694 /* We are holding a read lock here. */
2695 vm_map_unbusy(map);
2696 vm_map_unlock_read(map);
2697
2698 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
2699 return 0;
2700 }
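
/*
 * Example (illustrative sketch, not compiled): an mlockall(2)-style call
 * wiring all current and future mappings against the caller's memlock
 * limit; flags == 0 would instead unwire everything (munlockall).
 */
#if 0
	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
	    MCL_CURRENT | MCL_FUTURE,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif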
2701
2702 /*
2703 * uvm_map_clean: clean out a map range
2704 *
2705 * => valid flags:
2706 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
2707 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
2708 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
2709 * if (flags & PGO_FREE): any cached pages are freed after clean
2710 * => returns an error if any part of the specified range isn't mapped
2711 * => never a need to flush amap layer since the anonymous memory has
2712 * no permanent home, but may deactivate pages there
2713 * => called from sys_msync() and sys_madvise()
2714 * => caller must not write-lock map (read OK).
2715 * => we may sleep while cleaning if SYNCIO [with map read-locked]
2716 */
2717
2718 int
2719 uvm_map_clean(map, start, end, flags)
2720 struct vm_map *map;
2721 vaddr_t start, end;
2722 int flags;
2723 {
2724 struct vm_map_entry *current, *entry;
2725 struct uvm_object *uobj;
2726 struct vm_amap *amap;
2727 struct vm_anon *anon;
2728 struct vm_page *pg;
2729 vaddr_t offset;
2730 vsize_t size;
2731 int error, refs;
2732 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
2733
2734 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
2735 map, start, end, flags);
2736 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
2737 (PGO_FREE|PGO_DEACTIVATE));
2738
2739 vm_map_lock_read(map);
2740 VM_MAP_RANGE_CHECK(map, start, end);
2741 if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
2742 vm_map_unlock_read(map);
2743 return EFAULT;
2744 }
2745
2746 /*
2747 * Make a first pass to check for holes.
2748 */
2749
2750 for (current = entry; current->start < end; current = current->next) {
2751 if (UVM_ET_ISSUBMAP(current)) {
2752 vm_map_unlock_read(map);
2753 return EINVAL;
2754 }
2755 if (end <= current->end) {
2756 break;
2757 }
2758 if (current->end != current->next->start) {
2759 vm_map_unlock_read(map);
2760 return EFAULT;
2761 }
2762 }
2763
2764 error = 0;
2765 for (current = entry; start < end; current = current->next) {
2766 amap = current->aref.ar_amap; /* top layer */
2767 uobj = current->object.uvm_obj; /* bottom layer */
2768 KASSERT(start >= current->start);
2769
2770 /*
2771 * No amap cleaning necessary if:
2772 *
2773 * (1) There's no amap.
2774 *
2775 * (2) We're not deactivating or freeing pages.
2776 */
2777
2778 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
2779 goto flush_object;
2780
2781 amap_lock(amap);
2782 offset = start - current->start;
2783 size = MIN(end, current->end) - start;
2784 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
2785 			anon = amap_lookup(&current->aref, offset);
2786 if (anon == NULL)
2787 continue;
2788
2789 simple_lock(&anon->an_lock);
2790 pg = anon->u.an_page;
2791 if (pg == NULL) {
2792 simple_unlock(&anon->an_lock);
2793 continue;
2794 }
2795
2796 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
2797
2798 /*
2799 * In these first 3 cases, we just deactivate the page.
2800 */
2801
2802 case PGO_CLEANIT|PGO_FREE:
2803 case PGO_CLEANIT|PGO_DEACTIVATE:
2804 case PGO_DEACTIVATE:
2805 deactivate_it:
2806 /*
2807 * skip the page if it's loaned or wired,
2808 * since it shouldn't be on a paging queue
2809 * at all in these cases.
2810 */
2811
2812 uvm_lock_pageq();
2813 if (pg->loan_count != 0 ||
2814 pg->wire_count != 0) {
2815 uvm_unlock_pageq();
2816 simple_unlock(&anon->an_lock);
2817 continue;
2818 }
2819 KASSERT(pg->uanon == anon);
2820 pmap_clear_reference(pg);
2821 uvm_pagedeactivate(pg);
2822 uvm_unlock_pageq();
2823 simple_unlock(&anon->an_lock);
2824 continue;
2825
2826 case PGO_FREE:
2827
2828 /*
2829 * If there are multiple references to
2830 * the amap, just deactivate the page.
2831 */
2832
2833 if (amap_refs(amap) > 1)
2834 goto deactivate_it;
2835
2836 /* skip the page if it's wired */
2837 if (pg->wire_count != 0) {
2838 simple_unlock(&anon->an_lock);
2839 continue;
2840 }
2841 				amap_unadd(&current->aref, offset);
2842 refs = --anon->an_ref;
2843 simple_unlock(&anon->an_lock);
2844 if (refs == 0)
2845 uvm_anfree(anon);
2846 continue;
2847 }
2848 }
2849 amap_unlock(amap);
2850
2851 flush_object:
2852 /*
2853 * flush pages if we've got a valid backing object.
2854 * note that we must always clean object pages before
2855 * freeing them since otherwise we could reveal stale
2856 * data from files.
2857 */
2858
2859 offset = current->offset + (start - current->start);
2860 size = MIN(end, current->end) - start;
2861 if (uobj != NULL) {
2862 simple_lock(&uobj->vmobjlock);
2863 error = (uobj->pgops->pgo_put)(uobj, offset,
2864 offset + size, flags | PGO_CLEANIT);
2865 }
2866 start += size;
2867 }
2868 vm_map_unlock_read(map);
2869 return (error);
2870 }
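
/*
 * Example (illustrative sketch, not compiled): an msync(MS_SYNC)-style
 * call cleaning a range and waiting for the I/O; names are hypothetical.
 * PGO_FREE or PGO_DEACTIVATE could be or'd in (but not both, per the
 * KASSERT above).
 */
#if 0
	error = uvm_map_clean(&p->p_vmspace->vm_map, trunc_page(va),
	    round_page(va + len), PGO_CLEANIT | PGO_SYNCIO);
#endif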
2871
2872
2873 /*
2874 * uvm_map_checkprot: check protection in map
2875 *
2876  * => returns TRUE iff the fully allocated region allows "protection".
2877 * => map must be read or write locked by caller.
2878 */
2879
2880 boolean_t
2881 uvm_map_checkprot(map, start, end, protection)
2882 struct vm_map * map;
2883 vaddr_t start, end;
2884 vm_prot_t protection;
2885 {
2886 struct vm_map_entry *entry;
2887 struct vm_map_entry *tmp_entry;
2888
2889 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
2890 return(FALSE);
2891 }
2892 entry = tmp_entry;
2893 while (start < end) {
2894 if (entry == &map->header) {
2895 return(FALSE);
2896 }
2897
2898 /*
2899 * no holes allowed
2900 */
2901
2902 if (start < entry->start) {
2903 return(FALSE);
2904 }
2905
2906 /*
2907 * check protection associated with entry
2908 */
2909
2910 if ((entry->protection & protection) != protection) {
2911 return(FALSE);
2912 }
2913 start = entry->end;
2914 entry = entry->next;
2915 }
2916 return(TRUE);
2917 }
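
/*
 * Example (illustrative sketch, not compiled): verifying that a range
 * is fully mapped and writable before touching it; names are
 * hypothetical.  Per the header comment, the caller must hold the map
 * lock (read is enough).
 */
#if 0
	vm_map_lock_read(map);
	if (!uvm_map_checkprot(map, sva, eva,
	    VM_PROT_READ | VM_PROT_WRITE)) {
		vm_map_unlock_read(map);
		return (EFAULT);
	}
	/* ... access the range, then ... */
	vm_map_unlock_read(map);
#endif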
2918
2919 /*
2920 * uvmspace_alloc: allocate a vmspace structure.
2921 *
2922 * - structure includes vm_map and pmap
2923 * - XXX: no locking on this structure
2924 * - refcnt set to 1, rest must be init'd by caller
2925 */
2926 struct vmspace *
2927 uvmspace_alloc(min, max)
2928 vaddr_t min, max;
2929 {
2930 struct vmspace *vm;
2931 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
2932
2933 vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
2934 uvmspace_init(vm, NULL, min, max);
2935 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
2936 return (vm);
2937 }
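
/*
 * Example (illustrative sketch, not compiled): giving a process a fresh
 * address space spanning the usual user range.  VM_MIN_ADDRESS and
 * VM_MAXUSER_ADDRESS come from <machine/vmparam.h>.
 */
#if 0
	p->p_vmspace = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
#endif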
2938
2939 /*
2940 * uvmspace_init: initialize a vmspace structure.
2941 *
2942 * - XXX: no locking on this structure
2943  * - refcnt set to 1, rest must be init'd by caller
2944 */
2945 void
2946 uvmspace_init(vm, pmap, min, max)
2947 struct vmspace *vm;
2948 struct pmap *pmap;
2949 vaddr_t min, max;
2950 {
2951 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
2952
2953 memset(vm, 0, sizeof(*vm));
2954 uvm_map_setup(&vm->vm_map, min, max, VM_MAP_PAGEABLE);
2955 if (pmap)
2956 pmap_reference(pmap);
2957 else
2958 pmap = pmap_create();
2959 vm->vm_map.pmap = pmap;
2960 vm->vm_refcnt = 1;
2961 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
2962 }
2963
2964 /*
2965  * uvmspace_share: share a vmspace between two processes
2966 *
2967 * - XXX: no locking on vmspace
2968 * - used for vfork, threads(?)
2969 */
2970
2971 void
2972 uvmspace_share(p1, p2)
2973 struct proc *p1, *p2;
2974 {
2975 p2->p_vmspace = p1->p_vmspace;
2976 p1->p_vmspace->vm_refcnt++;
2977 }
2978
2979 /*
2980 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
2981 *
2982 * - XXX: no locking on vmspace
2983 */
2984
2985 void
2986 uvmspace_unshare(l)
2987 struct lwp *l;
2988 {
2989 struct proc *p = l->l_proc;
2990 struct vmspace *nvm, *ovm = p->p_vmspace;
2991
2992 if (ovm->vm_refcnt == 1)
2993 /* nothing to do: vmspace isn't shared in the first place */
2994 return;
2995
2996 /* make a new vmspace, still holding old one */
2997 nvm = uvmspace_fork(ovm);
2998
2999 pmap_deactivate(l); /* unbind old vmspace */
3000 p->p_vmspace = nvm;
3001 pmap_activate(l); /* switch to new vmspace */
3002
3003 uvmspace_free(ovm); /* drop reference to old vmspace */
3004 }
3005
3006 /*
3007 * uvmspace_exec: the process wants to exec a new program
3008 *
3009 * - XXX: no locking on vmspace
3010 */
3011
3012 void
3013 uvmspace_exec(l, start, end)
3014 struct lwp *l;
3015 vaddr_t start, end;
3016 {
3017 struct proc *p = l->l_proc;
3018 struct vmspace *nvm, *ovm = p->p_vmspace;
3019 struct vm_map *map = &ovm->vm_map;
3020
3021 #ifdef __sparc__
3022 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
3023 kill_user_windows(l); /* before stack addresses go away */
3024 #endif
3025
3026 /*
3027 * see if more than one process is using this vmspace...
3028 */
3029
3030 if (ovm->vm_refcnt == 1) {
3031
3032 /*
3033 * if p is the only process using its vmspace then we can safely
3034 * recycle that vmspace for the program that is being exec'd.
3035 */
3036
3037 #ifdef SYSVSHM
3038 /*
3039 * SYSV SHM semantics require us to kill all segments on an exec
3040 */
3041
3042 if (ovm->vm_shm)
3043 shmexit(ovm);
3044 #endif
3045
3046 /*
3047 * POSIX 1003.1b -- "lock future mappings" is revoked
3048 * when a process execs another program image.
3049 */
3050
3051 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3052
3053 /*
3054 * now unmap the old program
3055 */
3056
3057 pmap_remove_all(map->pmap);
3058 uvm_unmap(map, map->min_offset, map->max_offset);
3059
3060 /*
3061 * resize the map
3062 */
3063
3064 map->min_offset = start;
3065 map->max_offset = end;
3066 } else {
3067
3068 /*
3069 * p's vmspace is being shared, so we can't reuse it for p since
3070 * it is still being used for others. allocate a new vmspace
3071 * for p
3072 */
3073
3074 nvm = uvmspace_alloc(start, end);
3075
3076 /*
3077 * install new vmspace and drop our ref to the old one.
3078 */
3079
3080 pmap_deactivate(l);
3081 p->p_vmspace = nvm;
3082 pmap_activate(l);
3083
3084 uvmspace_free(ovm);
3085 }
3086 }
3087
3088 /*
3089 * uvmspace_free: free a vmspace data structure
3090 *
3091 * - XXX: no locking on vmspace
3092 */
3093
3094 void
3095 uvmspace_free(vm)
3096 struct vmspace *vm;
3097 {
3098 struct vm_map_entry *dead_entries;
3099 struct vm_map *map;
3100 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
3101
3102 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
3103 if (--vm->vm_refcnt > 0) {
3104 return;
3105 }
3106
3107 /*
3108 * at this point, there should be no other references to the map.
3109 * delete all of the mappings, then destroy the pmap.
3110 */
3111
3112 map = &vm->vm_map;
3113 map->flags |= VM_MAP_DYING;
3114 pmap_remove_all(map->pmap);
3115 #ifdef SYSVSHM
3116 /* Get rid of any SYSV shared memory segments. */
3117 if (vm->vm_shm != NULL)
3118 shmexit(vm);
3119 #endif
3120 if (map->nentries) {
3121 uvm_unmap_remove(map, map->min_offset, map->max_offset,
3122 &dead_entries);
3123 if (dead_entries != NULL)
3124 uvm_unmap_detach(dead_entries, 0);
3125 }
3126 pmap_destroy(map->pmap);
3127 pool_put(&uvm_vmspace_pool, vm);
3128 }
3129
3130 /*
3131 * F O R K - m a i n e n t r y p o i n t
3132 */
3133 /*
3134 * uvmspace_fork: fork a process' main map
3135 *
3136 * => create a new vmspace for child process from parent.
3137 * => parent's map must not be locked.
3138 */
3139
3140 struct vmspace *
3141 uvmspace_fork(vm1)
3142 struct vmspace *vm1;
3143 {
3144 struct vmspace *vm2;
3145 struct vm_map *old_map = &vm1->vm_map;
3146 struct vm_map *new_map;
3147 struct vm_map_entry *old_entry;
3148 struct vm_map_entry *new_entry;
3149 pmap_t new_pmap;
3150 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
3151
3152 vm_map_lock(old_map);
3153
3154 vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset);
3155 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
3156 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
3157 new_map = &vm2->vm_map; /* XXX */
3158 new_pmap = new_map->pmap;
3159
3160 old_entry = old_map->header.next;
3161
3162 /*
3163 * go entry-by-entry
3164 */
3165
3166 while (old_entry != &old_map->header) {
3167
3168 /*
3169 * first, some sanity checks on the old entry
3170 */
3171
3172 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
3173 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
3174 !UVM_ET_ISNEEDSCOPY(old_entry));
3175
3176 switch (old_entry->inheritance) {
3177 case MAP_INHERIT_NONE:
3178
3179 /*
3180 * drop the mapping
3181 */
3182
3183 break;
3184
3185 case MAP_INHERIT_SHARE:
3186
3187 /*
3188 * share the mapping: this means we want the old and
3189 * new entries to share amaps and backing objects.
3190 */
3191 /*
3192 * if the old_entry needs a new amap (due to prev fork)
3193 * then we need to allocate it now so that we have
3194 * something we own to share with the new_entry. [in
3195 * other words, we need to clear needs_copy]
3196 */
3197
3198 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
3199 /* get our own amap, clears needs_copy */
3200 amap_copy(old_map, old_entry, M_WAITOK, FALSE,
3201 0, 0);
3202 /* XXXCDC: WAITOK??? */
3203 }
3204
3205 new_entry = uvm_mapent_alloc(new_map, 0);
3206 /* old_entry -> new_entry */
3207 uvm_mapent_copy(old_entry, new_entry);
3208
3209 /* new pmap has nothing wired in it */
3210 new_entry->wired_count = 0;
3211
3212 /*
3213 * gain reference to object backing the map (can't
3214 * be a submap, already checked this case).
3215 */
3216
3217 if (new_entry->aref.ar_amap)
3218 uvm_map_reference_amap(new_entry, AMAP_SHARED);
3219
3220 if (new_entry->object.uvm_obj &&
3221 new_entry->object.uvm_obj->pgops->pgo_reference)
3222 new_entry->object.uvm_obj->
3223 pgops->pgo_reference(
3224 new_entry->object.uvm_obj);
3225
3226 /* insert entry at end of new_map's entry list */
3227 uvm_map_entry_link(new_map, new_map->header.prev,
3228 new_entry);
3229
3230 break;
3231
3232 case MAP_INHERIT_COPY:
3233
3234 /*
3235 * copy-on-write the mapping (using mmap's
3236 * MAP_PRIVATE semantics)
3237 *
3238 * allocate new_entry, adjust reference counts.
3239 * (note that new references are read-only).
3240 */
3241
3242 new_entry = uvm_mapent_alloc(new_map, 0);
3243 /* old_entry -> new_entry */
3244 uvm_mapent_copy(old_entry, new_entry);
3245
3246 if (new_entry->aref.ar_amap)
3247 uvm_map_reference_amap(new_entry, 0);
3248
3249 if (new_entry->object.uvm_obj &&
3250 new_entry->object.uvm_obj->pgops->pgo_reference)
3251 new_entry->object.uvm_obj->pgops->pgo_reference
3252 (new_entry->object.uvm_obj);
3253
3254 /* new pmap has nothing wired in it */
3255 new_entry->wired_count = 0;
3256
3257 new_entry->etype |=
3258 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
3259 uvm_map_entry_link(new_map, new_map->header.prev,
3260 new_entry);
3261
3262 /*
3263 * the new entry will need an amap. it will either
3264 * need to be copied from the old entry or created
3265 * from scratch (if the old entry does not have an
3266 * amap). can we defer this process until later
3267 * (by setting "needs_copy") or do we need to copy
3268 * the amap now?
3269 *
3270 * we must copy the amap now if any of the following
3271 * conditions hold:
3272 * 1. the old entry has an amap and that amap is
3273 * being shared. this means that the old (parent)
3274 * process is sharing the amap with another
3275 * process. if we do not clear needs_copy here
3276 * we will end up in a situation where both the
3277 			 * parent and child process are referring to the
3278 * same amap with "needs_copy" set. if the
3279 * parent write-faults, the fault routine will
3280 * clear "needs_copy" in the parent by allocating
3281 * a new amap. this is wrong because the
3282 * parent is supposed to be sharing the old amap
3283 * and the new amap will break that.
3284 *
3285 * 2. if the old entry has an amap and a non-zero
3286 * wire count then we are going to have to call
3287 * amap_cow_now to avoid page faults in the
3288 * parent process. since amap_cow_now requires
3289 * "needs_copy" to be clear we might as well
3290 * clear it here as well.
3291 *
3292 */
3293
3294 if (old_entry->aref.ar_amap != NULL) {
3295 if ((amap_flags(old_entry->aref.ar_amap) &
3296 AMAP_SHARED) != 0 ||
3297 VM_MAPENT_ISWIRED(old_entry)) {
3298
3299 amap_copy(new_map, new_entry, M_WAITOK,
3300 FALSE, 0, 0);
3301 /* XXXCDC: M_WAITOK ... ok? */
3302 }
3303 }
3304
3305 /*
3306 * if the parent's entry is wired down, then the
3307 * parent process does not want page faults on
3308 * access to that memory. this means that we
3309 * cannot do copy-on-write because we can't write
3310 * protect the old entry. in this case we
3311 * resolve all copy-on-write faults now, using
3312 * amap_cow_now. note that we have already
3313 * allocated any needed amap (above).
3314 */
3315
3316 if (VM_MAPENT_ISWIRED(old_entry)) {
3317
3318 /*
3319 * resolve all copy-on-write faults now
3320 * (note that there is nothing to do if
3321 * the old mapping does not have an amap).
3322 */
3323 if (old_entry->aref.ar_amap)
3324 amap_cow_now(new_map, new_entry);
3325
3326 } else {
3327
3328 /*
3329 				 * set up mappings to trigger copy-on-write faults:
3330 * we must write-protect the parent if it has
3331 * an amap and it is not already "needs_copy"...
3332 * if it is already "needs_copy" then the parent
3333 * has already been write-protected by a previous
3334 * fork operation.
3335 */
3336
3337 if (old_entry->aref.ar_amap &&
3338 !UVM_ET_ISNEEDSCOPY(old_entry)) {
3339 if (old_entry->max_protection & VM_PROT_WRITE) {
3340 pmap_protect(old_map->pmap,
3341 old_entry->start,
3342 old_entry->end,
3343 old_entry->protection &
3344 ~VM_PROT_WRITE);
3345 pmap_update(old_map->pmap);
3346 }
3347 old_entry->etype |= UVM_ET_NEEDSCOPY;
3348 }
3349 }
3350 break;
3351 } /* end of switch statement */
3352 old_entry = old_entry->next;
3353 }
3354
3355 new_map->size = old_map->size;
3356 vm_map_unlock(old_map);
3357
3358 #ifdef SYSVSHM
3359 if (vm1->vm_shm)
3360 shmfork(vm1, vm2);
3361 #endif
3362
3363 #ifdef PMAP_FORK
3364 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
3365 #endif
3366
3367 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3368 return(vm2);
3369 }
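
/*
 * Example (illustrative sketch, not compiled): how a fork path might
 * pick between sharing and copying the parent's vmspace; names are
 * hypothetical.
 */
#if 0
	if (vforklike)
		uvmspace_share(parent, child);	/* child borrows the space */
	else
		child->p_vmspace = uvmspace_fork(parent->p_vmspace);
#endif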
3370
3371
3372 #if defined(DDB)
3373
3374 /*
3375 * DDB hooks
3376 */
3377
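/*
 * these are normally reached from ddb(4): "show map <addr>",
 * "show object <addr>" and "show page <addr>" (add the /f modifier
 * for a full dump) end up in the printit functions below.
 */
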
3378 /*
3379 * uvm_map_printit: actually prints the map
3380 */
3381
3382 void
3383 uvm_map_printit(map, full, pr)
3384 struct vm_map *map;
3385 boolean_t full;
3386 void (*pr) __P((const char *, ...));
3387 {
3388 struct vm_map_entry *entry;
3389
3390 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset);
3391 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
3392 map->nentries, map->size, map->ref_count, map->timestamp,
3393 map->flags);
3394 (*pr)("\tpmap=%p(resident=%d)\n", map->pmap,
3395 pmap_resident_count(map->pmap));
3396 if (!full)
3397 return;
3398 for (entry = map->header.next; entry != &map->header;
3399 entry = entry->next) {
3400 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
3401 entry, entry->start, entry->end, entry->object.uvm_obj,
3402 (long long)entry->offset, entry->aref.ar_amap,
3403 entry->aref.ar_pageoff);
3404 (*pr)(
3405 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
3406 "wc=%d, adv=%d\n",
3407 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
3408 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
3409 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
3410 entry->protection, entry->max_protection,
3411 entry->inheritance, entry->wired_count, entry->advice);
3412 }
3413 }
3414
3415 /*
3416 * uvm_object_printit: actually prints the object
3417 */
3418
3419 void
3420 uvm_object_printit(uobj, full, pr)
3421 struct uvm_object *uobj;
3422 boolean_t full;
3423 void (*pr) __P((const char *, ...));
3424 {
3425 struct vm_page *pg;
3426 int cnt = 0;
3427
3428 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
3429 uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
3430 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
3431 (*pr)("refs=<SYSTEM>\n");
3432 else
3433 (*pr)("refs=%d\n", uobj->uo_refs);
3434
3435 if (!full) {
3436 return;
3437 }
3438 (*pr)(" PAGES <pg,offset>:\n ");
3439 TAILQ_FOREACH(pg, &uobj->memq, listq) {
3440 cnt++;
3441 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
3442 if ((cnt % 3) == 0) {
3443 (*pr)("\n ");
3444 }
3445 }
3446 if ((cnt % 3) != 0) {
3447 (*pr)("\n");
3448 }
3449 }
3450
3451 /*
3452 * uvm_page_printit: actually print the page
3453 */
3454
3455 static const char page_flagbits[] =
3456 "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY"
3457 "\11ZERO\15PAGER1";
3458 static const char page_pqflagbits[] =
3459 "\20\1FREE\2INACTIVE\3ACTIVE\5ANON\6AOBJ";
3460
3461 void
3462 uvm_page_printit(pg, full, pr)
3463 struct vm_page *pg;
3464 boolean_t full;
3465 void (*pr) __P((const char *, ...));
3466 {
3467 struct vm_page *tpg;
3468 struct uvm_object *uobj;
3469 struct pglist *pgl;
3470 char pgbuf[128];
3471 char pqbuf[128];
3472
3473 (*pr)("PAGE %p:\n", pg);
3474 bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf));
3475 bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf));
3476 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
3477 pgbuf, pqbuf, pg->wire_count, (long)pg->phys_addr);
3478 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
3479 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
3480 #if defined(UVM_PAGE_TRKOWN)
3481 if (pg->flags & PG_BUSY)
3482 (*pr)(" owning process = %d, tag=%s\n",
3483 pg->owner, pg->owner_tag);
3484 else
3485 (*pr)(" page not busy, no owner\n");
3486 #else
3487 (*pr)(" [page ownership tracking disabled]\n");
3488 #endif
3489
3490 if (!full)
3491 return;
3492
3493 /* cross-verify object/anon */
3494 if ((pg->pqflags & PQ_FREE) == 0) {
3495 if (pg->pqflags & PQ_ANON) {
3496 if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
3497 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
3498 (pg->uanon) ? pg->uanon->u.an_page : NULL);
3499 else
3500 (*pr)(" anon backpointer is OK\n");
3501 } else {
3502 uobj = pg->uobject;
3503 if (uobj) {
3504 (*pr)(" checking object list\n");
3505 TAILQ_FOREACH(tpg, &uobj->memq, listq) {
3506 if (tpg == pg) {
3507 break;
3508 }
3509 }
3510 if (tpg)
3511 (*pr)(" page found on object list\n");
3512 else
3513 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
3514 }
3515 }
3516 }
3517
3518 /* cross-verify page queue */
3519 if (pg->pqflags & PQ_FREE) {
3520 int fl = uvm_page_lookup_freelist(pg);
3521 int color = VM_PGCOLOR_BUCKET(pg);
3522 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
3523 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
3524 } else if (pg->pqflags & PQ_INACTIVE) {
3525 pgl = &uvm.page_inactive;
3526 } else if (pg->pqflags & PQ_ACTIVE) {
3527 pgl = &uvm.page_active;
3528 } else {
3529 pgl = NULL;
3530 }
3531
3532 if (pgl) {
3533 (*pr)(" checking pageq list\n");
3534 TAILQ_FOREACH(tpg, pgl, pageq) {
3535 if (tpg == pg) {
3536 break;
3537 }
3538 }
3539 if (tpg)
3540 (*pr)(" page found on pageq list\n");
3541 else
3542 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
3543 }
3544 }
3545 #endif
3546