1 /* $NetBSD: uvm_map.c,v 1.180 2005/01/13 11:50:32 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Charles D. Cranor,
23 * Washington University, the University of California, Berkeley and
24 * its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
42 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Permission to use, copy, modify and distribute this software and
49 * its documentation is hereby granted, provided that both the copyright
50 * notice and this permission notice appear in all copies of the
51 * software, derivative works or modified versions, and any portions
52 * thereof, and that both notices appear in supporting documentation.
53 *
54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 *
58 * Carnegie Mellon requests users of this software to return to
59 *
60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 */
68
69 /*
70 * uvm_map.c: uvm map operations
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.180 2005/01/13 11:50:32 yamt Exp $");
75
76 #include "opt_ddb.h"
77 #include "opt_uvmhist.h"
78 #include "opt_uvm.h"
79 #include "opt_sysv.h"
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/mman.h>
84 #include <sys/proc.h>
85 #include <sys/malloc.h>
86 #include <sys/pool.h>
87 #include <sys/kernel.h>
88 #include <sys/mount.h>
89 #include <sys/vnode.h>
90
91 #ifdef SYSVSHM
92 #include <sys/shm.h>
93 #endif
94
95 #define UVM_MAP
96 #include <uvm/uvm.h>
97 #undef RB_AUGMENT
98 #define RB_AUGMENT(x) uvm_rb_augment(x)
99
100 #ifdef DDB
101 #include <uvm/uvm_ddb.h>
102 #endif
103
104 #ifndef UVMMAP_NOCOUNTERS
105 #include <sys/device.h>
106 struct evcnt map_ubackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
107 "uvmmap", "ubackmerge");
108 struct evcnt map_uforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
109 "uvmmap", "uforwmerge");
110 struct evcnt map_ubimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
111 "uvmmap", "ubimerge");
112 struct evcnt map_unomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
113 "uvmmap", "unomerge");
114 struct evcnt map_kbackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
115 "uvmmap", "kbackmerge");
116 struct evcnt map_kforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
117 "uvmmap", "kforwmerge");
118 struct evcnt map_kbimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
119 "uvmmap", "kbimerge");
120 struct evcnt map_knomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
121 "uvmmap", "knomerge");
122 struct evcnt uvm_map_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
123 "uvmmap", "map_call");
124 struct evcnt uvm_mlk_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
125 "uvmmap", "mlk_call");
126 struct evcnt uvm_mlk_hint = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
127 "uvmmap", "mlk_hint");
128
129 EVCNT_ATTACH_STATIC(map_ubackmerge);
130 EVCNT_ATTACH_STATIC(map_uforwmerge);
131 EVCNT_ATTACH_STATIC(map_ubimerge);
132 EVCNT_ATTACH_STATIC(map_unomerge);
133 EVCNT_ATTACH_STATIC(map_kbackmerge);
134 EVCNT_ATTACH_STATIC(map_kforwmerge);
135 EVCNT_ATTACH_STATIC(map_kbimerge);
136 EVCNT_ATTACH_STATIC(map_knomerge);
137 EVCNT_ATTACH_STATIC(uvm_map_call);
138 EVCNT_ATTACH_STATIC(uvm_mlk_call);
139 EVCNT_ATTACH_STATIC(uvm_mlk_hint);
140
141 #define UVMCNT_INCR(ev) ev.ev_count++
142 #define UVMCNT_DECR(ev) ev.ev_count--
143 #else
144 #define UVMCNT_INCR(ev)
145 #define UVMCNT_DECR(ev)
146 #endif
147
148 const char vmmapbsy[] = "vmmapbsy";
149
150 /*
151 * pool for vmspace structures.
152 */
153
154 POOL_INIT(uvm_vmspace_pool, sizeof(struct vmspace), 0, 0, 0, "vmsppl",
155 &pool_allocator_nointr);
156
157 /*
158 * pool for dynamically-allocated map entries.
159 */
160
161 POOL_INIT(uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0, "vmmpepl",
162 &pool_allocator_nointr);
163
164 MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
165 MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
166
167 #ifdef PMAP_GROWKERNEL
168 /*
169 * This global represents the end of the kernel virtual address
170 * space. If we want to exceed this, we must grow the kernel
171 * virtual address space dynamically.
172 *
173 * Note, this variable is locked by kernel_map's lock.
174 */
175 vaddr_t uvm_maxkaddr;
176 #endif
177
178 /*
179 * macros
180 */
181
182 /*
183 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
184 * for the vm_map.
185 *
186 * we exclude pager_map because it needs pager_map_wanted handling
187 * when doing map/unmap.
188 */
189 extern struct vm_map *pager_map; /* XXX */
190 #define VM_MAP_USE_KMAPENT(map) \
191 (vm_map_pmap(map) == pmap_kernel() && (map) != pager_map)
192
193 /*
194 * uvm_map_entry_link: insert entry into a map
195 *
196 * => map must be locked
197 */
198 #define uvm_map_entry_link(map, after_where, entry) do { \
199 KASSERT(entry->start < entry->end); \
200 (map)->nentries++; \
201 (entry)->prev = (after_where); \
202 (entry)->next = (after_where)->next; \
203 (entry)->prev->next = (entry); \
204 (entry)->next->prev = (entry); \
205 uvm_rb_insert((map), (entry)); \
206 } while (/*CONSTCOND*/ 0)
207
208 /*
209 * uvm_map_entry_unlink: remove entry from a map
210 *
211 * => map must be locked
212 */
213 #define uvm_map_entry_unlink(map, entry) do { \
214 (map)->nentries--; \
215 (entry)->next->prev = (entry)->prev; \
216 (entry)->prev->next = (entry)->next; \
217 uvm_rb_remove((map), (entry)); \
218 } while (/*CONSTCOND*/ 0)
219
220 /*
221 * SAVE_HINT: saves the specified entry as the hint for future lookups.
222 *
223 * => map need not be locked (protected by hint_lock).
224 */
225 #define SAVE_HINT(map,check,value) do { \
226 simple_lock(&(map)->hint_lock); \
227 if ((map)->hint == (check)) \
228 (map)->hint = (value); \
229 simple_unlock(&(map)->hint_lock); \
230 } while (/*CONSTCOND*/ 0)
231
232 /*
233 * VM_MAP_RANGE_CHECK: check and correct range
234 *
235 * => map must at least be read locked
236 */
237
238 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
239 if (start < vm_map_min(map)) \
240 start = vm_map_min(map); \
241 if (end > vm_map_max(map)) \
242 end = vm_map_max(map); \
243 if (start > end) \
244 start = end; \
245 } while (/*CONSTCOND*/ 0)
246
247 /*
248 * local prototypes
249 */
250
251 static struct vm_map_entry *
252 uvm_mapent_alloc(struct vm_map *, int);
253 static struct vm_map_entry *
254 uvm_mapent_alloc_split(struct vm_map *,
255 const struct vm_map_entry *, int,
256 struct uvm_mapent_reservation *);
257 static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
258 static void uvm_mapent_free(struct vm_map_entry *);
259 static struct vm_map_entry *
260 uvm_kmapent_alloc(struct vm_map *, int);
261 static void uvm_kmapent_free(struct vm_map_entry *);
262 static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
263 static void uvm_map_reference_amap(struct vm_map_entry *, int);
264 static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
265 struct vm_map_entry *);
266 static void uvm_map_unreference_amap(struct vm_map_entry *, int);
267
268 int _uvm_tree_sanity(struct vm_map *, const char *);
269 static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *);
270
271 static __inline int
272 uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
273 {
274
275 if (a->start < b->start)
276 return (-1);
277 else if (a->start > b->start)
278 return (1);
279
280 return (0);
281 }
282
283 static __inline void
284 uvm_rb_augment(struct vm_map_entry *entry)
285 {
286
287 entry->space = uvm_rb_subtree_space(entry);
288 }
289
290 RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
291
292 RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
293
294 static __inline vsize_t
295 uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry)
296 {
297 /* XXX map is not used */
298
299 KASSERT(entry->next != NULL);
300 return entry->next->start - entry->end;
301 }
302
303 static vsize_t
304 uvm_rb_subtree_space(const struct vm_map_entry *entry)
305 {
306 vaddr_t space, tmp;
307
308 space = entry->ownspace;
309 if (RB_LEFT(entry, rb_entry)) {
310 tmp = RB_LEFT(entry, rb_entry)->space;
311 if (tmp > space)
312 space = tmp;
313 }
314
315 if (RB_RIGHT(entry, rb_entry)) {
316 tmp = RB_RIGHT(entry, rb_entry)->space;
317 if (tmp > space)
318 space = tmp;
319 }
320
321 return (space);
322 }
323
324 static __inline void
325 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
326 {
327 /* We need to traverse to the very top */
328 do {
329 entry->ownspace = uvm_rb_space(map, entry);
330 entry->space = uvm_rb_subtree_space(entry);
331 } while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
332 }
333
334 static __inline void
335 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
336 {
337 vaddr_t space = uvm_rb_space(map, entry);
338 struct vm_map_entry *tmp;
339
340 entry->ownspace = entry->space = space;
341 tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
342 #ifdef DIAGNOSTIC
343 if (tmp != NULL)
344 panic("uvm_rb_insert: duplicate entry?");
345 #endif
346 uvm_rb_fixup(map, entry);
347 if (entry->prev != &map->header)
348 uvm_rb_fixup(map, entry->prev);
349 }
350
351 static __inline void
352 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
353 {
354 struct vm_map_entry *parent;
355
356 parent = RB_PARENT(entry, rb_entry);
357 RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
358 if (entry->prev != &map->header)
359 uvm_rb_fixup(map, entry->prev);
360 if (parent)
361 uvm_rb_fixup(map, parent);
362 }
363
364 #ifdef DEBUG
365 int uvm_debug_check_rbtree = 0;
366 #define uvm_tree_sanity(x,y) \
367 if (uvm_debug_check_rbtree) \
368 _uvm_tree_sanity(x,y)
369 #else
370 #define uvm_tree_sanity(x,y)
371 #endif
372
373 int
374 _uvm_tree_sanity(struct vm_map *map, const char *name)
375 {
376 struct vm_map_entry *tmp, *trtmp;
377 int n = 0, i = 1;
378
379 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
380 if (tmp->ownspace != uvm_rb_space(map, tmp)) {
381 printf("%s: %d/%d ownspace %lx != %lx %s\n",
382 name, n + 1, map->nentries,
383 (ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp),
384 tmp->next == &map->header ? "(last)" : "");
385 goto error;
386 }
387 }
388 trtmp = NULL;
389 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
390 if (tmp->space != uvm_rb_subtree_space(tmp)) {
391 printf("%s: space %lx != %lx\n",
392 name, (ulong)tmp->space,
393 (ulong)uvm_rb_subtree_space(tmp));
394 goto error;
395 }
396 if (trtmp != NULL && trtmp->start >= tmp->start) {
397 printf("%s: corrupt: 0x%lx >= 0x%lx\n",
398 name, trtmp->start, tmp->start);
399 goto error;
400 }
401 n++;
402
403 trtmp = tmp;
404 }
405
406 if (n != map->nentries) {
407 printf("%s: nentries: %d vs %d\n",
408 name, n, map->nentries);
409 goto error;
410 }
411
412 for (tmp = map->header.next; tmp && tmp != &map->header;
413 tmp = tmp->next, i++) {
414 trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
415 if (trtmp != tmp) {
416 printf("%s: lookup: %d: %p - %p: %p\n",
417 name, i, tmp, trtmp,
418 RB_PARENT(tmp, rb_entry));
419 goto error;
420 }
421 }
422
423 return (0);
424 error:
425 #ifdef DDB
426 /* handy breakpoint location for error case */
427 __asm(".globl treesanity_label\ntreesanity_label:");
428 #endif
429 return (-1);
430 }
431
432 /*
433 * local inlines
434 */
435
436 static __inline struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
437
438 /*
439 * uvm_mapent_alloc: allocate a map entry
440 */
441
442 static __inline struct vm_map_entry *
443 uvm_mapent_alloc(struct vm_map *map, int flags)
444 {
445 struct vm_map_entry *me;
446 int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
447 UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
448
449 if (VM_MAP_USE_KMAPENT(map)) {
450 me = uvm_kmapent_alloc(map, flags);
451 } else {
452 me = pool_get(&uvm_map_entry_pool, pflags);
453 if (__predict_false(me == NULL))
454 return NULL;
455 me->flags = 0;
456 }
457
458 UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
459 ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
460 return (me);
461 }
462
463 /*
464 * uvm_mapent_alloc_split: allocate a map entry for clipping.
465 */
466
467 static __inline struct vm_map_entry *
468 uvm_mapent_alloc_split(struct vm_map *map,
469 const struct vm_map_entry *old_entry, int flags,
470 struct uvm_mapent_reservation *umr)
471 {
472 struct vm_map_entry *me;
473
474 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
475 (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
476
477 if (old_entry->flags & UVM_MAP_QUANTUM) {
478 int s;
479 struct vm_map_kernel *vmk = vm_map_to_kernel(map);
480
481 s = splvm();
482 simple_lock(&uvm.kentry_lock);
483 me = vmk->vmk_merged_entries;
484 KASSERT(me);
485 vmk->vmk_merged_entries = me->next;
486 simple_unlock(&uvm.kentry_lock);
487 splx(s);
488 KASSERT(me->flags & UVM_MAP_QUANTUM);
489 } else {
490 me = uvm_mapent_alloc(map, flags);
491 }
492
493 return me;
494 }
495
496 /*
497 * uvm_mapent_free: free map entry
498 */
499
500 static __inline void
501 uvm_mapent_free(struct vm_map_entry *me)
502 {
503 UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
504
505 UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
506 me, me->flags, 0, 0);
507 if (me->flags & UVM_MAP_KERNEL) {
508 uvm_kmapent_free(me);
509 } else {
510 pool_put(&uvm_map_entry_pool, me);
511 }
512 }
513
514 /*
515 * uvm_mapent_free_merged: free merged map entry
516 *
517 * => keep the entry if needed.
518 * => caller shouldn't hold map locked.
519 */
520
521 static __inline void
522 uvm_mapent_free_merged(struct vm_map_entry *me)
523 {
524
525 if (me->flags & UVM_MAP_QUANTUM) {
526 /*
527 * keep this entry for later splitting.
528 */
529 struct vm_map *map;
530 struct vm_map_kernel *vmk;
531 int s;
532
533 KASSERT(me->flags & UVM_MAP_KERNEL);
534
535 map = uvm_kmapent_map(me);
536 vmk = vm_map_to_kernel(map);
537 s = splvm();
538 simple_lock(&uvm.kentry_lock);
539 me->next = vmk->vmk_merged_entries;
540 vmk->vmk_merged_entries = me;
541 simple_unlock(&uvm.kentry_lock);
542 splx(s);
543 } else {
544 uvm_mapent_free(me);
545 }
546 }
547
548 /*
549 * uvm_mapent_copy: copy a map entry, preserving flags
550 */
551
552 static __inline void
553 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
554 {
555
556 memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
557 ((char *)src));
558 }
559
560 /*
561 * uvm_map_entry_unwire: unwire a map entry
562 *
563 * => map should be locked by caller
564 */
565
566 static __inline void
567 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
568 {
569
570 entry->wired_count = 0;
571 uvm_fault_unwire_locked(map, entry->start, entry->end);
572 }
573
574
575 /*
576 * wrapper for calling amap_ref()
577 */
578 static __inline void
579 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
580 {
581
582 amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
583 (entry->end - entry->start) >> PAGE_SHIFT, flags);
584 }
585
586
587 /*
588 * wrapper for calling amap_unref()
589 */
590 static __inline void
591 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
592 {
593
594 amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
595 (entry->end - entry->start) >> PAGE_SHIFT, flags);
596 }
597
598
599 /*
600 * uvm_map_init: init mapping system at boot time. note that we allocate
601 * and init the static pool of struct vm_map_entry *'s for the kernel here.
602 */
603
604 void
605 uvm_map_init(void)
606 {
607 #if defined(UVMHIST)
608 static struct uvm_history_ent maphistbuf[100];
609 static struct uvm_history_ent pdhistbuf[100];
610 #endif
611
612 /*
613 * first, init logging system.
614 */
615
616 UVMHIST_FUNC("uvm_map_init");
617 UVMHIST_INIT_STATIC(maphist, maphistbuf);
618 UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
619 UVMHIST_CALLED(maphist);
620 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
621
622 /*
623 * initialize the global lock for kernel map entry.
624 *
625 * XXX is it worthwhile to have a per-map lock instead?
626 */
627
628 simple_lock_init(&uvm.kentry_lock);
629 }
630
631 /*
632 * clippers
633 */
634
635 /*
636 * uvm_map_clip_start: ensure that the entry begins at or after
637 * the starting address; if it doesn't, we split the entry.
638 *
639 * => caller should use UVM_MAP_CLIP_START macro rather than calling
640 * this directly
641 * => map must be locked by caller
642 */
643
644 void
645 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
646 vaddr_t start, struct uvm_mapent_reservation *umr)
647 {
648 struct vm_map_entry *new_entry;
649 vaddr_t new_adj;
650
651 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
652
653 uvm_tree_sanity(map, "clip_start entry");
654
655 /*
656 * Split off the front portion. note that we must insert the new
657 * entry BEFORE this one, so that this entry has the specified
658 * starting address.
659 */
660 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
661 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
662
663 new_entry->end = start;
664 new_adj = start - new_entry->start;
665 if (entry->object.uvm_obj)
666 entry->offset += new_adj; /* shift start over */
667
668 /* Does not change order for the RB tree */
669 entry->start = start;
670
671 if (new_entry->aref.ar_amap) {
672 amap_splitref(&new_entry->aref, &entry->aref, new_adj);
673 }
674
675 uvm_map_entry_link(map, entry->prev, new_entry);
676
677 if (UVM_ET_ISSUBMAP(entry)) {
678 /* ... unlikely to happen, but play it safe */
679 uvm_map_reference(new_entry->object.sub_map);
680 } else {
681 if (UVM_ET_ISOBJ(entry) &&
682 entry->object.uvm_obj->pgops &&
683 entry->object.uvm_obj->pgops->pgo_reference)
684 entry->object.uvm_obj->pgops->pgo_reference(
685 entry->object.uvm_obj);
686 }
687
688 uvm_tree_sanity(map, "clip_start leave");
689 }
690
691 /*
692 * uvm_map_clip_end: ensure that the entry ends at or before
693 * the ending address; if it doesn't, we split the entry
694 *
695 * => caller should use UVM_MAP_CLIP_END macro rather than calling
696 * this directly
697 * => map must be locked by caller
698 */
699
700 void
701 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
702 struct uvm_mapent_reservation *umr)
703 {
704 struct vm_map_entry * new_entry;
705 vaddr_t new_adj; /* #bytes we move start forward */
706
707 uvm_tree_sanity(map, "clip_end entry");
708
709 /*
710 * Create a new entry and insert it
711 * AFTER the specified entry
712 */
713 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
714 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
715
716 new_entry->start = entry->end = end;
717 new_adj = end - entry->start;
718 if (new_entry->object.uvm_obj)
719 new_entry->offset += new_adj;
720
721 if (entry->aref.ar_amap)
722 amap_splitref(&entry->aref, &new_entry->aref, new_adj);
723
724 uvm_rb_fixup(map, entry);
725
726 uvm_map_entry_link(map, entry, new_entry);
727
728 if (UVM_ET_ISSUBMAP(entry)) {
729 /* ... unlikely to happen, but play it safe */
730 uvm_map_reference(new_entry->object.sub_map);
731 } else {
732 if (UVM_ET_ISOBJ(entry) &&
733 entry->object.uvm_obj->pgops &&
734 entry->object.uvm_obj->pgops->pgo_reference)
735 entry->object.uvm_obj->pgops->pgo_reference(
736 entry->object.uvm_obj);
737 }
738
739 uvm_tree_sanity(map, "clip_end leave");
740 }
741
742
743 /*
744 * M A P - m a i n e n t r y p o i n t
745 */
746 /*
747 * uvm_map: establish a valid mapping in a map
748 *
749 * => assume startp is page aligned.
750 * => assume size is a multiple of PAGE_SIZE.
751 * => assume sys_mmap provides enough of a "hint" to have us skip
752 * over text/data/bss area.
753 * => map must be unlocked (we will lock it)
754 * => <uobj,uoffset> value meanings (4 cases):
755 * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
756 * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
757 * [3] <uobj,uoffset> == normal mapping
758 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
759 *
760 * case [4] is for kernel mappings where we don't know the offset until
761 * we've found a virtual address. note that kernel object offsets are
762 * always relative to vm_map_min(kernel_map).
763 *
764 * => if `align' is non-zero, we align the virtual address to the specified
765 * alignment.
766 * this is provided as a mechanism for large pages.
767 *
768 * => XXXCDC: need way to map in external amap?
769 */
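/*
 * Editorial sketch (not part of the original source): what a typical
 * call for case [2] above might look like -- an anonymous, copy-on-write
 * mapping with no uobj and no PMAP_PREFER hint.  The protection, inherit,
 * advice and flag macros are assumed to be the usual ones from
 * uvm_extern.h; "p", "addr" and "desired_len" are hypothetical names.
 *
 *	vaddr_t addr = 0;                       (let uvm_map choose the VA)
 *	vsize_t len = round_page(desired_len);  (size must be page aligned)
 *	int error;
 *
 *	error = uvm_map(&p->p_vmspace->vm_map, &addr, len,
 *	    NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
 *	if (error)
 *		return error;                   (e.g. ENOMEM or EAGAIN)
 */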
770
771 int
772 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
773 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
774 {
775 struct uvm_map_args args;
776 struct vm_map_entry *new_entry;
777 int error;
778
779 KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_USE_KMAPENT(map));
780
781 /*
782 * for pager_map, allocate the new entry first to avoid sleeping
783 * for memory while we have the map locked.
784 *
785 * besides, because we allocate entries for in-kernel maps
786 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
787 * allocate them before locking the map.
788 */
789
790 new_entry = NULL;
791 if (VM_MAP_USE_KMAPENT(map) || map == pager_map) {
792 new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
793 if (__predict_false(new_entry == NULL))
794 return ENOMEM;
795 if (flags & UVM_FLAG_QUANTUM)
796 new_entry->flags |= UVM_MAP_QUANTUM;
797 }
798 if (map == pager_map)
799 flags |= UVM_FLAG_NOMERGE;
800
801 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
802 flags, &args);
803 if (!error) {
804 error = uvm_map_enter(map, &args, new_entry);
805 *startp = args.uma_start;
806 }
807
808 return error;
809 }
810
811 int
812 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
813 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
814 struct uvm_map_args *args)
815 {
816 struct vm_map_entry *prev_entry;
817 vm_prot_t prot = UVM_PROTECTION(flags);
818 vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
819
820 UVMHIST_FUNC("uvm_map_prepare");
821 UVMHIST_CALLED(maphist);
822
823 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
824 map, start, size, flags);
825 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
826
827 /*
828 * detect a common device driver bug.
829 */
830
831 KASSERT(doing_shutdown || curlwp != NULL ||
832 (map->flags & VM_MAP_INTRSAFE));
833
834 /*
835 * zero-sized mapping doesn't make any sense.
836 */
837 KASSERT(size > 0);
838
839 KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
840
841 uvm_tree_sanity(map, "map entry");
842
843 /*
844 * check sanity of protection code
845 */
846
847 if ((prot & maxprot) != prot) {
848 UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
849 prot, maxprot,0,0);
850 return EACCES;
851 }
852
853 /*
854 * figure out where to put new VM range
855 */
856
857 retry:
858 if (vm_map_lock_try(map) == FALSE) {
859 if (flags & UVM_FLAG_TRYLOCK) {
860 return EAGAIN;
861 }
862 vm_map_lock(map); /* could sleep here */
863 }
864 if ((prev_entry = uvm_map_findspace(map, start, size, &start,
865 uobj, uoffset, align, flags)) == NULL) {
866 unsigned int timestamp;
867
868 if ((flags & UVM_FLAG_WAITVA) == 0) {
869 UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",
870 0,0,0,0);
871 vm_map_unlock(map);
872 return ENOMEM;
873 }
874 timestamp = map->timestamp;
875 UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
876 timestamp,0,0,0);
877 simple_lock(&map->flags_lock);
878 map->flags |= VM_MAP_WANTVA;
879 simple_unlock(&map->flags_lock);
880 vm_map_unlock(map);
881
882 /*
883 * wait until someone does unmap.
884 * XXX fragile locking
885 */
886
887 simple_lock(&map->flags_lock);
888 while ((map->flags & VM_MAP_WANTVA) != 0 &&
889 map->timestamp == timestamp) {
890 ltsleep(&map->header, PVM, "vmmapva", 0,
891 &map->flags_lock);
892 }
893 simple_unlock(&map->flags_lock);
894 goto retry;
895 }
896
897 #ifdef PMAP_GROWKERNEL
898 /*
899 * If the kernel pmap can't map the requested space,
900 * then allocate more resources for it.
901 */
902 if (map == kernel_map && uvm_maxkaddr < (start + size))
903 uvm_maxkaddr = pmap_growkernel(start + size);
904 #endif
905
906 UVMCNT_INCR(uvm_map_call);
907
908 /*
909 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
910 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
911 * either case we want to zero it before storing it in the map entry
912 * (because it looks strange and confusing when debugging...)
913 *
914 * if uobj is not null
915 * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
916 * and we do not need to change uoffset.
917 * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
918 * now (based on the starting address of the map). this case is
919 * for kernel object mappings where we don't know the offset until
920 * the virtual address is found (with uvm_map_findspace). the
921 * offset is the distance we are from the start of the map.
922 */
923
924 if (uobj == NULL) {
925 uoffset = 0;
926 } else {
927 if (uoffset == UVM_UNKNOWN_OFFSET) {
928 KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
929 uoffset = start - vm_map_min(kernel_map);
930 }
931 }
932
933 args->uma_flags = flags;
934 args->uma_prev = prev_entry;
935 args->uma_start = start;
936 args->uma_size = size;
937 args->uma_uobj = uobj;
938 args->uma_uoffset = uoffset;
939
940 return 0;
941 }
942
943 int
944 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
945 struct vm_map_entry *new_entry)
946 {
947 struct vm_map_entry *prev_entry = args->uma_prev;
948 struct vm_map_entry *dead = NULL;
949
950 const uvm_flag_t flags = args->uma_flags;
951 const vm_prot_t prot = UVM_PROTECTION(flags);
952 const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
953 const vm_inherit_t inherit = UVM_INHERIT(flags);
954 const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
955 AMAP_EXTEND_NOWAIT : 0;
956 const int advice = UVM_ADVICE(flags);
957 const int meflagmask = UVM_MAP_NOMERGE | UVM_MAP_QUANTUM;
958 const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
959 UVM_MAP_QUANTUM : 0;
960
961 vaddr_t start = args->uma_start;
962 vsize_t size = args->uma_size;
963 struct uvm_object *uobj = args->uma_uobj;
964 voff_t uoffset = args->uma_uoffset;
965
966 const int kmap = (vm_map_pmap(map) == pmap_kernel());
967 int merged = 0;
968 int error;
969 int newetype;
970
971 UVMHIST_FUNC("uvm_map_enter");
972 UVMHIST_CALLED(maphist);
973
974 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
975 map, start, size, flags);
976 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
977
978 if (flags & UVM_FLAG_QUANTUM) {
979 KASSERT(new_entry);
980 KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
981 }
982
983 if (uobj)
984 newetype = UVM_ET_OBJ;
985 else
986 newetype = 0;
987
988 if (flags & UVM_FLAG_COPYONW) {
989 newetype |= UVM_ET_COPYONWRITE;
990 if ((flags & UVM_FLAG_OVERLAY) == 0)
991 newetype |= UVM_ET_NEEDSCOPY;
992 }
993
994 /*
995 * try and insert in map by extending previous entry, if possible.
996 * XXX: we don't try and pull back the next entry. might be useful
997 * for a stack, but we are currently allocating our stack in advance.
998 */
999
1000 if (flags & UVM_FLAG_NOMERGE)
1001 goto nomerge;
1002
1003 if (prev_entry->etype == newetype &&
1004 prev_entry->end == start &&
1005 prev_entry != &map->header &&
1006 prev_entry->object.uvm_obj == uobj) {
1007
1008 if ((prev_entry->flags & meflagmask) != meflagval)
1009 goto forwardmerge;
1010
1011 if (uobj && prev_entry->offset +
1012 (prev_entry->end - prev_entry->start) != uoffset)
1013 goto forwardmerge;
1014
1015 if (prev_entry->protection != prot ||
1016 prev_entry->max_protection != maxprot)
1017 goto forwardmerge;
1018
1019 if (prev_entry->inheritance != inherit ||
1020 prev_entry->advice != advice)
1021 goto forwardmerge;
1022
1023 /* wiring status must match (new area is unwired) */
1024 if (VM_MAPENT_ISWIRED(prev_entry))
1025 goto forwardmerge;
1026
1027 /*
1028 * can't extend a shared amap. note: no need to lock amap to
1029 * look at refs since we don't care about its exact value.
1030 * if it is one (i.e. we have the only reference) it will stay there
1031 */
1032
1033 if (prev_entry->aref.ar_amap &&
1034 amap_refs(prev_entry->aref.ar_amap) != 1) {
1035 goto forwardmerge;
1036 }
1037
1038 if (prev_entry->aref.ar_amap) {
1039 error = amap_extend(prev_entry, size,
1040 amapwaitflag | AMAP_EXTEND_FORWARDS);
1041 if (error)
1042 goto done;
1043 }
1044
1045 if (kmap)
1046 UVMCNT_INCR(map_kbackmerge);
1047 else
1048 UVMCNT_INCR(map_ubackmerge);
1049 UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
1050
1051 /*
1052 * drop our reference to uobj since we are extending a reference
1053 * that we already have (the ref count can not drop to zero).
1054 */
1055
1056 if (uobj && uobj->pgops->pgo_detach)
1057 uobj->pgops->pgo_detach(uobj);
1058
1059 prev_entry->end += size;
1060 uvm_rb_fixup(map, prev_entry);
1061
1062 uvm_tree_sanity(map, "map backmerged");
1063
1064 UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1065 merged++;
1066 }
1067
1068 forwardmerge:
1069 if (prev_entry->next->etype == newetype &&
1070 prev_entry->next->start == (start + size) &&
1071 prev_entry->next != &map->header &&
1072 prev_entry->next->object.uvm_obj == uobj) {
1073
1074 if ((prev_entry->next->flags & meflagmask) != meflagval)
1075 goto nomerge;
1076
1077 if (uobj && prev_entry->next->offset != uoffset + size)
1078 goto nomerge;
1079
1080 if (prev_entry->next->protection != prot ||
1081 prev_entry->next->max_protection != maxprot)
1082 goto nomerge;
1083
1084 if (prev_entry->next->inheritance != inherit ||
1085 prev_entry->next->advice != advice)
1086 goto nomerge;
1087
1088 /* wiring status must match (new area is unwired) */
1089 if (VM_MAPENT_ISWIRED(prev_entry->next))
1090 goto nomerge;
1091
1092 /*
1093 * can't extend a shared amap. note: no need to lock amap to
1094 * look at refs since we don't care about its exact value.
1095 * if it is one (i.e. we have the only reference) it will stay there.
1096 *
1097 * note that we also can't merge two amaps, so if we
1098 * merged with the previous entry which has an amap,
1099 * and the next entry also has an amap, we give up.
1100 *
1101 * Interesting cases:
1102 * amap, new, amap -> give up second merge (single fwd extend)
1103 * amap, new, none -> double forward extend (extend again here)
1104 * none, new, amap -> double backward extend (done here)
1105 * uobj, new, amap -> single backward extend (done here)
1106 *
1107 * XXX should we attempt to deal with someone refilling
1108 * the deallocated region between two entries that are
1109 * backed by the same amap (ie, arefs is 2, "prev" and
1110 * "next" refer to it, and adding this allocation will
1111 * close the hole, thus restoring arefs to 1 and
1112 * deallocating the "next" vm_map_entry)? -- @@@
1113 */
1114
1115 if (prev_entry->next->aref.ar_amap &&
1116 (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1117 (merged && prev_entry->aref.ar_amap))) {
1118 goto nomerge;
1119 }
1120
1121 if (merged) {
1122 /*
1123 * Try to extend the amap of the previous entry to
1124 * cover the next entry as well. If it doesn't work
1125 * just skip on, don't actually give up, since we've
1126 * already completed the back merge.
1127 */
1128 if (prev_entry->aref.ar_amap) {
1129 if (amap_extend(prev_entry,
1130 prev_entry->next->end -
1131 prev_entry->next->start,
1132 amapwaitflag | AMAP_EXTEND_FORWARDS))
1133 goto nomerge;
1134 }
1135
1136 /*
1137 * Try to extend the amap of the *next* entry
1138 * back to cover the new allocation *and* the
1139 * previous entry as well (the previous merge
1140 * didn't have an amap already otherwise we
1141 * wouldn't be checking here for an amap). If
1142 * it doesn't work just skip on, again, don't
1143 * actually give up, since we've already
1144 * completed the back merge.
1145 */
1146 else if (prev_entry->next->aref.ar_amap) {
1147 if (amap_extend(prev_entry->next,
1148 prev_entry->end -
1149 prev_entry->start,
1150 amapwaitflag | AMAP_EXTEND_BACKWARDS))
1151 goto nomerge;
1152 }
1153 } else {
1154 /*
1155 * Pull the next entry's amap backwards to cover this
1156 * new allocation.
1157 */
1158 if (prev_entry->next->aref.ar_amap) {
1159 error = amap_extend(prev_entry->next, size,
1160 amapwaitflag | AMAP_EXTEND_BACKWARDS);
1161 if (error)
1162 goto done;
1163 }
1164 }
1165
1166 if (merged) {
1167 if (kmap) {
1168 UVMCNT_DECR(map_kbackmerge);
1169 UVMCNT_INCR(map_kbimerge);
1170 } else {
1171 UVMCNT_DECR(map_ubackmerge);
1172 UVMCNT_INCR(map_ubimerge);
1173 }
1174 } else {
1175 if (kmap)
1176 UVMCNT_INCR(map_kforwmerge);
1177 else
1178 UVMCNT_INCR(map_uforwmerge);
1179 }
1180 UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);
1181
1182 /*
1183 * drop our reference to uobj since we are extending a reference
1184 * that we already have (the ref count can not drop to zero).
1185 * (if merged, we've already detached)
1186 */
1187 if (uobj && uobj->pgops->pgo_detach && !merged)
1188 uobj->pgops->pgo_detach(uobj);
1189
1190 if (merged) {
1191 dead = prev_entry->next;
1192 prev_entry->end = dead->end;
1193 uvm_map_entry_unlink(map, dead);
1194 if (dead->aref.ar_amap != NULL) {
1195 prev_entry->aref = dead->aref;
1196 dead->aref.ar_amap = NULL;
1197 }
1198 } else {
1199 prev_entry->next->start -= size;
1200 if (prev_entry != &map->header)
1201 uvm_rb_fixup(map, prev_entry);
1202 if (uobj)
1203 prev_entry->next->offset = uoffset;
1204 }
1205
1206 uvm_tree_sanity(map, "map forwardmerged");
1207
1208 UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1209 merged++;
1210 }
1211
1212 nomerge:
1213 if (!merged) {
1214 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1215 if (kmap)
1216 UVMCNT_INCR(map_knomerge);
1217 else
1218 UVMCNT_INCR(map_unomerge);
1219
1220 /*
1221 * allocate new entry and link it in.
1222 */
1223
1224 if (new_entry == NULL) {
1225 new_entry = uvm_mapent_alloc(map,
1226 (flags & UVM_FLAG_NOWAIT));
1227 if (__predict_false(new_entry == NULL)) {
1228 error = ENOMEM;
1229 goto done;
1230 }
1231 }
1232 new_entry->start = start;
1233 new_entry->end = new_entry->start + size;
1234 new_entry->object.uvm_obj = uobj;
1235 new_entry->offset = uoffset;
1236
1237 new_entry->etype = newetype;
1238
1239 if (flags & UVM_FLAG_NOMERGE) {
1240 new_entry->flags |= UVM_MAP_NOMERGE;
1241 }
1242
1243 new_entry->protection = prot;
1244 new_entry->max_protection = maxprot;
1245 new_entry->inheritance = inherit;
1246 new_entry->wired_count = 0;
1247 new_entry->advice = advice;
1248 if (flags & UVM_FLAG_OVERLAY) {
1249
1250 /*
1251 * to_add: for BSS we overallocate a little since we
1252 * are likely to extend
1253 */
1254
1255 vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1256 UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1257 struct vm_amap *amap = amap_alloc(size, to_add,
1258 (flags & UVM_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK);
1259 if (__predict_false(amap == NULL)) {
1260 error = ENOMEM;
1261 goto done;
1262 }
1263 new_entry->aref.ar_pageoff = 0;
1264 new_entry->aref.ar_amap = amap;
1265 } else {
1266 new_entry->aref.ar_pageoff = 0;
1267 new_entry->aref.ar_amap = NULL;
1268 }
1269 uvm_map_entry_link(map, prev_entry, new_entry);
1270
1271 /*
1272 * Update the free space hint
1273 */
1274
1275 if ((map->first_free == prev_entry) &&
1276 (prev_entry->end >= new_entry->start))
1277 map->first_free = new_entry;
1278
1279 new_entry = NULL;
1280 }
1281
1282 map->size += size;
1283
1284 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1285
1286 error = 0;
1287 done:
1288 vm_map_unlock(map);
1289 if (new_entry) {
1290 if (error == 0) {
1291 KDASSERT(merged);
1292 uvm_mapent_free_merged(new_entry);
1293 } else {
1294 uvm_mapent_free(new_entry);
1295 }
1296 }
1297 if (dead) {
1298 KDASSERT(merged);
1299 uvm_mapent_free_merged(dead);
1300 }
1301 return error;
1302 }
1303
1304 /*
1305 * uvm_map_lookup_entry: find map entry at or before an address
1306 *
1307 * => map must at least be read-locked by caller
1308 * => entry is returned in "entry"
1309 * => return value is true if address is in the returned entry
1310 */
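/*
 * Editorial sketch (assumed caller, not part of the original source):
 * with the map at least read-locked, the return value distinguishes
 * "va is mapped" from "va falls in the gap after the returned entry":
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		... va lies within [entry->start, entry->end) ...
 *	} else {
 *		... va is unmapped; entry is the preceding entry,
 *		    or &map->header if va is below the first entry ...
 *	}
 *	vm_map_unlock_read(map);
 */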
1311
1312 boolean_t
1313 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1314 struct vm_map_entry **entry /* OUT */)
1315 {
1316 struct vm_map_entry *cur;
1317 boolean_t use_tree = FALSE;
1318 UVMHIST_FUNC("uvm_map_lookup_entry");
1319 UVMHIST_CALLED(maphist);
1320
1321 UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1322 map, address, entry, 0);
1323
1324 /*
1325 * start looking either from the head of the
1326 * list, or from the hint.
1327 */
1328
1329 simple_lock(&map->hint_lock);
1330 cur = map->hint;
1331 simple_unlock(&map->hint_lock);
1332
1333 if (cur == &map->header)
1334 cur = cur->next;
1335
1336 UVMCNT_INCR(uvm_mlk_call);
1337 if (address >= cur->start) {
1338
1339 /*
1340 * go from hint to end of list.
1341 *
1342 * but first, make a quick check to see if
1343 * we are already looking at the entry we
1344 * want (which is usually the case).
1345 * note also that we don't need to save the hint
1346 * here... it is the same hint (unless we are
1347 * at the header, in which case the hint didn't
1348 * buy us anything anyway).
1349 */
1350
1351 if (cur != &map->header && cur->end > address) {
1352 UVMCNT_INCR(uvm_mlk_hint);
1353 *entry = cur;
1354 UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1355 cur, 0, 0, 0);
1356 return (TRUE);
1357 }
1358
1359 if (map->nentries > 30)
1360 use_tree = TRUE;
1361 } else {
1362
1363 /*
1364 * invalid hint. use tree.
1365 */
1366 use_tree = TRUE;
1367 }
1368
1369 uvm_tree_sanity(map, __func__);
1370
1371 if (use_tree) {
1372 struct vm_map_entry *prev = &map->header;
1373 cur = RB_ROOT(&map->rbhead);
1374
1375 /*
1376 * Simple lookup in the tree. Happens when the hint is
1377 * invalid, or nentries reach a threshold.
1378 */
1379 while (cur) {
1380 if (address >= cur->start) {
1381 if (address < cur->end) {
1382 *entry = cur;
1383 goto got;
1384 }
1385 prev = cur;
1386 cur = RB_RIGHT(cur, rb_entry);
1387 } else
1388 cur = RB_LEFT(cur, rb_entry);
1389 }
1390 *entry = prev;
1391 goto failed;
1392 }
1393
1394 /*
1395 * search linearly
1396 */
1397
1398 while (cur != &map->header) {
1399 if (cur->end > address) {
1400 if (address >= cur->start) {
1401 /*
1402 * save this lookup for future
1403 * hints, and return
1404 */
1405
1406 *entry = cur;
1407 got:
1408 SAVE_HINT(map, map->hint, *entry);
1409 UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1410 cur, 0, 0, 0);
1411 KDASSERT((*entry)->start <= address);
1412 KDASSERT(address < (*entry)->end);
1413 return (TRUE);
1414 }
1415 break;
1416 }
1417 cur = cur->next;
1418 }
1419 *entry = cur->prev;
1420 failed:
1421 SAVE_HINT(map, map->hint, *entry);
1422 UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1423 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1424 KDASSERT((*entry)->next == &map->header ||
1425 address < (*entry)->next->start);
1426 return (FALSE);
1427 }
1428
1429 /*
1430 * See if the range between start and start + length fits in the gap
1431 * between entry->end and entry->next->start. Returns 1 if it fits, 0 if it
1432 * doesn't fit, and -1 if the address wraps around.
1433 */
1434 static __inline int
1435 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1436 vsize_t align, int topdown, struct vm_map_entry *entry)
1437 {
1438 vaddr_t end;
1439
1440 #ifdef PMAP_PREFER
1441 /*
1442 * push start address forward as needed to avoid VAC alias problems.
1443 * we only do this if a valid offset is specified.
1444 */
1445
1446 if (uoffset != UVM_UNKNOWN_OFFSET)
1447 PMAP_PREFER(uoffset, start);
1448 #endif
1449 if (align != 0) {
1450 if ((*start & (align - 1)) != 0) {
1451 if (topdown)
1452 *start &= ~(align - 1);
1453 else
1454 *start = roundup(*start, align);
1455 }
1456 /*
1457 * XXX Should we PMAP_PREFER() here again?
1458 */
1459 }
1460
1461 /*
1462 * Find the end of the proposed new region. Be sure we didn't
1463 * wrap around the address; if so, we lose. Otherwise, if the
1464 * proposed new region fits before the next entry, we win.
1465 */
1466
1467 end = *start + length;
1468 if (end < *start)
1469 return (-1);
1470
1471 if (entry->next->start >= end && *start >= entry->end)
1472 return (1);
1473
1474 return (0);
1475 }
1476
1477 /*
1478 * uvm_map_findspace: find "length" sized space in "map".
1479 *
1480 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1481 * set in "flags" (in which case we insist on using "hint").
1482 * => "result" is VA returned
1483 * => uobj/uoffset are to be used to handle VAC alignment, if required
1484 * => if "align" is non-zero, we attempt to align to that value.
1485 * => caller must at least have read-locked map
1486 * => returns NULL on failure, or pointer to prev. map entry if success
1487 * => note this is a cross between the old vm_map_findspace and vm_map_find
1488 */
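/*
 * Editorial sketch (not part of the original source): how a caller such
 * as uvm_map_prepare() above typically uses this routine.  "hint" and
 * "size" are hypothetical, and the map is assumed to be write-locked
 * already:
 *
 *	prev_entry = uvm_map_findspace(map, hint, size, &start,
 *	    NULL, UVM_UNKNOWN_OFFSET, 0, 0);
 *	if (prev_entry == NULL) {
 *		vm_map_unlock(map);
 *		return ENOMEM;
 *	}
 *	... link the new entry into the map after prev_entry,
 *	    starting at the returned "start" ...
 */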
1489
1490 struct vm_map_entry *
1491 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1492 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1493 vsize_t align, int flags)
1494 {
1495 struct vm_map_entry *entry;
1496 struct vm_map_entry *child, *prev, *tmp;
1497 vaddr_t orig_hint;
1498 const int topdown = map->flags & VM_MAP_TOPDOWN;
1499 UVMHIST_FUNC("uvm_map_findspace");
1500 UVMHIST_CALLED(maphist);
1501
1502 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1503 map, hint, length, flags);
1504 KASSERT((align & (align - 1)) == 0);
1505 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1506
1507 uvm_tree_sanity(map, "map_findspace entry");
1508
1509 /*
1510 * remember the original hint. if we are aligning, then we
1511 * may have to try again with no alignment constraint if
1512 * we fail the first time.
1513 */
1514
1515 orig_hint = hint;
1516 if (hint < map->min_offset) { /* check ranges ... */
1517 if (flags & UVM_FLAG_FIXED) {
1518 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1519 return (NULL);
1520 }
1521 hint = map->min_offset;
1522 }
1523 if (hint > map->max_offset) {
1524 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1525 hint, map->min_offset, map->max_offset, 0);
1526 return (NULL);
1527 }
1528
1529 /*
1530 * Look for the first possible address; if there's already
1531 * something at this address, we have to start after it.
1532 */
1533
1534 /*
1535 * @@@: there are four, no, eight cases to consider.
1536 *
1537 * 0: found, fixed, bottom up -> fail
1538 * 1: found, fixed, top down -> fail
1539 * 2: found, not fixed, bottom up -> start after entry->end,
1540 * loop up
1541 * 3: found, not fixed, top down -> start before entry->start,
1542 * loop down
1543 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1544 * 5: not found, fixed, top down -> check entry->next->start, fail
1545 * 6: not found, not fixed, bottom up -> check entry->next->start,
1546 * loop up
1547 * 7: not found, not fixed, top down -> check entry->next->start,
1548 * loop down
1549 *
1550 * as you can see, it reduces to roughly five cases, and
1551 * adding top down mapping only adds one unique case (without
1552 * it, there would be four cases).
1553 */
1554
1555 if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
1556 entry = map->first_free;
1557 } else {
1558 if (uvm_map_lookup_entry(map, hint, &entry)) {
1559 /* "hint" address already in use ... */
1560 if (flags & UVM_FLAG_FIXED) {
1561 UVMHIST_LOG(maphist, "<- fixed & VA in use",
1562 0, 0, 0, 0);
1563 return (NULL);
1564 }
1565 if (topdown)
1566 /* Start from lower gap. */
1567 entry = entry->prev;
1568 } else if (flags & UVM_FLAG_FIXED) {
1569 if (entry->next->start >= hint + length &&
1570 hint + length > hint)
1571 goto found;
1572
1573 /* "hint" address is gap but too small */
1574 UVMHIST_LOG(maphist, "<- fixed mapping failed",
1575 0, 0, 0, 0);
1576 return (NULL); /* only one shot at it ... */
1577 } else {
1578 /*
1579 * See if given hint fits in this gap.
1580 */
1581 switch (uvm_map_space_avail(&hint, length,
1582 uoffset, align, topdown, entry)) {
1583 case 1:
1584 goto found;
1585 case -1:
1586 goto wraparound;
1587 }
1588
1589 if (topdown) {
1590 /*
1591 * Still there is a chance to fit
1592 * if hint > entry->end.
1593 */
1594 } else {
1595 /* Start from higher gap. */
1596 entry = entry->next;
1597 if (entry == &map->header)
1598 goto notfound;
1599 goto nextgap;
1600 }
1601 }
1602 }
1603
1604 /*
1605 * Note that the UVM_FLAG_FIXED case is already handled.
1606 */
1607 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1608
1609 /* Try to find the space in the red-black tree */
1610
1611 /* Check slot before any entry */
1612 hint = topdown ? entry->next->start - length : entry->end;
1613 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1614 topdown, entry)) {
1615 case 1:
1616 goto found;
1617 case -1:
1618 goto wraparound;
1619 }
1620
1621 nextgap:
1622 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1623 /* If there is not enough space in the whole tree, we fail */
1624 tmp = RB_ROOT(&map->rbhead);
1625 if (tmp == NULL || tmp->space < length)
1626 goto notfound;
1627
1628 prev = NULL; /* previous candidate */
1629
1630 /* Find an entry close to hint that has enough space */
1631 for (; tmp;) {
1632 KASSERT(tmp->next->start == tmp->end + tmp->ownspace);
1633 if (topdown) {
1634 if (tmp->next->start < hint + length &&
1635 (prev == NULL || tmp->end > prev->end)) {
1636 if (tmp->ownspace >= length)
1637 prev = tmp;
1638 else if ((child = RB_LEFT(tmp, rb_entry))
1639 != NULL && child->space >= length)
1640 prev = tmp;
1641 }
1642 } else {
1643 if (tmp->end >= hint &&
1644 (prev == NULL || tmp->end < prev->end)) {
1645 if (tmp->ownspace >= length)
1646 prev = tmp;
1647 else if ((child = RB_RIGHT(tmp, rb_entry))
1648 != NULL && child->space >= length)
1649 prev = tmp;
1650 }
1651 }
1652 if (tmp->next->start < hint + length)
1653 child = RB_RIGHT(tmp, rb_entry);
1654 else if (tmp->end > hint)
1655 child = RB_LEFT(tmp, rb_entry);
1656 else {
1657 if (tmp->ownspace >= length)
1658 break;
1659 if (topdown)
1660 child = RB_LEFT(tmp, rb_entry);
1661 else
1662 child = RB_RIGHT(tmp, rb_entry);
1663 }
1664 if (child == NULL || child->space < length)
1665 break;
1666 tmp = child;
1667 }
1668
1669 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1670 /*
1671 * Check if the entry that we found satisfies the
1672 * space requirement
1673 */
1674 if (topdown) {
1675 if (hint > tmp->next->start - length)
1676 hint = tmp->next->start - length;
1677 } else {
1678 if (hint < tmp->end)
1679 hint = tmp->end;
1680 }
1681 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1682 topdown, tmp)) {
1683 case 1:
1684 entry = tmp;
1685 goto found;
1686 case -1:
1687 goto wraparound;
1688 }
1689 if (tmp->ownspace >= length)
1690 goto listsearch;
1691 }
1692 if (prev == NULL)
1693 goto notfound;
1694
1695 if (topdown) {
1696 KASSERT(orig_hint >= prev->next->start - length ||
1697 prev->next->start - length > prev->next->start);
1698 hint = prev->next->start - length;
1699 } else {
1700 KASSERT(orig_hint <= prev->end);
1701 hint = prev->end;
1702 }
1703 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1704 topdown, prev)) {
1705 case 1:
1706 entry = prev;
1707 goto found;
1708 case -1:
1709 goto wraparound;
1710 }
1711 if (prev->ownspace >= length)
1712 goto listsearch;
1713
1714 if (topdown)
1715 tmp = RB_LEFT(prev, rb_entry);
1716 else
1717 tmp = RB_RIGHT(prev, rb_entry);
1718 for (;;) {
1719 KASSERT(tmp && tmp->space >= length);
1720 if (topdown)
1721 child = RB_RIGHT(tmp, rb_entry);
1722 else
1723 child = RB_LEFT(tmp, rb_entry);
1724 if (child && child->space >= length) {
1725 tmp = child;
1726 continue;
1727 }
1728 if (tmp->ownspace >= length)
1729 break;
1730 if (topdown)
1731 tmp = RB_LEFT(tmp, rb_entry);
1732 else
1733 tmp = RB_RIGHT(tmp, rb_entry);
1734 }
1735
1736 if (topdown) {
1737 KASSERT(orig_hint >= tmp->next->start - length ||
1738 tmp->next->start - length > tmp->next->start);
1739 hint = tmp->next->start - length;
1740 } else {
1741 KASSERT(orig_hint <= tmp->end);
1742 hint = tmp->end;
1743 }
1744 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1745 topdown, tmp)) {
1746 case 1:
1747 entry = tmp;
1748 goto found;
1749 case -1:
1750 goto wraparound;
1751 }
1752
1753 /*
1754 * The tree fails to find an entry because of offset or alignment
1755 * restrictions. Search the list instead.
1756 */
1757 listsearch:
1758 /*
1759 * Look through the rest of the map, trying to fit a new region in
1760 * the gap between existing regions, or after the very last region.
1761 * note: entry->end = base VA of current gap,
1762 * entry->next->start = VA of end of current gap
1763 */
1764
1765 for (;;) {
1766 /* Update hint for current gap. */
1767 hint = topdown ? entry->next->start - length : entry->end;
1768
1769 /* See if it fits. */
1770 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1771 topdown, entry)) {
1772 case 1:
1773 goto found;
1774 case -1:
1775 goto wraparound;
1776 }
1777
1778 /* Advance to next/previous gap */
1779 if (topdown) {
1780 if (entry == &map->header) {
1781 UVMHIST_LOG(maphist, "<- failed (off start)",
1782 0,0,0,0);
1783 goto notfound;
1784 }
1785 entry = entry->prev;
1786 } else {
1787 entry = entry->next;
1788 if (entry == &map->header) {
1789 UVMHIST_LOG(maphist, "<- failed (off end)",
1790 0,0,0,0);
1791 goto notfound;
1792 }
1793 }
1794 }
1795
1796 found:
1797 SAVE_HINT(map, map->hint, entry);
1798 *result = hint;
1799 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
1800 KASSERT( topdown || hint >= orig_hint);
1801 KASSERT(!topdown || hint <= orig_hint);
1802 KASSERT(entry->end <= hint);
1803 KASSERT(hint + length <= entry->next->start);
1804 return (entry);
1805
1806 wraparound:
1807 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
1808
1809 return (NULL);
1810
1811 notfound:
1812 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
1813
1814 return (NULL);
1815 }
1816
1817 /*
1818 * U N M A P - m a i n h e l p e r f u n c t i o n s
1819 */
1820
1821 /*
1822 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
1823 *
1824 * => caller must check alignment and size
1825 * => map must be locked by caller
1826 * => we return a list of map entries that we've removed from the map
1827 * in "entry_list"
1828 */
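/*
 * Editorial sketch (not part of the original source): the usual unmap
 * sequence built on top of this routine -- remove the entries with the
 * map locked, then drop the object/amap references with the map
 * unlocked.  The NULL reservation argument is a simplification; a
 * caller operating on a kernel map would pass a prepared
 * struct uvm_mapent_reservation instead.
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */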
1829
1830 void
1831 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
1832 struct vm_map_entry **entry_list /* OUT */,
1833 struct uvm_mapent_reservation *umr)
1834 {
1835 struct vm_map_entry *entry, *first_entry, *next;
1836 vaddr_t len;
1837 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
1838
1839 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
1840 map, start, end, 0);
1841 VM_MAP_RANGE_CHECK(map, start, end);
1842
1843 uvm_tree_sanity(map, "unmap_remove entry");
1844
1845 /*
1846 * find first entry
1847 */
1848
1849 if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
1850 /* clip and go... */
1851 entry = first_entry;
1852 UVM_MAP_CLIP_START(map, entry, start, umr);
1853 /* critical! prevents stale hint */
1854 SAVE_HINT(map, entry, entry->prev);
1855 } else {
1856 entry = first_entry->next;
1857 }
1858
1859 /*
1860 * Save the free space hint
1861 */
1862
1863 if (map->first_free->start >= start)
1864 map->first_free = entry->prev;
1865
1866 /*
1867 * note: we now re-use first_entry for a different task. we remove
1868 * a number of map entries from the map and save them in a linked
1869 * list headed by "first_entry". once we remove them from the map
1870 * the caller should unlock the map and drop the references to the
1871 * backing objects [c.f. uvm_unmap_detach]. the object is to
1872 * separate unmapping from reference dropping. why?
1873 * [1] the map has to be locked for unmapping
1874 * [2] the map need not be locked for reference dropping
1875 * [3] dropping references may trigger pager I/O, and if we hit
1876 * a pager that does synchronous I/O we may have to wait for it.
1877 * [4] we would like all waiting for I/O to occur with maps unlocked
1878 * so that we don't block other threads.
1879 */
1880
1881 first_entry = NULL;
1882 *entry_list = NULL;
1883
1884 /*
1885 * break up the area into map entry sized regions and unmap. note
1886 * that all mappings have to be removed before we can even consider
1887 * dropping references to amaps or VM objects (otherwise we could end
1888 * up with a mapping to a page on the free list which would be very bad)
1889 */
1890
1891 while ((entry != &map->header) && (entry->start < end)) {
1892 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
1893
1894 UVM_MAP_CLIP_END(map, entry, end, umr);
1895 next = entry->next;
1896 len = entry->end - entry->start;
1897
1898 /*
1899 * unwire before removing addresses from the pmap; otherwise
1900 * unwiring will put the entries back into the pmap (XXX).
1901 */
1902
1903 if (VM_MAPENT_ISWIRED(entry)) {
1904 uvm_map_entry_unwire(map, entry);
1905 }
1906 if ((map->flags & VM_MAP_PAGEABLE) == 0) {
1907
1908 /*
1909 * if the map is non-pageable, any pages mapped there
1910 * must be wired and entered with pmap_kenter_pa(),
1911 * and we should free any such pages immediately.
1912 * this is mostly used for kmem_map and mb_map.
1913 */
1914
1915 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
1916 uvm_km_pgremove_intrsafe(entry->start,
1917 entry->end);
1918 pmap_kremove(entry->start, len);
1919 }
1920 } else if (UVM_ET_ISOBJ(entry) &&
1921 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
1922 KASSERT(vm_map_pmap(map) == pmap_kernel());
1923
1924 /*
1925 * note: kernel object mappings are currently used in
1926 * two ways:
1927 * [1] "normal" mappings of pages in the kernel object
1928 * [2] uvm_km_valloc'd allocations in which we
1929 * pmap_enter in some non-kernel-object page
1930 * (e.g. vmapbuf).
1931 *
1932 * for case [1], we need to remove the mapping from
1933 * the pmap and then remove the page from the kernel
1934 * object (because, once pages in a kernel object are
1935 * unmapped they are no longer needed, unlike, say,
1936 * a vnode where you might want the data to persist
1937 * until flushed out of a queue).
1938 *
1939 * for case [2], we need to remove the mapping from
1940 * the pmap. there shouldn't be any pages at the
1941 * specified offset in the kernel object [but it
1942 * doesn't hurt to call uvm_km_pgremove just to be
1943 * safe?]
1944 *
1945 * uvm_km_pgremove currently does the following:
1946 * for pages in the kernel object in range:
1947 * - drops the swap slot
1948 * - uvm_pagefree the page
1949 */
1950
1951 /*
1952 * remove mappings from pmap and drop the pages
1953 * from the object. offsets are always relative
1954 * to vm_map_min(kernel_map).
1955 */
1956
1957 pmap_remove(pmap_kernel(), entry->start,
1958 entry->start + len);
1959 uvm_km_pgremove(entry->object.uvm_obj,
1960 entry->start - vm_map_min(kernel_map),
1961 entry->end - vm_map_min(kernel_map));
1962
1963 /*
1964 * null out kernel_object reference, we've just
1965 * dropped it
1966 */
1967
1968 entry->etype &= ~UVM_ET_OBJ;
1969 entry->object.uvm_obj = NULL;
1970 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
1971
1972 /*
1973 * remove mappings the standard way.
1974 */
1975
1976 pmap_remove(map->pmap, entry->start, entry->end);
1977 }
1978
1979 #if defined(DEBUG)
1980 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
1981
1982 /*
1983 			 * check if there's a remaining mapping,
1984 			 * which would be a bug in the caller.
1985 */
1986
1987 vaddr_t va;
1988 for (va = entry->start; va < entry->end;
1989 va += PAGE_SIZE) {
1990 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
1991 panic("uvm_unmap_remove: has mapping");
1992 }
1993 }
1994 }
1995 #endif /* defined(DEBUG) */
1996
1997 /*
1998 * remove entry from map and put it on our list of entries
1999 * that we've nuked. then go to next entry.
2000 */
2001
2002 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2003
2004 /* critical! prevents stale hint */
2005 SAVE_HINT(map, entry, entry->prev);
2006
2007 uvm_map_entry_unlink(map, entry);
2008 KASSERT(map->size >= len);
2009 map->size -= len;
2010 entry->prev = NULL;
2011 entry->next = first_entry;
2012 first_entry = entry;
2013 entry = next;
2014 }
2015 if ((map->flags & VM_MAP_DYING) == 0) {
2016 pmap_update(vm_map_pmap(map));
2017 }
2018
2019 uvm_tree_sanity(map, "unmap_remove leave");
2020
2021 /*
2022 * now we've cleaned up the map and are ready for the caller to drop
2023 * references to the mapped objects.
2024 */
2025
2026 *entry_list = first_entry;
2027 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2028
2029 simple_lock(&map->flags_lock);
2030 if (map->flags & VM_MAP_WANTVA) {
2031 map->flags &= ~VM_MAP_WANTVA;
2032 wakeup(&map->header);
2033 }
2034 simple_unlock(&map->flags_lock);
2035 }
2036
2037 /*
2038 * uvm_unmap_detach: drop references in a chain of map entries
2039 *
2040 * => we will free the map entries as we traverse the list.
2041 */
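
/*
 * illustrative sketch only (this is roughly what uvm_unmap() does, and
 * what uvmspace_free() does below; "start" and "end" are placeholder
 * names): entries are removed with the map write-locked, and the
 * references are dropped afterwards with the map unlocked, so that any
 * pager I/O does not block other threads on the map lock.
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */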
2042
2043 void
2044 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2045 {
2046 struct vm_map_entry *next_entry;
2047 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2048
2049 while (first_entry) {
2050 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2051 UVMHIST_LOG(maphist,
2052 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2053 first_entry, first_entry->aref.ar_amap,
2054 first_entry->object.uvm_obj,
2055 UVM_ET_ISSUBMAP(first_entry));
2056
2057 /*
2058 * drop reference to amap, if we've got one
2059 */
2060
2061 if (first_entry->aref.ar_amap)
2062 uvm_map_unreference_amap(first_entry, flags);
2063
2064 /*
2065 * drop reference to our backing object, if we've got one
2066 */
2067
2068 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2069 if (UVM_ET_ISOBJ(first_entry) &&
2070 first_entry->object.uvm_obj->pgops->pgo_detach) {
2071 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2072 (first_entry->object.uvm_obj);
2073 }
2074 next_entry = first_entry->next;
2075 uvm_mapent_free(first_entry);
2076 first_entry = next_entry;
2077 }
2078 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2079 }
2080
2081 /*
2082 * E X T R A C T I O N F U N C T I O N S
2083 */
2084
2085 /*
2086 * uvm_map_reserve: reserve space in a vm_map for future use.
2087 *
2088 * => we reserve space in a map by putting a dummy map entry in the
2089 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2090 * => map should be unlocked (we will write lock it)
2091 * => we return true if we were able to reserve space
2092 * => XXXCDC: should be inline?
2093 */
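
/*
 * illustrative sketch only ("size" and "kva" are placeholder names,
 * not taken from this file): a caller wanting a blank range of kernel
 * virtual space for later use might do something like:
 *
 *	vaddr_t kva = 0;
 *
 *	if (uvm_map_reserve(kernel_map, size, 0, 0, &kva) == FALSE)
 *		return ENOMEM;
 *
 * the reserved range can later be filled in with uvm_map_replace() or
 * given back with uvm_unmap().
 */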
2094
2095 int
2096 uvm_map_reserve(struct vm_map *map, vsize_t size,
2097 vaddr_t offset /* hint for pmap_prefer */,
2098 vsize_t align /* alignment hint */,
2099 vaddr_t *raddr /* IN:hint, OUT: reserved VA */)
2100 {
2101 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2102
2103 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2104 map,size,offset,raddr);
2105
2106 size = round_page(size);
2107 if (*raddr < vm_map_min(map))
2108 *raddr = vm_map_min(map); /* hint */
2109
2110 /*
2111 * reserve some virtual space.
2112 */
2113
2114 if (uvm_map(map, raddr, size, NULL, offset, 0,
2115 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2116 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
2117 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2118 return (FALSE);
2119 }
2120
2121 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2122 return (TRUE);
2123 }
2124
2125 /*
2126 * uvm_map_replace: replace a reserved (blank) area of memory with
2127 * real mappings.
2128 *
2129 * => caller must WRITE-LOCK the map
2130 * => we return TRUE if replacement was a success
2131  * => we expect the newents chain to have nnewents entries on it and
2132 * we expect newents->prev to point to the last entry on the list
2133 * => note newents is allowed to be NULL
2134 */
2135
2136 int
2137 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2138 struct vm_map_entry *newents, int nnewents)
2139 {
2140 struct vm_map_entry *oldent, *last;
2141
2142 uvm_tree_sanity(map, "map_replace entry");
2143
2144 /*
2145 * first find the blank map entry at the specified address
2146 */
2147
2148 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2149 return (FALSE);
2150 }
2151
2152 /*
2153 * check to make sure we have a proper blank entry
2154 */
2155
2156 if (oldent->start != start || oldent->end != end ||
2157 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2158 return (FALSE);
2159 }
2160
2161 #ifdef DIAGNOSTIC
2162
2163 /*
2164 * sanity check the newents chain
2165 */
2166
2167 {
2168 struct vm_map_entry *tmpent = newents;
2169 int nent = 0;
2170 vaddr_t cur = start;
2171
2172 while (tmpent) {
2173 nent++;
2174 if (tmpent->start < cur)
2175 panic("uvm_map_replace1");
2176 if (tmpent->start > tmpent->end || tmpent->end > end) {
2177 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2178 tmpent->start, tmpent->end, end);
2179 panic("uvm_map_replace2");
2180 }
2181 cur = tmpent->end;
2182 if (tmpent->next) {
2183 if (tmpent->next->prev != tmpent)
2184 panic("uvm_map_replace3");
2185 } else {
2186 if (newents->prev != tmpent)
2187 panic("uvm_map_replace4");
2188 }
2189 tmpent = tmpent->next;
2190 }
2191 if (nent != nnewents)
2192 panic("uvm_map_replace5");
2193 }
2194 #endif
2195
2196 /*
2197 * map entry is a valid blank! replace it. (this does all the
2198 * work of map entry link/unlink...).
2199 */
2200
2201 if (newents) {
2202 last = newents->prev;
2203
2204 /* critical: flush stale hints out of map */
2205 SAVE_HINT(map, map->hint, newents);
2206 if (map->first_free == oldent)
2207 map->first_free = last;
2208
2209 last->next = oldent->next;
2210 last->next->prev = last;
2211
2212 /* Fix RB tree */
2213 uvm_rb_remove(map, oldent);
2214
2215 newents->prev = oldent->prev;
2216 newents->prev->next = newents;
2217 map->nentries = map->nentries + (nnewents - 1);
2218
2219 /* Fixup the RB tree */
2220 {
2221 int i;
2222 struct vm_map_entry *tmp;
2223
2224 tmp = newents;
2225 for (i = 0; i < nnewents && tmp; i++) {
2226 uvm_rb_insert(map, tmp);
2227 tmp = tmp->next;
2228 }
2229 }
2230 } else {
2231
2232 /* critical: flush stale hints out of map */
2233 SAVE_HINT(map, map->hint, oldent->prev);
2234 if (map->first_free == oldent)
2235 map->first_free = oldent->prev;
2236
2237 /* NULL list of new entries: just remove the old one */
2238 uvm_map_entry_unlink(map, oldent);
2239 }
2240
2241 uvm_tree_sanity(map, "map_replace leave");
2242
2243 /*
2244 * now we can free the old blank entry, unlock the map and return.
2245 */
2246
2247 uvm_mapent_free(oldent);
2248 return (TRUE);
2249 }
2250
2251 /*
2252 * uvm_map_extract: extract a mapping from a map and put it somewhere
2253 * (maybe removing the old mapping)
2254 *
2255 * => maps should be unlocked (we will write lock them)
2256 * => returns 0 on success, error code otherwise
2257 * => start must be page aligned
2258 * => len must be page sized
2259 * => flags:
2260 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2261 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2262 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2263 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2264 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2265 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2266 * be used from within the kernel in a kernel level map <<<
2267 */
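
/*
 * illustrative sketch only (modeled on the kind of temporary extraction
 * done by uvm_io(); "p", "uva", "len" and "kva" are placeholder names):
 * double-map a user range into the kernel using quick references:
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, uva, len,
 *	    kernel_map, &kva,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
 *
 * per the notes above, such a QREF extraction must later be torn down
 * via the QREF path, not a plain unmap.
 */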
2268
2269 int
2270 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2271 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2272 {
2273 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2274 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2275 *deadentry, *oldentry;
2276 vsize_t elen;
2277 int nchain, error, copy_ok;
2278 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2279
2280 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2281 len,0);
2282 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2283
2284 uvm_tree_sanity(srcmap, "map_extract src enter");
2285 uvm_tree_sanity(dstmap, "map_extract dst enter");
2286
2287 /*
2288 * step 0: sanity check: start must be on a page boundary, length
2289 * must be page sized. can't ask for CONTIG/QREF if you asked for
2290 * REMOVE.
2291 */
2292
2293 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2294 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2295 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2296
2297 /*
2298 * step 1: reserve space in the target map for the extracted area
2299 */
2300
2301 dstaddr = vm_map_min(dstmap);
2302 if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
2303 return (ENOMEM);
2304 *dstaddrp = dstaddr; /* pass address back to caller */
2305 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2306
2307 /*
2308 * step 2: setup for the extraction process loop by init'ing the
2309 * map entry chain, locking src map, and looking up the first useful
2310 * entry in the map.
2311 */
2312
2313 end = start + len;
2314 newend = dstaddr + len;
2315 chain = endchain = NULL;
2316 nchain = 0;
2317 vm_map_lock(srcmap);
2318
2319 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2320
2321 /* "start" is within an entry */
2322 if (flags & UVM_EXTRACT_QREF) {
2323
2324 /*
2325 * for quick references we don't clip the entry, so
2326 * the entry may map space "before" the starting
2327 * virtual address... this is the "fudge" factor
2328 * (which can be non-zero only the first time
2329 * through the "while" loop in step 3).
2330 */
2331
2332 fudge = start - entry->start;
2333 } else {
2334
2335 /*
2336 * normal reference: we clip the map to fit (thus
2337 * fudge is zero)
2338 */
2339
2340 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2341 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2342 fudge = 0;
2343 }
2344 } else {
2345
2346 /* "start" is not within an entry ... skip to next entry */
2347 if (flags & UVM_EXTRACT_CONTIG) {
2348 error = EINVAL;
2349 goto bad; /* definite hole here ... */
2350 }
2351
2352 entry = entry->next;
2353 fudge = 0;
2354 }
2355
2356 /* save values from srcmap for step 6 */
2357 orig_entry = entry;
2358 orig_fudge = fudge;
2359
2360 /*
2361 * step 3: now start looping through the map entries, extracting
2362 * as we go.
2363 */
2364
2365 while (entry->start < end && entry != &srcmap->header) {
2366
2367 /* if we are not doing a quick reference, clip it */
2368 if ((flags & UVM_EXTRACT_QREF) == 0)
2369 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2370
2371 /* clear needs_copy (allow chunking) */
2372 if (UVM_ET_ISNEEDSCOPY(entry)) {
2373 amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
2374 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2375 error = ENOMEM;
2376 goto bad;
2377 }
2378
2379 /* amap_copy could clip (during chunk)! update fudge */
2380 if (fudge) {
2381 fudge = start - entry->start;
2382 orig_fudge = fudge;
2383 }
2384 }
2385
2386 /* calculate the offset of this from "start" */
2387 oldoffset = (entry->start + fudge) - start;
2388
2389 /* allocate a new map entry */
2390 newentry = uvm_mapent_alloc(dstmap, 0);
2391 if (newentry == NULL) {
2392 error = ENOMEM;
2393 goto bad;
2394 }
2395
2396 /* set up new map entry */
2397 newentry->next = NULL;
2398 newentry->prev = endchain;
2399 newentry->start = dstaddr + oldoffset;
2400 newentry->end =
2401 newentry->start + (entry->end - (entry->start + fudge));
2402 if (newentry->end > newend || newentry->end < newentry->start)
2403 newentry->end = newend;
2404 newentry->object.uvm_obj = entry->object.uvm_obj;
2405 if (newentry->object.uvm_obj) {
2406 if (newentry->object.uvm_obj->pgops->pgo_reference)
2407 newentry->object.uvm_obj->pgops->
2408 pgo_reference(newentry->object.uvm_obj);
2409 newentry->offset = entry->offset + fudge;
2410 } else {
2411 newentry->offset = 0;
2412 }
2413 newentry->etype = entry->etype;
2414 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2415 entry->max_protection : entry->protection;
2416 newentry->max_protection = entry->max_protection;
2417 newentry->inheritance = entry->inheritance;
2418 newentry->wired_count = 0;
2419 newentry->aref.ar_amap = entry->aref.ar_amap;
2420 if (newentry->aref.ar_amap) {
2421 newentry->aref.ar_pageoff =
2422 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2423 uvm_map_reference_amap(newentry, AMAP_SHARED |
2424 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2425 } else {
2426 newentry->aref.ar_pageoff = 0;
2427 }
2428 newentry->advice = entry->advice;
2429
2430 /* now link it on the chain */
2431 nchain++;
2432 if (endchain == NULL) {
2433 chain = endchain = newentry;
2434 } else {
2435 endchain->next = newentry;
2436 endchain = newentry;
2437 }
2438
2439 /* end of 'while' loop! */
2440 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2441 (entry->next == &srcmap->header ||
2442 entry->next->start != entry->end)) {
2443 error = EINVAL;
2444 goto bad;
2445 }
2446 entry = entry->next;
2447 fudge = 0;
2448 }
2449
2450 /*
2451 * step 4: close off chain (in format expected by uvm_map_replace)
2452 */
2453
2454 if (chain)
2455 chain->prev = endchain;
2456
2457 /*
2458 * step 5: attempt to lock the dest map so we can pmap_copy.
2459 * note usage of copy_ok:
2460 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2461 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2462 */
2463
2464 if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
2465 copy_ok = 1;
2466 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2467 nchain)) {
2468 if (srcmap != dstmap)
2469 vm_map_unlock(dstmap);
2470 error = EIO;
2471 goto bad;
2472 }
2473 } else {
2474 copy_ok = 0;
2475 		/* replace deferred until step 7 */
2476 }
2477
2478 /*
2479 * step 6: traverse the srcmap a second time to do the following:
2480 * - if we got a lock on the dstmap do pmap_copy
2481 * - if UVM_EXTRACT_REMOVE remove the entries
2482 * we make use of orig_entry and orig_fudge (saved in step 2)
2483 */
2484
2485 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2486
2487 /* purge possible stale hints from srcmap */
2488 if (flags & UVM_EXTRACT_REMOVE) {
2489 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2490 if (srcmap->first_free->start >= start)
2491 srcmap->first_free = orig_entry->prev;
2492 }
2493
2494 entry = orig_entry;
2495 fudge = orig_fudge;
2496 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2497
2498 while (entry->start < end && entry != &srcmap->header) {
2499 if (copy_ok) {
2500 oldoffset = (entry->start + fudge) - start;
2501 elen = MIN(end, entry->end) -
2502 (entry->start + fudge);
2503 pmap_copy(dstmap->pmap, srcmap->pmap,
2504 dstaddr + oldoffset, elen,
2505 entry->start + fudge);
2506 }
2507
2508 /* we advance "entry" in the following if statement */
2509 if (flags & UVM_EXTRACT_REMOVE) {
2510 pmap_remove(srcmap->pmap, entry->start,
2511 entry->end);
2512 oldentry = entry; /* save entry */
2513 entry = entry->next; /* advance */
2514 uvm_map_entry_unlink(srcmap, oldentry);
2515 /* add to dead list */
2516 oldentry->next = deadentry;
2517 deadentry = oldentry;
2518 } else {
2519 entry = entry->next; /* advance */
2520 }
2521
2522 /* end of 'while' loop */
2523 fudge = 0;
2524 }
2525 pmap_update(srcmap->pmap);
2526
2527 /*
2528 * unlock dstmap. we will dispose of deadentry in
2529 * step 7 if needed
2530 */
2531
2532 if (copy_ok && srcmap != dstmap)
2533 vm_map_unlock(dstmap);
2534
2535 } else {
2536 deadentry = NULL;
2537 }
2538
2539 /*
2540 * step 7: we are done with the source map, unlock. if copy_ok
2541 * is 0 then we have not replaced the dummy mapping in dstmap yet
2542 * and we need to do so now.
2543 */
2544
2545 vm_map_unlock(srcmap);
2546 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2547 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2548
2549 /* now do the replacement if we didn't do it in step 5 */
2550 if (copy_ok == 0) {
2551 vm_map_lock(dstmap);
2552 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2553 nchain);
2554 vm_map_unlock(dstmap);
2555
2556 if (error == FALSE) {
2557 error = EIO;
2558 goto bad2;
2559 }
2560 }
2561
2562 uvm_tree_sanity(srcmap, "map_extract src leave");
2563 uvm_tree_sanity(dstmap, "map_extract dst leave");
2564
2565 return (0);
2566
2567 /*
2568 * bad: failure recovery
2569 */
2570 bad:
2571 vm_map_unlock(srcmap);
2572 bad2: /* src already unlocked */
2573 if (chain)
2574 uvm_unmap_detach(chain,
2575 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2576
2577 uvm_tree_sanity(srcmap, "map_extract src err leave");
2578 uvm_tree_sanity(dstmap, "map_extract dst err leave");
2579
2580 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2581 return (error);
2582 }
2583
2584 /* end of extraction functions */
2585
2586 /*
2587 * uvm_map_submap: punch down part of a map into a submap
2588 *
2589 * => only the kernel_map is allowed to be submapped
2590 * => the purpose of submapping is to break up the locking granularity
2591 * of a larger map
2592 * => the range specified must have been mapped previously with a uvm_map()
2593 * call [with uobj==NULL] to create a blank map entry in the main map.
2594 * [And it had better still be blank!]
2595 * => maps which contain submaps should never be copied or forked.
2596 * => to remove a submap, use uvm_unmap() on the main map
2597 * and then uvm_map_deallocate() the submap.
2598 * => main map must be unlocked.
2599 * => submap must have been init'd and have a zero reference count.
2600 * [need not be locked as we don't actually reference it]
2601 */
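
/*
 * illustrative sketch only (in practice uvm_km_suballoc() is the usual
 * way to set this up; "va", "size" and "submap" are placeholder names):
 * the range is first mapped blank in the parent map and the submap is
 * then punched into it, roughly:
 *
 *	uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
 *	uvm_map_submap(kernel_map, va, va + size, submap);
 */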
2602
2603 int
2604 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2605 struct vm_map *submap)
2606 {
2607 struct vm_map_entry *entry;
2608 struct uvm_mapent_reservation umr;
2609 int error;
2610
2611 uvm_mapent_reserve(map, &umr, 2, 0);
2612
2613 vm_map_lock(map);
2614 VM_MAP_RANGE_CHECK(map, start, end);
2615
2616 if (uvm_map_lookup_entry(map, start, &entry)) {
2617 UVM_MAP_CLIP_START(map, entry, start, &umr);
2618 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
2619 } else {
2620 entry = NULL;
2621 }
2622
2623 if (entry != NULL &&
2624 entry->start == start && entry->end == end &&
2625 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2626 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2627 entry->etype |= UVM_ET_SUBMAP;
2628 entry->object.sub_map = submap;
2629 entry->offset = 0;
2630 uvm_map_reference(submap);
2631 error = 0;
2632 } else {
2633 error = EINVAL;
2634 }
2635 vm_map_unlock(map);
2636
2637 uvm_mapent_unreserve(map, &umr);
2638
2639 return error;
2640 }
2641
2642 /*
2643 * uvm_map_setup_kernel: init in-kernel map
2644 *
2645 * => map must not be in service yet.
2646 */
2647
2648 void
2649 uvm_map_setup_kernel(struct vm_map_kernel *map,
2650 vaddr_t min, vaddr_t max, int flags)
2651 {
2652
2653 uvm_map_setup(&map->vmk_map, min, max, flags);
2654
2655 LIST_INIT(&map->vmk_kentry_free);
2656 map->vmk_merged_entries = NULL;
2657 }
2658
2659
2660 /*
2661 * uvm_map_protect: change map protection
2662 *
2663 * => set_max means set max_protection.
2664 * => map must be unlocked.
2665 */
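
/*
 * illustrative sketch ("p", "uva", "len" and "prot" are placeholder
 * names): roughly how the mprotect(2) path changes a user range:
 *
 *	error = uvm_map_protect(&p->p_vmspace->vm_map, uva, uva + len,
 *	    prot, FALSE);
 *
 * passing TRUE for set_max changes max_protection instead, and clamps
 * the current protection to the new maximum.
 */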
2666
2667 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
2668 ~VM_PROT_WRITE : VM_PROT_ALL)
2669
2670 int
2671 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2672 vm_prot_t new_prot, boolean_t set_max)
2673 {
2674 struct vm_map_entry *current, *entry;
2675 int error = 0;
2676 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2677 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
2678 map, start, end, new_prot);
2679
2680 vm_map_lock(map);
2681 VM_MAP_RANGE_CHECK(map, start, end);
2682 if (uvm_map_lookup_entry(map, start, &entry)) {
2683 UVM_MAP_CLIP_START(map, entry, start, NULL);
2684 } else {
2685 entry = entry->next;
2686 }
2687
2688 /*
2689 * make a first pass to check for protection violations.
2690 */
2691
2692 current = entry;
2693 while ((current != &map->header) && (current->start < end)) {
2694 if (UVM_ET_ISSUBMAP(current)) {
2695 error = EINVAL;
2696 goto out;
2697 }
2698 if ((new_prot & current->max_protection) != new_prot) {
2699 error = EACCES;
2700 goto out;
2701 }
2702 /*
2703 * Don't allow VM_PROT_EXECUTE to be set on entries that
2704 * point to vnodes that are associated with a NOEXEC file
2705 * system.
2706 */
2707 if (UVM_ET_ISOBJ(current) &&
2708 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
2709 struct vnode *vp =
2710 (struct vnode *) current->object.uvm_obj;
2711
2712 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
2713 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
2714 error = EACCES;
2715 goto out;
2716 }
2717 }
2718 current = current->next;
2719 }
2720
2721 /* go back and fix up protections (no need to clip this time). */
2722
2723 current = entry;
2724 while ((current != &map->header) && (current->start < end)) {
2725 vm_prot_t old_prot;
2726
2727 UVM_MAP_CLIP_END(map, current, end, NULL);
2728 old_prot = current->protection;
2729 if (set_max)
2730 current->protection =
2731 (current->max_protection = new_prot) & old_prot;
2732 else
2733 current->protection = new_prot;
2734
2735 /*
2736 * update physical map if necessary. worry about copy-on-write
2737 * here -- CHECK THIS XXX
2738 */
2739
2740 if (current->protection != old_prot) {
2741 /* update pmap! */
2742 pmap_protect(map->pmap, current->start, current->end,
2743 current->protection & MASK(entry));
2744
2745 /*
2746 * If this entry points at a vnode, and the
2747 * protection includes VM_PROT_EXECUTE, mark
2748 * the vnode as VEXECMAP.
2749 */
2750 if (UVM_ET_ISOBJ(current)) {
2751 struct uvm_object *uobj =
2752 current->object.uvm_obj;
2753
2754 if (UVM_OBJ_IS_VNODE(uobj) &&
2755 (current->protection & VM_PROT_EXECUTE))
2756 vn_markexec((struct vnode *) uobj);
2757 }
2758 }
2759
2760 /*
2761 * If the map is configured to lock any future mappings,
2762 * wire this entry now if the old protection was VM_PROT_NONE
2763 * and the new protection is not VM_PROT_NONE.
2764 */
2765
2766 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
2767 VM_MAPENT_ISWIRED(entry) == 0 &&
2768 old_prot == VM_PROT_NONE &&
2769 new_prot != VM_PROT_NONE) {
2770 if (uvm_map_pageable(map, entry->start,
2771 entry->end, FALSE,
2772 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
2773
2774 /*
2775 * If locking the entry fails, remember the
2776 * error if it's the first one. Note we
2777 * still continue setting the protection in
2778 * the map, but will return the error
2779 * condition regardless.
2780 *
2781 * XXX Ignore what the actual error is,
2782 * XXX just call it a resource shortage
2783 * XXX so that it doesn't get confused
2784 * XXX what uvm_map_protect() itself would
2785 * XXX normally return.
2786 */
2787
2788 error = ENOMEM;
2789 }
2790 }
2791 current = current->next;
2792 }
2793 pmap_update(map->pmap);
2794
2795 out:
2796 vm_map_unlock(map);
2797
2798 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
2799 return error;
2800 }
2801
2802 #undef MASK
2803
2804 /*
2805 * uvm_map_inherit: set inheritance code for range of addrs in map.
2806 *
2807 * => map must be unlocked
2808 * => note that the inherit code is used during a "fork". see fork
2809 * code for details.
2810 */
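
/*
 * illustrative sketch ("p", "uva" and "len" are placeholder names):
 * roughly what the minherit(2) path does to make a range shared
 * across fork:
 *
 *	error = uvm_map_inherit(&p->p_vmspace->vm_map, uva, uva + len,
 *	    MAP_INHERIT_SHARE);
 */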
2811
2812 int
2813 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
2814 vm_inherit_t new_inheritance)
2815 {
2816 struct vm_map_entry *entry, *temp_entry;
2817 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
2818 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
2819 map, start, end, new_inheritance);
2820
2821 switch (new_inheritance) {
2822 case MAP_INHERIT_NONE:
2823 case MAP_INHERIT_COPY:
2824 case MAP_INHERIT_SHARE:
2825 break;
2826 default:
2827 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2828 return EINVAL;
2829 }
2830
2831 vm_map_lock(map);
2832 VM_MAP_RANGE_CHECK(map, start, end);
2833 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2834 entry = temp_entry;
2835 UVM_MAP_CLIP_START(map, entry, start, NULL);
2836 } else {
2837 entry = temp_entry->next;
2838 }
2839 while ((entry != &map->header) && (entry->start < end)) {
2840 UVM_MAP_CLIP_END(map, entry, end, NULL);
2841 entry->inheritance = new_inheritance;
2842 entry = entry->next;
2843 }
2844 vm_map_unlock(map);
2845 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2846 return 0;
2847 }
2848
2849 /*
2850 * uvm_map_advice: set advice code for range of addrs in map.
2851 *
2852 * => map must be unlocked
2853 */
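
/*
 * illustrative sketch ("p", "uva" and "len" are placeholder names):
 * roughly what the madvise(2) path does:
 *
 *	error = uvm_map_advice(&p->p_vmspace->vm_map, uva, uva + len,
 *	    MADV_SEQUENTIAL);
 */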
2854
2855 int
2856 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
2857 {
2858 struct vm_map_entry *entry, *temp_entry;
2859 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
2860 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
2861 map, start, end, new_advice);
2862
2863 vm_map_lock(map);
2864 VM_MAP_RANGE_CHECK(map, start, end);
2865 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2866 entry = temp_entry;
2867 UVM_MAP_CLIP_START(map, entry, start, NULL);
2868 } else {
2869 entry = temp_entry->next;
2870 }
2871
2872 /*
2873 * XXXJRT: disallow holes?
2874 */
2875
2876 while ((entry != &map->header) && (entry->start < end)) {
2877 UVM_MAP_CLIP_END(map, entry, end, NULL);
2878
2879 switch (new_advice) {
2880 case MADV_NORMAL:
2881 case MADV_RANDOM:
2882 case MADV_SEQUENTIAL:
2883 /* nothing special here */
2884 break;
2885
2886 default:
2887 vm_map_unlock(map);
2888 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2889 return EINVAL;
2890 }
2891 entry->advice = new_advice;
2892 entry = entry->next;
2893 }
2894
2895 vm_map_unlock(map);
2896 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2897 return 0;
2898 }
2899
2900 /*
2901 * uvm_map_pageable: sets the pageability of a range in a map.
2902 *
2903 * => wires map entries. should not be used for transient page locking.
2904 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
2905  * => regions specified as not pageable require lock-down (wired) memory
2906 * and page tables.
2907 * => map must never be read-locked
2908 * => if islocked is TRUE, map is already write-locked
2909 * => we always unlock the map, since we must downgrade to a read-lock
2910 * to call uvm_fault_wire()
2911 * => XXXCDC: check this and try and clean it up.
2912 */
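
/*
 * illustrative sketch ("p", "uva" and "len" are placeholder names):
 * wiring and unwiring a range, roughly what the mlock(2)/munlock(2)
 * paths do:
 *
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, uva, uva + len,
 *	    FALSE, 0);		(new_pageable == FALSE: wire the range)
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, uva, uva + len,
 *	    TRUE, 0);		(new_pageable == TRUE: unwire it again)
 */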
2913
2914 int
2915 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
2916 boolean_t new_pageable, int lockflags)
2917 {
2918 struct vm_map_entry *entry, *start_entry, *failed_entry;
2919 int rv;
2920 #ifdef DIAGNOSTIC
2921 u_int timestamp_save;
2922 #endif
2923 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
2924 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
2925 map, start, end, new_pageable);
2926 KASSERT(map->flags & VM_MAP_PAGEABLE);
2927
2928 if ((lockflags & UVM_LK_ENTER) == 0)
2929 vm_map_lock(map);
2930 VM_MAP_RANGE_CHECK(map, start, end);
2931
2932 /*
2933 * only one pageability change may take place at one time, since
2934 * uvm_fault_wire assumes it will be called only once for each
2935 * wiring/unwiring. therefore, we have to make sure we're actually
2936 * changing the pageability for the entire region. we do so before
2937 * making any changes.
2938 */
2939
2940 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
2941 if ((lockflags & UVM_LK_EXIT) == 0)
2942 vm_map_unlock(map);
2943
2944 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
2945 return EFAULT;
2946 }
2947 entry = start_entry;
2948
2949 /*
2950 * handle wiring and unwiring separately.
2951 */
2952
2953 if (new_pageable) { /* unwire */
2954 UVM_MAP_CLIP_START(map, entry, start, NULL);
2955
2956 /*
2957 * unwiring. first ensure that the range to be unwired is
2958 * really wired down and that there are no holes.
2959 */
2960
2961 while ((entry != &map->header) && (entry->start < end)) {
2962 if (entry->wired_count == 0 ||
2963 (entry->end < end &&
2964 (entry->next == &map->header ||
2965 entry->next->start > entry->end))) {
2966 if ((lockflags & UVM_LK_EXIT) == 0)
2967 vm_map_unlock(map);
2968 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
2969 return EINVAL;
2970 }
2971 entry = entry->next;
2972 }
2973
2974 /*
2975 * POSIX 1003.1b - a single munlock call unlocks a region,
2976 * regardless of the number of mlock calls made on that
2977 * region.
2978 */
2979
2980 entry = start_entry;
2981 while ((entry != &map->header) && (entry->start < end)) {
2982 UVM_MAP_CLIP_END(map, entry, end, NULL);
2983 if (VM_MAPENT_ISWIRED(entry))
2984 uvm_map_entry_unwire(map, entry);
2985 entry = entry->next;
2986 }
2987 if ((lockflags & UVM_LK_EXIT) == 0)
2988 vm_map_unlock(map);
2989 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2990 return 0;
2991 }
2992
2993 /*
2994 * wire case: in two passes [XXXCDC: ugly block of code here]
2995 *
2996 * 1: holding the write lock, we create any anonymous maps that need
2997 * to be created. then we clip each map entry to the region to
2998 * be wired and increment its wiring count.
2999 *
3000 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3001 * in the pages for any newly wired area (wired_count == 1).
3002 *
3003 * downgrading to a read lock for uvm_fault_wire avoids a possible
3004 * deadlock with another thread that may have faulted on one of
3005 * the pages to be wired (it would mark the page busy, blocking
3006 * us, then in turn block on the map lock that we hold). because
3007 * of problems in the recursive lock package, we cannot upgrade
3008 * to a write lock in vm_map_lookup. thus, any actions that
3009 * require the write lock must be done beforehand. because we
3010 * keep the read lock on the map, the copy-on-write status of the
3011 * entries we modify here cannot change.
3012 */
3013
3014 while ((entry != &map->header) && (entry->start < end)) {
3015 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3016
3017 /*
3018 * perform actions of vm_map_lookup that need the
3019 * write lock on the map: create an anonymous map
3020 * for a copy-on-write region, or an anonymous map
3021 * for a zero-fill region. (XXXCDC: submap case
3022 * ok?)
3023 */
3024
3025 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3026 if (UVM_ET_ISNEEDSCOPY(entry) &&
3027 ((entry->max_protection & VM_PROT_WRITE) ||
3028 (entry->object.uvm_obj == NULL))) {
3029 amap_copy(map, entry, M_WAITOK, TRUE,
3030 start, end);
3031 /* XXXCDC: wait OK? */
3032 }
3033 }
3034 }
3035 UVM_MAP_CLIP_START(map, entry, start, NULL);
3036 UVM_MAP_CLIP_END(map, entry, end, NULL);
3037 entry->wired_count++;
3038
3039 /*
3040 * Check for holes
3041 */
3042
3043 if (entry->protection == VM_PROT_NONE ||
3044 (entry->end < end &&
3045 (entry->next == &map->header ||
3046 entry->next->start > entry->end))) {
3047
3048 /*
3049 * found one. amap creation actions do not need to
3050 * be undone, but the wired counts need to be restored.
3051 */
3052
3053 while (entry != &map->header && entry->end > start) {
3054 entry->wired_count--;
3055 entry = entry->prev;
3056 }
3057 if ((lockflags & UVM_LK_EXIT) == 0)
3058 vm_map_unlock(map);
3059 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3060 return EINVAL;
3061 }
3062 entry = entry->next;
3063 }
3064
3065 /*
3066 * Pass 2.
3067 */
3068
3069 #ifdef DIAGNOSTIC
3070 timestamp_save = map->timestamp;
3071 #endif
3072 vm_map_busy(map);
3073 vm_map_downgrade(map);
3074
3075 rv = 0;
3076 entry = start_entry;
3077 while (entry != &map->header && entry->start < end) {
3078 if (entry->wired_count == 1) {
3079 rv = uvm_fault_wire(map, entry->start, entry->end,
3080 VM_FAULT_WIREMAX, entry->max_protection);
3081 if (rv) {
3082
3083 /*
3084 * wiring failed. break out of the loop.
3085 * we'll clean up the map below, once we
3086 * have a write lock again.
3087 */
3088
3089 break;
3090 }
3091 }
3092 entry = entry->next;
3093 }
3094
3095 if (rv) { /* failed? */
3096
3097 /*
3098 * Get back to an exclusive (write) lock.
3099 */
3100
3101 vm_map_upgrade(map);
3102 vm_map_unbusy(map);
3103
3104 #ifdef DIAGNOSTIC
3105 if (timestamp_save != map->timestamp)
3106 panic("uvm_map_pageable: stale map");
3107 #endif
3108
3109 /*
3110 * first drop the wiring count on all the entries
3111 * which haven't actually been wired yet.
3112 */
3113
3114 failed_entry = entry;
3115 while (entry != &map->header && entry->start < end) {
3116 entry->wired_count--;
3117 entry = entry->next;
3118 }
3119
3120 /*
3121 * now, unwire all the entries that were successfully
3122 * wired above.
3123 */
3124
3125 entry = start_entry;
3126 while (entry != failed_entry) {
3127 entry->wired_count--;
3128 if (VM_MAPENT_ISWIRED(entry) == 0)
3129 uvm_map_entry_unwire(map, entry);
3130 entry = entry->next;
3131 }
3132 if ((lockflags & UVM_LK_EXIT) == 0)
3133 vm_map_unlock(map);
3134 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3135 return (rv);
3136 }
3137
3138 /* We are holding a read lock here. */
3139 if ((lockflags & UVM_LK_EXIT) == 0) {
3140 vm_map_unbusy(map);
3141 vm_map_unlock_read(map);
3142 } else {
3143
3144 /*
3145 * Get back to an exclusive (write) lock.
3146 */
3147
3148 vm_map_upgrade(map);
3149 vm_map_unbusy(map);
3150 }
3151
3152 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3153 return 0;
3154 }
3155
3156 /*
3157 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3158 * all mapped regions.
3159 *
3160 * => map must not be locked.
3161 * => if no flags are specified, all regions are unwired.
3162 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3163 */
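
/*
 * illustrative sketch ("p" is a placeholder and "limit" is the
 * caller-supplied wired-memory limit): mlockall(MCL_CURRENT|MCL_FUTURE)
 * style usage:
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE, limit);
 */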
3164
3165 int
3166 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3167 {
3168 struct vm_map_entry *entry, *failed_entry;
3169 vsize_t size;
3170 int rv;
3171 #ifdef DIAGNOSTIC
3172 u_int timestamp_save;
3173 #endif
3174 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3175 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3176
3177 KASSERT(map->flags & VM_MAP_PAGEABLE);
3178
3179 vm_map_lock(map);
3180
3181 /*
3182 * handle wiring and unwiring separately.
3183 */
3184
3185 if (flags == 0) { /* unwire */
3186
3187 /*
3188 * POSIX 1003.1b -- munlockall unlocks all regions,
3189 * regardless of how many times mlockall has been called.
3190 */
3191
3192 for (entry = map->header.next; entry != &map->header;
3193 entry = entry->next) {
3194 if (VM_MAPENT_ISWIRED(entry))
3195 uvm_map_entry_unwire(map, entry);
3196 }
3197 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3198 vm_map_unlock(map);
3199 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3200 return 0;
3201 }
3202
3203 if (flags & MCL_FUTURE) {
3204
3205 /*
3206 * must wire all future mappings; remember this.
3207 */
3208
3209 vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
3210 }
3211
3212 if ((flags & MCL_CURRENT) == 0) {
3213
3214 /*
3215 * no more work to do!
3216 */
3217
3218 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3219 vm_map_unlock(map);
3220 return 0;
3221 }
3222
3223 /*
3224 * wire case: in three passes [XXXCDC: ugly block of code here]
3225 *
3226 * 1: holding the write lock, count all pages mapped by non-wired
3227 * entries. if this would cause us to go over our limit, we fail.
3228 *
3229 * 2: still holding the write lock, we create any anonymous maps that
3230  *    need to be created. then we increment each entry's wiring count.
3231 *
3232 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3233 * in the pages for any newly wired area (wired_count == 1).
3234 *
3235 * downgrading to a read lock for uvm_fault_wire avoids a possible
3236 * deadlock with another thread that may have faulted on one of
3237 * the pages to be wired (it would mark the page busy, blocking
3238 * us, then in turn block on the map lock that we hold). because
3239 * of problems in the recursive lock package, we cannot upgrade
3240 * to a write lock in vm_map_lookup. thus, any actions that
3241 * require the write lock must be done beforehand. because we
3242 * keep the read lock on the map, the copy-on-write status of the
3243 * entries we modify here cannot change.
3244 */
3245
3246 for (size = 0, entry = map->header.next; entry != &map->header;
3247 entry = entry->next) {
3248 if (entry->protection != VM_PROT_NONE &&
3249 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3250 size += entry->end - entry->start;
3251 }
3252 }
3253
3254 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3255 vm_map_unlock(map);
3256 return ENOMEM;
3257 }
3258
3259 /* XXX non-pmap_wired_count case must be handled by caller */
3260 #ifdef pmap_wired_count
3261 if (limit != 0 &&
3262 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3263 vm_map_unlock(map);
3264 return ENOMEM;
3265 }
3266 #endif
3267
3268 /*
3269 * Pass 2.
3270 */
3271
3272 for (entry = map->header.next; entry != &map->header;
3273 entry = entry->next) {
3274 if (entry->protection == VM_PROT_NONE)
3275 continue;
3276 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3277
3278 /*
3279 * perform actions of vm_map_lookup that need the
3280 * write lock on the map: create an anonymous map
3281 * for a copy-on-write region, or an anonymous map
3282 * for a zero-fill region. (XXXCDC: submap case
3283 * ok?)
3284 */
3285
3286 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3287 if (UVM_ET_ISNEEDSCOPY(entry) &&
3288 ((entry->max_protection & VM_PROT_WRITE) ||
3289 (entry->object.uvm_obj == NULL))) {
3290 amap_copy(map, entry, M_WAITOK, TRUE,
3291 entry->start, entry->end);
3292 /* XXXCDC: wait OK? */
3293 }
3294 }
3295 }
3296 entry->wired_count++;
3297 }
3298
3299 /*
3300 * Pass 3.
3301 */
3302
3303 #ifdef DIAGNOSTIC
3304 timestamp_save = map->timestamp;
3305 #endif
3306 vm_map_busy(map);
3307 vm_map_downgrade(map);
3308
3309 rv = 0;
3310 for (entry = map->header.next; entry != &map->header;
3311 entry = entry->next) {
3312 if (entry->wired_count == 1) {
3313 rv = uvm_fault_wire(map, entry->start, entry->end,
3314 VM_FAULT_WIREMAX, entry->max_protection);
3315 if (rv) {
3316
3317 /*
3318 * wiring failed. break out of the loop.
3319 * we'll clean up the map below, once we
3320 * have a write lock again.
3321 */
3322
3323 break;
3324 }
3325 }
3326 }
3327
3328 if (rv) {
3329
3330 /*
3331 * Get back an exclusive (write) lock.
3332 */
3333
3334 vm_map_upgrade(map);
3335 vm_map_unbusy(map);
3336
3337 #ifdef DIAGNOSTIC
3338 if (timestamp_save != map->timestamp)
3339 panic("uvm_map_pageable_all: stale map");
3340 #endif
3341
3342 /*
3343 * first drop the wiring count on all the entries
3344 * which haven't actually been wired yet.
3345 *
3346 * Skip VM_PROT_NONE entries like we did above.
3347 */
3348
3349 failed_entry = entry;
3350 for (/* nothing */; entry != &map->header;
3351 entry = entry->next) {
3352 if (entry->protection == VM_PROT_NONE)
3353 continue;
3354 entry->wired_count--;
3355 }
3356
3357 /*
3358 * now, unwire all the entries that were successfully
3359 * wired above.
3360 *
3361 * Skip VM_PROT_NONE entries like we did above.
3362 */
3363
3364 for (entry = map->header.next; entry != failed_entry;
3365 entry = entry->next) {
3366 if (entry->protection == VM_PROT_NONE)
3367 continue;
3368 entry->wired_count--;
3369 if (VM_MAPENT_ISWIRED(entry))
3370 uvm_map_entry_unwire(map, entry);
3371 }
3372 vm_map_unlock(map);
3373 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3374 return (rv);
3375 }
3376
3377 /* We are holding a read lock here. */
3378 vm_map_unbusy(map);
3379 vm_map_unlock_read(map);
3380
3381 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3382 return 0;
3383 }
3384
3385 /*
3386 * uvm_map_clean: clean out a map range
3387 *
3388 * => valid flags:
3389 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3390 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3391 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3392 * if (flags & PGO_FREE): any cached pages are freed after clean
3393 * => returns an error if any part of the specified range isn't mapped
3394 * => never a need to flush amap layer since the anonymous memory has
3395 * no permanent home, but may deactivate pages there
3396 * => called from sys_msync() and sys_madvise()
3397 * => caller must not write-lock map (read OK).
3398 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3399 */
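
/*
 * illustrative sketch ("p", "uva" and "len" are placeholder names): a
 * synchronous msync(MS_SYNC)-style flush of a user range:
 *
 *	error = uvm_map_clean(&p->p_vmspace->vm_map, uva, uva + len,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 */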
3400
3401 int
3402 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3403 {
3404 struct vm_map_entry *current, *entry;
3405 struct uvm_object *uobj;
3406 struct vm_amap *amap;
3407 struct vm_anon *anon;
3408 struct vm_page *pg;
3409 vaddr_t offset;
3410 vsize_t size;
3411 int error, refs;
3412 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3413
3414 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3415 map, start, end, flags);
3416 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3417 (PGO_FREE|PGO_DEACTIVATE));
3418
3419 vm_map_lock_read(map);
3420 VM_MAP_RANGE_CHECK(map, start, end);
3421 if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
3422 vm_map_unlock_read(map);
3423 return EFAULT;
3424 }
3425
3426 /*
3427 * Make a first pass to check for holes.
3428 */
3429
3430 for (current = entry; current->start < end; current = current->next) {
3431 if (UVM_ET_ISSUBMAP(current)) {
3432 vm_map_unlock_read(map);
3433 return EINVAL;
3434 }
3435 if (end <= current->end) {
3436 break;
3437 }
3438 if (current->end != current->next->start) {
3439 vm_map_unlock_read(map);
3440 return EFAULT;
3441 }
3442 }
3443
3444 error = 0;
3445 for (current = entry; start < end; current = current->next) {
3446 amap = current->aref.ar_amap; /* top layer */
3447 uobj = current->object.uvm_obj; /* bottom layer */
3448 KASSERT(start >= current->start);
3449
3450 /*
3451 * No amap cleaning necessary if:
3452 *
3453 * (1) There's no amap.
3454 *
3455 * (2) We're not deactivating or freeing pages.
3456 */
3457
3458 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3459 goto flush_object;
3460
3461 amap_lock(amap);
3462 offset = start - current->start;
3463 size = MIN(end, current->end) - start;
3464 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3465 			anon = amap_lookup(&current->aref, offset);
3466 if (anon == NULL)
3467 continue;
3468
3469 simple_lock(&anon->an_lock);
3470 pg = anon->u.an_page;
3471 if (pg == NULL) {
3472 simple_unlock(&anon->an_lock);
3473 continue;
3474 }
3475
3476 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3477
3478 /*
3479 * In these first 3 cases, we just deactivate the page.
3480 */
3481
3482 case PGO_CLEANIT|PGO_FREE:
3483 case PGO_CLEANIT|PGO_DEACTIVATE:
3484 case PGO_DEACTIVATE:
3485 deactivate_it:
3486 /*
3487 * skip the page if it's loaned or wired,
3488 * since it shouldn't be on a paging queue
3489 * at all in these cases.
3490 */
3491
3492 uvm_lock_pageq();
3493 if (pg->loan_count != 0 ||
3494 pg->wire_count != 0) {
3495 uvm_unlock_pageq();
3496 simple_unlock(&anon->an_lock);
3497 continue;
3498 }
3499 KASSERT(pg->uanon == anon);
3500 pmap_clear_reference(pg);
3501 uvm_pagedeactivate(pg);
3502 uvm_unlock_pageq();
3503 simple_unlock(&anon->an_lock);
3504 continue;
3505
3506 case PGO_FREE:
3507
3508 /*
3509 * If there are multiple references to
3510 * the amap, just deactivate the page.
3511 */
3512
3513 if (amap_refs(amap) > 1)
3514 goto deactivate_it;
3515
3516 /* skip the page if it's wired */
3517 if (pg->wire_count != 0) {
3518 simple_unlock(&anon->an_lock);
3519 continue;
3520 }
3521 				amap_unadd(&current->aref, offset);
3522 refs = --anon->an_ref;
3523 simple_unlock(&anon->an_lock);
3524 if (refs == 0)
3525 uvm_anfree(anon);
3526 continue;
3527 }
3528 }
3529 amap_unlock(amap);
3530
3531 flush_object:
3532 /*
3533 * flush pages if we've got a valid backing object.
3534 * note that we must always clean object pages before
3535 * freeing them since otherwise we could reveal stale
3536 * data from files.
3537 */
3538
3539 offset = current->offset + (start - current->start);
3540 size = MIN(end, current->end) - start;
3541 if (uobj != NULL) {
3542 simple_lock(&uobj->vmobjlock);
3543 if (uobj->pgops->pgo_put != NULL)
3544 error = (uobj->pgops->pgo_put)(uobj, offset,
3545 offset + size, flags | PGO_CLEANIT);
3546 else
3547 error = 0;
3548 }
3549 start += size;
3550 }
3551 vm_map_unlock_read(map);
3552 return (error);
3553 }
3554
3555
3556 /*
3557 * uvm_map_checkprot: check protection in map
3558 *
3559 * => must allow specified protection in a fully allocated region.
3560 * => map must be read or write locked by caller.
3561 */
3562
3563 boolean_t
3564 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3565 vm_prot_t protection)
3566 {
3567 struct vm_map_entry *entry;
3568 struct vm_map_entry *tmp_entry;
3569
3570 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3571 return (FALSE);
3572 }
3573 entry = tmp_entry;
3574 while (start < end) {
3575 if (entry == &map->header) {
3576 return (FALSE);
3577 }
3578
3579 /*
3580 * no holes allowed
3581 */
3582
3583 if (start < entry->start) {
3584 return (FALSE);
3585 }
3586
3587 /*
3588 * check protection associated with entry
3589 */
3590
3591 if ((entry->protection & protection) != protection) {
3592 return (FALSE);
3593 }
3594 start = entry->end;
3595 entry = entry->next;
3596 }
3597 return (TRUE);
3598 }
3599
3600 /*
3601 * uvmspace_alloc: allocate a vmspace structure.
3602 *
3603 * - structure includes vm_map and pmap
3604 * - XXX: no locking on this structure
3605 * - refcnt set to 1, rest must be init'd by caller
3606 */
3607 struct vmspace *
3608 uvmspace_alloc(vaddr_t min, vaddr_t max)
3609 {
3610 struct vmspace *vm;
3611 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3612
3613 vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
3614 uvmspace_init(vm, NULL, min, max);
3615 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3616 return (vm);
3617 }
3618
3619 /*
3620 * uvmspace_init: initialize a vmspace structure.
3621 *
3622 * - XXX: no locking on this structure
3623 * - refcnt set to 1, rest must be init'd by caller
3624 */
3625 void
3626 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t min, vaddr_t max)
3627 {
3628 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3629
3630 memset(vm, 0, sizeof(*vm));
3631 uvm_map_setup(&vm->vm_map, min, max, VM_MAP_PAGEABLE
3632 #ifdef __USING_TOPDOWN_VM
3633 | VM_MAP_TOPDOWN
3634 #endif
3635 );
3636 if (pmap)
3637 pmap_reference(pmap);
3638 else
3639 pmap = pmap_create();
3640 vm->vm_map.pmap = pmap;
3641 vm->vm_refcnt = 1;
3642 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3643 }
3644
3645 /*
3646 * uvmspace_share: share a vmspace between two processes
3647 *
3648 * - used for vfork, threads(?)
3649 */
3650
3651 void
3652 uvmspace_share(struct proc *p1, struct proc *p2)
3653 {
3654 struct simplelock *slock = &p1->p_vmspace->vm_map.ref_lock;
3655
3656 p2->p_vmspace = p1->p_vmspace;
3657 simple_lock(slock);
3658 p1->p_vmspace->vm_refcnt++;
3659 simple_unlock(slock);
3660 }
3661
3662 /*
3663 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
3664 *
3665 * - XXX: no locking on vmspace
3666 */
3667
3668 void
3669 uvmspace_unshare(struct lwp *l)
3670 {
3671 struct proc *p = l->l_proc;
3672 struct vmspace *nvm, *ovm = p->p_vmspace;
3673
3674 if (ovm->vm_refcnt == 1)
3675 /* nothing to do: vmspace isn't shared in the first place */
3676 return;
3677
3678 /* make a new vmspace, still holding old one */
3679 nvm = uvmspace_fork(ovm);
3680
3681 pmap_deactivate(l); /* unbind old vmspace */
3682 p->p_vmspace = nvm;
3683 pmap_activate(l); /* switch to new vmspace */
3684
3685 uvmspace_free(ovm); /* drop reference to old vmspace */
3686 }
3687
3688 /*
3689 * uvmspace_exec: the process wants to exec a new program
3690 */
3691
3692 void
3693 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
3694 {
3695 struct proc *p = l->l_proc;
3696 struct vmspace *nvm, *ovm = p->p_vmspace;
3697 struct vm_map *map = &ovm->vm_map;
3698
3699 #ifdef __sparc__
3700 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
3701 kill_user_windows(l); /* before stack addresses go away */
3702 #endif
3703
3704 /*
3705 * see if more than one process is using this vmspace...
3706 */
3707
3708 if (ovm->vm_refcnt == 1) {
3709
3710 /*
3711 * if p is the only process using its vmspace then we can safely
3712 * recycle that vmspace for the program that is being exec'd.
3713 */
3714
3715 #ifdef SYSVSHM
3716 /*
3717 * SYSV SHM semantics require us to kill all segments on an exec
3718 */
3719
3720 if (ovm->vm_shm)
3721 shmexit(ovm);
3722 #endif
3723
3724 /*
3725 * POSIX 1003.1b -- "lock future mappings" is revoked
3726 * when a process execs another program image.
3727 */
3728
3729 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3730
3731 /*
3732 * now unmap the old program
3733 */
3734
3735 pmap_remove_all(map->pmap);
3736 uvm_unmap(map, map->min_offset, map->max_offset);
3737 KASSERT(map->header.prev == &map->header);
3738 KASSERT(map->nentries == 0);
3739
3740 /*
3741 * resize the map
3742 */
3743
3744 map->min_offset = start;
3745 map->max_offset = end;
3746 } else {
3747
3748 /*
3749 * p's vmspace is being shared, so we can't reuse it for p since
3750 * it is still being used for others. allocate a new vmspace
3751 * for p
3752 */
3753
3754 nvm = uvmspace_alloc(start, end);
3755
3756 /*
3757 * install new vmspace and drop our ref to the old one.
3758 */
3759
3760 pmap_deactivate(l);
3761 p->p_vmspace = nvm;
3762 pmap_activate(l);
3763
3764 uvmspace_free(ovm);
3765 }
3766 }
3767
3768 /*
3769 * uvmspace_free: free a vmspace data structure
3770 */
3771
3772 void
3773 uvmspace_free(struct vmspace *vm)
3774 {
3775 struct vm_map_entry *dead_entries;
3776 struct vm_map *map = &vm->vm_map;
3777 int n;
3778
3779 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
3780
3781 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
3782 simple_lock(&map->ref_lock);
3783 n = --vm->vm_refcnt;
3784 simple_unlock(&map->ref_lock);
3785 if (n > 0)
3786 return;
3787
3788 /*
3789 * at this point, there should be no other references to the map.
3790 * delete all of the mappings, then destroy the pmap.
3791 */
3792
3793 map->flags |= VM_MAP_DYING;
3794 pmap_remove_all(map->pmap);
3795 #ifdef SYSVSHM
3796 /* Get rid of any SYSV shared memory segments. */
3797 if (vm->vm_shm != NULL)
3798 shmexit(vm);
3799 #endif
3800 if (map->nentries) {
3801 uvm_unmap_remove(map, map->min_offset, map->max_offset,
3802 &dead_entries, NULL);
3803 if (dead_entries != NULL)
3804 uvm_unmap_detach(dead_entries, 0);
3805 }
3806 KASSERT(map->nentries == 0);
3807 KASSERT(map->size == 0);
3808 pmap_destroy(map->pmap);
3809 pool_put(&uvm_vmspace_pool, vm);
3810 }
3811
3812 /*
3813 * F O R K - m a i n e n t r y p o i n t
3814 */
3815 /*
3816 * uvmspace_fork: fork a process' main map
3817 *
3818 * => create a new vmspace for child process from parent.
3819 * => parent's map must not be locked.
3820 */
3821
3822 struct vmspace *
3823 uvmspace_fork(struct vmspace *vm1)
3824 {
3825 struct vmspace *vm2;
3826 struct vm_map *old_map = &vm1->vm_map;
3827 struct vm_map *new_map;
3828 struct vm_map_entry *old_entry;
3829 struct vm_map_entry *new_entry;
3830 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
3831
3832 vm_map_lock(old_map);
3833
3834 vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset);
3835 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
3836 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
3837 new_map = &vm2->vm_map; /* XXX */
3838
3839 old_entry = old_map->header.next;
3840 new_map->size = old_map->size;
3841
3842 /*
3843 * go entry-by-entry
3844 */
3845
3846 while (old_entry != &old_map->header) {
3847
3848 /*
3849 * first, some sanity checks on the old entry
3850 */
3851
3852 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
3853 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
3854 !UVM_ET_ISNEEDSCOPY(old_entry));
3855
3856 switch (old_entry->inheritance) {
3857 case MAP_INHERIT_NONE:
3858
3859 /*
3860 * drop the mapping, modify size
3861 */
3862 new_map->size -= old_entry->end - old_entry->start;
3863 break;
3864
3865 case MAP_INHERIT_SHARE:
3866
3867 /*
3868 * share the mapping: this means we want the old and
3869 * new entries to share amaps and backing objects.
3870 */
3871 /*
3872 * if the old_entry needs a new amap (due to prev fork)
3873 * then we need to allocate it now so that we have
3874 * something we own to share with the new_entry. [in
3875 * other words, we need to clear needs_copy]
3876 */
3877
3878 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
3879 /* get our own amap, clears needs_copy */
3880 amap_copy(old_map, old_entry, M_WAITOK, FALSE,
3881 0, 0);
3882 /* XXXCDC: WAITOK??? */
3883 }
3884
3885 new_entry = uvm_mapent_alloc(new_map, 0);
3886 /* old_entry -> new_entry */
3887 uvm_mapent_copy(old_entry, new_entry);
3888
3889 /* new pmap has nothing wired in it */
3890 new_entry->wired_count = 0;
3891
3892 /*
3893 * gain reference to object backing the map (can't
3894 * be a submap, already checked this case).
3895 */
3896
3897 if (new_entry->aref.ar_amap)
3898 uvm_map_reference_amap(new_entry, AMAP_SHARED);
3899
3900 if (new_entry->object.uvm_obj &&
3901 new_entry->object.uvm_obj->pgops->pgo_reference)
3902 new_entry->object.uvm_obj->
3903 pgops->pgo_reference(
3904 new_entry->object.uvm_obj);
3905
3906 /* insert entry at end of new_map's entry list */
3907 uvm_map_entry_link(new_map, new_map->header.prev,
3908 new_entry);
3909
3910 break;
3911
3912 case MAP_INHERIT_COPY:
3913
3914 /*
3915 * copy-on-write the mapping (using mmap's
3916 * MAP_PRIVATE semantics)
3917 *
3918 * allocate new_entry, adjust reference counts.
3919 * (note that new references are read-only).
3920 */
3921
3922 new_entry = uvm_mapent_alloc(new_map, 0);
3923 /* old_entry -> new_entry */
3924 uvm_mapent_copy(old_entry, new_entry);
3925
3926 if (new_entry->aref.ar_amap)
3927 uvm_map_reference_amap(new_entry, 0);
3928
3929 if (new_entry->object.uvm_obj &&
3930 new_entry->object.uvm_obj->pgops->pgo_reference)
3931 new_entry->object.uvm_obj->pgops->pgo_reference
3932 (new_entry->object.uvm_obj);
3933
3934 /* new pmap has nothing wired in it */
3935 new_entry->wired_count = 0;
3936
3937 new_entry->etype |=
3938 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
3939 uvm_map_entry_link(new_map, new_map->header.prev,
3940 new_entry);
3941
3942 /*
3943 * the new entry will need an amap. it will either
3944 * need to be copied from the old entry or created
3945 * from scratch (if the old entry does not have an
3946 * amap). can we defer this process until later
3947 * (by setting "needs_copy") or do we need to copy
3948 * the amap now?
3949 *
3950 * we must copy the amap now if any of the following
3951 * conditions hold:
3952 * 1. the old entry has an amap and that amap is
3953 * being shared. this means that the old (parent)
3954 * process is sharing the amap with another
3955 * process. if we do not clear needs_copy here
3956 * we will end up in a situation where both the
3957 			 *    parent and child process are referring to the
3958 * same amap with "needs_copy" set. if the
3959 * parent write-faults, the fault routine will
3960 * clear "needs_copy" in the parent by allocating
3961 * a new amap. this is wrong because the
3962 * parent is supposed to be sharing the old amap
3963 * and the new amap will break that.
3964 *
3965 * 2. if the old entry has an amap and a non-zero
3966 * wire count then we are going to have to call
3967 * amap_cow_now to avoid page faults in the
3968 * parent process. since amap_cow_now requires
3969 * "needs_copy" to be clear we might as well
3970 * clear it here as well.
3971 *
3972 */
3973
3974 if (old_entry->aref.ar_amap != NULL) {
3975 if ((amap_flags(old_entry->aref.ar_amap) &
3976 AMAP_SHARED) != 0 ||
3977 VM_MAPENT_ISWIRED(old_entry)) {
3978
3979 amap_copy(new_map, new_entry, M_WAITOK,
3980 FALSE, 0, 0);
3981 /* XXXCDC: M_WAITOK ... ok? */
3982 }
3983 }
3984
3985 /*
3986 * if the parent's entry is wired down, then the
3987 * parent process does not want page faults on
3988 * access to that memory. this means that we
3989 * cannot do copy-on-write because we can't write
3990 * protect the old entry. in this case we
3991 * resolve all copy-on-write faults now, using
3992 * amap_cow_now. note that we have already
3993 * allocated any needed amap (above).
3994 */
3995
3996 if (VM_MAPENT_ISWIRED(old_entry)) {
3997
3998 /*
3999 * resolve all copy-on-write faults now
4000 * (note that there is nothing to do if
4001 * the old mapping does not have an amap).
4002 */
4003 if (old_entry->aref.ar_amap)
4004 amap_cow_now(new_map, new_entry);
4005
4006 } else {
4007
4008 /*
4009 				 * set up mappings to trigger copy-on-write faults:
4010 				 * we must write-protect the parent if it has
4011 				 * an amap and it is not already "needs_copy"...
4012 				 * if it is already "needs_copy" then the parent
4013 				 * has already been write-protected by a previous
4014 				 * fork operation.
4015 */
4016
4017 if (old_entry->aref.ar_amap &&
4018 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4019 if (old_entry->max_protection & VM_PROT_WRITE) {
4020 pmap_protect(old_map->pmap,
4021 old_entry->start,
4022 old_entry->end,
4023 old_entry->protection &
4024 ~VM_PROT_WRITE);
4025 pmap_update(old_map->pmap);
4026 }
4027 old_entry->etype |= UVM_ET_NEEDSCOPY;
4028 }
4029 }
4030 break;
4031 } /* end of switch statement */
4032 old_entry = old_entry->next;
4033 }
4034
4035 vm_map_unlock(old_map);
4036
4037 #ifdef SYSVSHM
4038 if (vm1->vm_shm)
4039 shmfork(vm1, vm2);
4040 #endif
4041
4042 #ifdef PMAP_FORK
4043 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4044 #endif
4045
4046 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4047 return (vm2);
4048 }
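
/*
 * Editorial illustration (not part of the original source): the
 * inheritance values handled by uvmspace_fork() above are the ones a
 * user program selects with minherit(2).  A minimal userland sketch
 * of MAP_INHERIT_SHARE across fork(), assuming an anonymous private
 * mapping of "len" bytes:
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	(void)minherit(p, len, MAP_INHERIT_SHARE);
 *	if (fork() == 0) {
 *		p[0] = 1;		(child stores into the page)
 *		_exit(0);
 *	}
 *	(void)wait(NULL);
 *	assert(p[0] == 1);		(parent sees the child's store)
 *
 * With the default MAP_INHERIT_COPY the child would instead receive a
 * copy-on-write view and the parent would still read 0 afterwards.
 */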
4049
4050
4051 /*
4052 * in-kernel map entry allocation.
4053 */
4054
4055 int ukh_alloc, ukh_free;
4056 int uke_alloc, uke_free;
4057
4058 struct uvm_kmapent_hdr {
4059 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4060 int ukh_nused;
4061 struct vm_map_entry *ukh_freelist;
4062 struct vm_map *ukh_map;
4063 struct vm_map_entry ukh_entries[0];
4064 };
4065
4066 #define UVM_KMAPENT_CHUNK \
4067 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4068 / sizeof(struct vm_map_entry))
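
/*
 * Editorial example (not part of the original source, using purely
 * hypothetical structure sizes): on a machine with a 4 kB page, if
 * sizeof(struct uvm_kmapent_hdr) were 32 bytes and
 * sizeof(struct vm_map_entry) were 56 bytes, then
 *
 *	UVM_KMAPENT_CHUNK = (4096 - 32) / 56 = 72
 *
 * map entries would fit in each page-sized chunk.  The real values
 * depend on the architecture and kernel options.
 */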
4069
4070 #define UVM_KHDR_FIND(entry) \
4071 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
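
/*
 * Editorial sketch (not part of the original source): each chunk of
 * kernel map entries occupies exactly one page, with the header at
 * the start of the page and UVM_KMAPENT_CHUNK entries following it
 * (see uvm_kmapent_alloc() below for how the chunk is set up):
 *
 *	page-aligned va
 *	+---------------------------------+
 *	| struct uvm_kmapent_hdr          |
 *	|   ukh_entries[0]                |  maps the chunk itself
 *	|   ukh_entries[1]                |  first entry handed out
 *	|   ukh_entries[2..CHUNK-1]       |  linked on ukh_freelist
 *	+---------------------------------+
 *
 * Because the chunk is page aligned, the header can be recovered from
 * the address of any entry in it by masking off the page offset,
 * which is all UVM_KHDR_FIND() does:
 *
 *	ukh = UVM_KHDR_FIND(entry);
 *	    == (struct uvm_kmapent_hdr *)((vaddr_t)entry & ~PAGE_MASK)
 */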
4072
4073 static __inline struct vm_map_entry *uvm_kmapent_get(struct uvm_kmapent_hdr *);
4074 static __inline void uvm_kmapent_put(struct uvm_kmapent_hdr *,
4075 struct vm_map_entry *);
4076
4077 static __inline struct vm_map *
4078 uvm_kmapent_map(struct vm_map_entry *entry)
4079 {
4080 const struct uvm_kmapent_hdr *ukh;
4081
4082 ukh = UVM_KHDR_FIND(entry);
4083 return ukh->ukh_map;
4084 }
4085
4086 static __inline struct vm_map_entry *
4087 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4088 {
4089 struct vm_map_entry *entry;
4090
4091 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4092 KASSERT(ukh->ukh_nused >= 0);
4093
4094 entry = ukh->ukh_freelist;
4095 if (entry) {
4096 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4097 == UVM_MAP_KERNEL);
4098 ukh->ukh_freelist = entry->next;
4099 ukh->ukh_nused++;
4100 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4101 } else {
4102 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4103 }
4104
4105 return entry;
4106 }
4107
4108 static __inline void
4109 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4110 {
4111
4112 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4113 == UVM_MAP_KERNEL);
4114 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4115 KASSERT(ukh->ukh_nused > 0);
4116 KASSERT(ukh->ukh_freelist != NULL ||
4117 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4118 KASSERT(ukh->ukh_freelist == NULL ||
4119 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4120
4121 ukh->ukh_nused--;
4122 entry->next = ukh->ukh_freelist;
4123 ukh->ukh_freelist = entry;
4124 }
4125
4126 /*
4127 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4128 */
4129
4130 static struct vm_map_entry *
4131 uvm_kmapent_alloc(struct vm_map *map, int flags)
4132 {
4133 struct vm_page *pg;
4134 struct uvm_map_args args;
4135 struct uvm_kmapent_hdr *ukh;
4136 struct vm_map_entry *entry;
4137 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4138 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4139 vaddr_t va;
4140 int error;
4141 int i;
4142 int s;
4143
4144 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4145 KDASSERT(kernel_map != NULL);
4146 KASSERT(vm_map_pmap(map) == pmap_kernel());
4147
4148 uke_alloc++;
4149 entry = NULL;
4150 again:
4151 /*
4152 * try to grab an entry from freelist.
4153 */
4154 s = splvm();
4155 simple_lock(&uvm.kentry_lock);
4156 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4157 if (ukh) {
4158 entry = uvm_kmapent_get(ukh);
4159 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4160 LIST_REMOVE(ukh, ukh_listq);
4161 }
4162 simple_unlock(&uvm.kentry_lock);
4163 splx(s);
4164
4165 if (entry)
4166 return entry;
4167
4168 /*
4169 * there's no free entry for this vm_map.
4170 	 * we need to allocate more vm_map_entries;
4171 	 * for simplicity, always allocate a page-sized chunk of them at once.
4172 */
4173
4174 pg = uvm_pagealloc(NULL, 0, NULL, 0);
4175 if (__predict_false(pg == NULL)) {
4176 if (flags & UVM_FLAG_NOWAIT)
4177 return NULL;
4178 uvm_wait("kme_alloc");
4179 goto again;
4180 }
4181
4182 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, 0, 0, mapflags, &args);
4183 if (error) {
4184 uvm_pagefree(pg);
4185 return NULL;
4186 }
4187
4188 va = args.uma_start;
4189
4190 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
4191 pmap_update(vm_map_pmap(map));
4192
4193 ukh = (void *)va;
4194
4195 /*
4196 	 * use the first entry for ukh itself.
4197 */
4198
4199 entry = &ukh->ukh_entries[0];
4200 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4201 error = uvm_map_enter(map, &args, entry);
4202 KASSERT(error == 0);
4203
4204 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4205 ukh->ukh_map = map;
4206 ukh->ukh_freelist = NULL;
4207 for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
4208 struct vm_map_entry *entry = &ukh->ukh_entries[i];
4209
4210 entry->flags = UVM_MAP_KERNEL;
4211 uvm_kmapent_put(ukh, entry);
4212 }
4213 KASSERT(ukh->ukh_nused == 2);
4214
4215 s = splvm();
4216 simple_lock(&uvm.kentry_lock);
4217 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4218 ukh, ukh_listq);
4219 simple_unlock(&uvm.kentry_lock);
4220 splx(s);
4221
4222 /*
4223 * return second entry.
4224 */
4225
4226 entry = &ukh->ukh_entries[1];
4227 entry->flags = UVM_MAP_KERNEL;
4228 ukh_alloc++;
4229 return entry;
4230 }
4231
4232 /*
4233  * uvm_kmapent_free: free map entry for in-kernel map
4234 */
4235
4236 static void
4237 uvm_kmapent_free(struct vm_map_entry *entry)
4238 {
4239 struct uvm_kmapent_hdr *ukh;
4240 struct vm_page *pg;
4241 struct vm_map *map;
4242 struct pmap *pmap;
4243 vaddr_t va;
4244 paddr_t pa;
4245 struct vm_map_entry *deadentry;
4246 int s;
4247
4248 uke_free++;
4249 ukh = UVM_KHDR_FIND(entry);
4250 map = ukh->ukh_map;
4251
4252 s = splvm();
4253 simple_lock(&uvm.kentry_lock);
4254 uvm_kmapent_put(ukh, entry);
4255 if (ukh->ukh_nused > 1) {
4256 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4257 LIST_INSERT_HEAD(
4258 &vm_map_to_kernel(map)->vmk_kentry_free,
4259 ukh, ukh_listq);
4260 simple_unlock(&uvm.kentry_lock);
4261 splx(s);
4262 return;
4263 }
4264
4265 /*
4266 * now we can free this ukh.
4267 *
4268 	 * however, keep one empty ukh around to avoid alloc/free ping-pong.
4269 */
4270
4271 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4272 LIST_NEXT(ukh, ukh_listq) == NULL) {
4273 simple_unlock(&uvm.kentry_lock);
4274 splx(s);
4275 return;
4276 }
4277 LIST_REMOVE(ukh, ukh_listq);
4278 simple_unlock(&uvm.kentry_lock);
4279 splx(s);
4280
4281 KASSERT(ukh->ukh_nused == 1);
4282
4283 /*
4284 	 * remove map entry for ukh itself.
4285 */
4286
4287 va = (vaddr_t)ukh;
4288 KASSERT((va & PAGE_MASK) == 0);
4289 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL);
4290 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4291 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4292 KASSERT(deadentry->next == NULL);
4293 KASSERT(deadentry == &ukh->ukh_entries[0]);
4294
4295 /*
4296 * unmap the page from pmap and free it.
4297 */
4298
4299 pmap = vm_map_pmap(map);
4300 KASSERT(pmap == pmap_kernel());
4301 if (!pmap_extract(pmap, va, &pa))
4302 panic("%s: no mapping", __func__);
4303 pmap_kremove(va, PAGE_SIZE);
4304 pg = PHYS_TO_VM_PAGE(pa);
4305 uvm_pagefree(pg);
4306 ukh_free++;
4307 }
4308
4309 /*
4310 * map entry reservation
4311 */
4312
4313 /*
4314 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4315 *
4316 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4317 * => caller shouldn't hold map locked.
4318 */
4319 int
4320 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4321 int nentries, int flags)
4322 {
4323
4324 umr->umr_nentries = 0;
4325
4326 if ((flags & UVM_FLAG_QUANTUM) != 0)
4327 return 0;
4328
4329 if (!VM_MAP_USE_KMAPENT(map))
4330 return 0;
4331
4332 while (nentries--) {
4333 struct vm_map_entry *ent;
4334 ent = uvm_kmapent_alloc(map, flags);
4335 if (!ent) {
4336 uvm_mapent_unreserve(map, umr);
4337 return ENOMEM;
4338 }
4339 UMR_PUTENTRY(umr, ent);
4340 }
4341
4342 return 0;
4343 }
4344
4345 /*
4346  * uvm_mapent_unreserve: release any entries still held in a reservation.
4347  *
4348  * => caller shouldn't hold map locked.
4349  * => never fails or sleeps.
4350 */
4351 void
4352 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4353 {
4354
4355 while (!UMR_EMPTY(umr))
4356 uvm_kmapent_free(UMR_GETENTRY(umr));
4357 }
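
/*
 * Editorial sketch (not part of the original source): the intended
 * calling pattern for the reservation interface, shown with a
 * hypothetical unmap helper.  Entries are reserved while the map is
 * still unlocked, so the clipping done under the map lock never has
 * to sleep waiting for a map entry.
 */
#if 0	/* illustration only */
static void
example_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *dead_entries;
	struct uvm_mapent_reservation umr;

	/* at most two entries are needed, to clip at start and at end */
	if (uvm_mapent_reserve(map, &umr, 2, 0) != 0)
		return;		/* sketch only; real callers handle errors */

	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead_entries, &umr);
	vm_map_unlock(map);

	uvm_mapent_unreserve(map, &umr);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}
#endif	/* illustration only */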
4358
4359 #if defined(DDB)
4360
4361 /*
4362 * DDB hooks
4363 */
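
/*
 * Editorial note (an assumption, not taken from this file): these
 * routines are normally reached from the in-kernel debugger, e.g.
 *
 *	db> show map/f <vm_map address>
 *	db> show object/f <uvm_object address>
 *	db> show page/f <vm_page address>
 *
 * where the "/f" modifier requests the full listing.
 */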
4364
4365 /*
4366 * uvm_map_printit: actually prints the map
4367 */
4368
4369 void
4370 uvm_map_printit(struct vm_map *map, boolean_t full,
4371 void (*pr)(const char *, ...))
4372 {
4373 struct vm_map_entry *entry;
4374
4375 	(*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset, map->max_offset);
4376 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
4377 map->nentries, map->size, map->ref_count, map->timestamp,
4378 map->flags);
4379 #ifdef pmap_wired_count
4380 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4381 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4382 #else
4383 (*pr)("\tpmap=%p(resident=%ld)\n", map->pmap,
4384 pmap_resident_count(map->pmap));
4385 #endif
4386 if (!full)
4387 return;
4388 for (entry = map->header.next; entry != &map->header;
4389 entry = entry->next) {
4390 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
4391 entry, entry->start, entry->end, entry->object.uvm_obj,
4392 (long long)entry->offset, entry->aref.ar_amap,
4393 entry->aref.ar_pageoff);
4394 (*pr)(
4395 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4396 "wc=%d, adv=%d\n",
4397 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4398 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4399 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4400 entry->protection, entry->max_protection,
4401 entry->inheritance, entry->wired_count, entry->advice);
4402 }
4403 }
4404
4405 /*
4406 * uvm_object_printit: actually prints the object
4407 */
4408
4409 void
4410 uvm_object_printit(struct uvm_object *uobj, boolean_t full,
4411 void (*pr)(const char *, ...))
4412 {
4413 struct vm_page *pg;
4414 int cnt = 0;
4415
4416 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
4417 uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
4418 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
4419 (*pr)("refs=<SYSTEM>\n");
4420 else
4421 (*pr)("refs=%d\n", uobj->uo_refs);
4422
4423 if (!full) {
4424 return;
4425 }
4426 (*pr)(" PAGES <pg,offset>:\n ");
4427 TAILQ_FOREACH(pg, &uobj->memq, listq) {
4428 cnt++;
4429 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
4430 if ((cnt % 3) == 0) {
4431 (*pr)("\n ");
4432 }
4433 }
4434 if ((cnt % 3) != 0) {
4435 (*pr)("\n");
4436 }
4437 }
4438
4439 /*
4440  * uvm_page_printit: actually prints the page
4441 */
4442
4443 static const char page_flagbits[] =
4444 "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY"
4445 "\11ZERO\15PAGER1";
4446 static const char page_pqflagbits[] =
4447 "\20\1FREE\2INACTIVE\3ACTIVE\5ANON\6AOBJ";
4448
4449 void
4450 uvm_page_printit(struct vm_page *pg, boolean_t full,
4451 void (*pr)(const char *, ...))
4452 {
4453 struct vm_page *tpg;
4454 struct uvm_object *uobj;
4455 struct pglist *pgl;
4456 char pgbuf[128];
4457 char pqbuf[128];
4458
4459 (*pr)("PAGE %p:\n", pg);
4460 bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf));
4461 bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf));
4462 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
4463 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
4464 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
4465 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
4466 #if defined(UVM_PAGE_TRKOWN)
4467 if (pg->flags & PG_BUSY)
4468 (*pr)(" owning process = %d, tag=%s\n",
4469 pg->owner, pg->owner_tag);
4470 else
4471 (*pr)(" page not busy, no owner\n");
4472 #else
4473 (*pr)(" [page ownership tracking disabled]\n");
4474 #endif
4475
4476 if (!full)
4477 return;
4478
4479 /* cross-verify object/anon */
4480 if ((pg->pqflags & PQ_FREE) == 0) {
4481 if (pg->pqflags & PQ_ANON) {
4482 if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
4483 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
4484 (pg->uanon) ? pg->uanon->u.an_page : NULL);
4485 else
4486 (*pr)(" anon backpointer is OK\n");
4487 } else {
4488 uobj = pg->uobject;
4489 if (uobj) {
4490 (*pr)(" checking object list\n");
4491 TAILQ_FOREACH(tpg, &uobj->memq, listq) {
4492 if (tpg == pg) {
4493 break;
4494 }
4495 }
4496 if (tpg)
4497 (*pr)(" page found on object list\n");
4498 else
4499 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
4500 }
4501 }
4502 }
4503
4504 /* cross-verify page queue */
4505 if (pg->pqflags & PQ_FREE) {
4506 int fl = uvm_page_lookup_freelist(pg);
4507 int color = VM_PGCOLOR_BUCKET(pg);
4508 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
4509 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
4510 } else if (pg->pqflags & PQ_INACTIVE) {
4511 pgl = &uvm.page_inactive;
4512 } else if (pg->pqflags & PQ_ACTIVE) {
4513 pgl = &uvm.page_active;
4514 } else {
4515 pgl = NULL;
4516 }
4517
4518 if (pgl) {
4519 (*pr)(" checking pageq list\n");
4520 TAILQ_FOREACH(tpg, pgl, pageq) {
4521 if (tpg == pg) {
4522 break;
4523 }
4524 }
4525 if (tpg)
4526 (*pr)(" page found on pageq list\n");
4527 else
4528 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
4529 }
4530 }
4531 #endif
4532