uvm_map.c revision 1.182
1 /* $NetBSD: uvm_map.c,v 1.182 2005/01/17 04:37:20 atatat Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Charles D. Cranor,
23 * Washington University, the University of California, Berkeley and
24 * its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
42 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Permission to use, copy, modify and distribute this software and
49 * its documentation is hereby granted, provided that both the copyright
50 * notice and this permission notice appear in all copies of the
51 * software, derivative works or modified versions, and any portions
52 * thereof, and that both notices appear in supporting documentation.
53 *
54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 *
58 * Carnegie Mellon requests users of this software to return to
59 *
60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 */
68
69 /*
70 * uvm_map.c: uvm map operations
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.182 2005/01/17 04:37:20 atatat Exp $");
75
76 #include "opt_ddb.h"
77 #include "opt_uvmhist.h"
78 #include "opt_uvm.h"
79 #include "opt_sysv.h"
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/mman.h>
84 #include <sys/proc.h>
85 #include <sys/malloc.h>
86 #include <sys/pool.h>
87 #include <sys/kernel.h>
88 #include <sys/mount.h>
89 #include <sys/vnode.h>
90
91 #ifdef SYSVSHM
92 #include <sys/shm.h>
93 #endif
94
95 #define UVM_MAP
96 #include <uvm/uvm.h>
97 #undef RB_AUGMENT
98 #define RB_AUGMENT(x) uvm_rb_augment(x)
99
100 #ifdef DDB
101 #include <uvm/uvm_ddb.h>
102 #endif
103
104 #ifndef UVMMAP_NOCOUNTERS
105 #include <sys/device.h>
106 struct evcnt map_ubackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
107 "uvmmap", "ubackmerge");
108 struct evcnt map_uforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
109 "uvmmap", "uforwmerge");
110 struct evcnt map_ubimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
111 "uvmmap", "ubimerge");
112 struct evcnt map_unomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
113 "uvmmap", "unomerge");
114 struct evcnt map_kbackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
115 "uvmmap", "kbackmerge");
116 struct evcnt map_kforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
117 "uvmmap", "kforwmerge");
118 struct evcnt map_kbimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
119 "uvmmap", "kbimerge");
120 struct evcnt map_knomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
121 "uvmmap", "knomerge");
122 struct evcnt uvm_map_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
123 "uvmmap", "map_call");
124 struct evcnt uvm_mlk_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
125 "uvmmap", "mlk_call");
126 struct evcnt uvm_mlk_hint = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
127 "uvmmap", "mlk_hint");
128
129 EVCNT_ATTACH_STATIC(map_ubackmerge);
130 EVCNT_ATTACH_STATIC(map_uforwmerge);
131 EVCNT_ATTACH_STATIC(map_ubimerge);
132 EVCNT_ATTACH_STATIC(map_unomerge);
133 EVCNT_ATTACH_STATIC(map_kbackmerge);
134 EVCNT_ATTACH_STATIC(map_kforwmerge);
135 EVCNT_ATTACH_STATIC(map_kbimerge);
136 EVCNT_ATTACH_STATIC(map_knomerge);
137 EVCNT_ATTACH_STATIC(uvm_map_call);
138 EVCNT_ATTACH_STATIC(uvm_mlk_call);
139 EVCNT_ATTACH_STATIC(uvm_mlk_hint);
140
141 #define UVMCNT_INCR(ev) ev.ev_count++
142 #define UVMCNT_DECR(ev) ev.ev_count--
143 #else
144 #define UVMCNT_INCR(ev)
145 #define UVMCNT_DECR(ev)
146 #endif
147
148 const char vmmapbsy[] = "vmmapbsy";
149
150 /*
151 * pool for vmspace structures.
152 */
153
154 POOL_INIT(uvm_vmspace_pool, sizeof(struct vmspace), 0, 0, 0, "vmsppl",
155 &pool_allocator_nointr);
156
157 /*
158 * pool for dynamically-allocated map entries.
159 */
160
161 POOL_INIT(uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0, "vmmpepl",
162 &pool_allocator_nointr);
163
164 MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
165 MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
166
167 #ifdef PMAP_GROWKERNEL
168 /*
169 * This global represents the end of the kernel virtual address
170 * space. If we want to exceed this, we must grow the kernel
171 * virtual address space dynamically.
172 *
173 * Note, this variable is locked by kernel_map's lock.
174 */
175 vaddr_t uvm_maxkaddr;
176 #endif
177
178 /*
179 * macros
180 */
181
182 /*
183 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
184 * for the vm_map.
185 */
186 extern struct vm_map *pager_map; /* XXX */
187 #define VM_MAP_USE_KMAPENT(map) \
188 (((map)->flags & VM_MAP_INTRSAFE) || (map) == kernel_map)
189
190 /*
191 * uvm_map_entry_link: insert entry into a map
192 *
193 * => map must be locked
194 */
195 #define uvm_map_entry_link(map, after_where, entry) do { \
196 KASSERT(entry->start < entry->end); \
197 (map)->nentries++; \
198 (entry)->prev = (after_where); \
199 (entry)->next = (after_where)->next; \
200 (entry)->prev->next = (entry); \
201 (entry)->next->prev = (entry); \
202 uvm_rb_insert((map), (entry)); \
203 } while (/*CONSTCOND*/ 0)
204
205 /*
206 * uvm_map_entry_unlink: remove entry from a map
207 *
208 * => map must be locked
209 */
210 #define uvm_map_entry_unlink(map, entry) do { \
211 (map)->nentries--; \
212 (entry)->next->prev = (entry)->prev; \
213 (entry)->prev->next = (entry)->next; \
214 uvm_rb_remove((map), (entry)); \
215 } while (/*CONSTCOND*/ 0)
216
217 /*
218 * SAVE_HINT: saves the specified entry as the hint for future lookups.
219 *
220 * => map need not be locked (protected by hint_lock).
221 */
222 #define SAVE_HINT(map,check,value) do { \
223 simple_lock(&(map)->hint_lock); \
224 if ((map)->hint == (check)) \
225 (map)->hint = (value); \
226 simple_unlock(&(map)->hint_lock); \
227 } while (/*CONSTCOND*/ 0)
228
229 /*
230 * VM_MAP_RANGE_CHECK: check and correct range
231 *
232 * => map must at least be read locked
233 */
234
235 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
236 if (start < vm_map_min(map)) \
237 start = vm_map_min(map); \
238 if (end > vm_map_max(map)) \
239 end = vm_map_max(map); \
240 if (start > end) \
241 start = end; \
242 } while (/*CONSTCOND*/ 0)
243
244 /*
245 * local prototypes
246 */
247
248 static struct vm_map_entry *
249 uvm_mapent_alloc(struct vm_map *, int);
250 static struct vm_map_entry *
251 uvm_mapent_alloc_split(struct vm_map *,
252 const struct vm_map_entry *, int,
253 struct uvm_mapent_reservation *);
254 static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
255 static void uvm_mapent_free(struct vm_map_entry *);
256 static struct vm_map_entry *
257 uvm_kmapent_alloc(struct vm_map *, int);
258 static void uvm_kmapent_free(struct vm_map_entry *);
259 static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
260 static void uvm_map_reference_amap(struct vm_map_entry *, int);
261 static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
262 struct vm_map_entry *);
263 static void uvm_map_unreference_amap(struct vm_map_entry *, int);
264
265 int _uvm_tree_sanity(struct vm_map *, const char *);
266 static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *);
267
268 static __inline int
269 uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
270 {
271
272 if (a->start < b->start)
273 return (-1);
274 else if (a->start > b->start)
275 return (1);
276
277 return (0);
278 }
279
280 static __inline void
281 uvm_rb_augment(struct vm_map_entry *entry)
282 {
283
284 entry->space = uvm_rb_subtree_space(entry);
285 }
286
287 RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
288
289 RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
290
291 static __inline vsize_t
292 uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry)
293 {
294 /* XXX map is not used */
295
296 KASSERT(entry->next != NULL);
297 return entry->next->start - entry->end;
298 }
299
300 static vsize_t
301 uvm_rb_subtree_space(const struct vm_map_entry *entry)
302 {
303 vaddr_t space, tmp;
304
305 space = entry->ownspace;
306 if (RB_LEFT(entry, rb_entry)) {
307 tmp = RB_LEFT(entry, rb_entry)->space;
308 if (tmp > space)
309 space = tmp;
310 }
311
312 if (RB_RIGHT(entry, rb_entry)) {
313 tmp = RB_RIGHT(entry, rb_entry)->space;
314 if (tmp > space)
315 space = tmp;
316 }
317
318 return (space);
319 }
320
321 static __inline void
322 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
323 {
324 /* We need to traverse to the very top */
325 do {
326 entry->ownspace = uvm_rb_space(map, entry);
327 entry->space = uvm_rb_subtree_space(entry);
328 } while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
329 }
330
331 static __inline void
332 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
333 {
334 vaddr_t space = uvm_rb_space(map, entry);
335 struct vm_map_entry *tmp;
336
337 entry->ownspace = entry->space = space;
338 tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
339 #ifdef DIAGNOSTIC
340 if (tmp != NULL)
341 panic("uvm_rb_insert: duplicate entry?");
342 #endif
343 uvm_rb_fixup(map, entry);
344 if (entry->prev != &map->header)
345 uvm_rb_fixup(map, entry->prev);
346 }
347
348 static __inline void
349 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
350 {
351 struct vm_map_entry *parent;
352
353 parent = RB_PARENT(entry, rb_entry);
354 RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
355 if (entry->prev != &map->header)
356 uvm_rb_fixup(map, entry->prev);
357 if (parent)
358 uvm_rb_fixup(map, parent);
359 }
360
361 #ifdef DEBUG
362 int uvm_debug_check_rbtree = 0;
363 #define uvm_tree_sanity(x,y) \
364 if (uvm_debug_check_rbtree) \
365 _uvm_tree_sanity(x,y)
366 #else
367 #define uvm_tree_sanity(x,y)
368 #endif
369
370 int
371 _uvm_tree_sanity(struct vm_map *map, const char *name)
372 {
373 struct vm_map_entry *tmp, *trtmp;
374 int n = 0, i = 1;
375
376 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
377 if (tmp->ownspace != uvm_rb_space(map, tmp)) {
378 printf("%s: %d/%d ownspace %lx != %lx %s\n",
379 name, n + 1, map->nentries,
380 (ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp),
381 tmp->next == &map->header ? "(last)" : "");
382 goto error;
383 }
384 }
385 trtmp = NULL;
386 RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
387 if (tmp->space != uvm_rb_subtree_space(tmp)) {
388 printf("%s: space %lx != %lx\n",
389 name, (ulong)tmp->space,
390 (ulong)uvm_rb_subtree_space(tmp));
391 goto error;
392 }
393 if (trtmp != NULL && trtmp->start >= tmp->start) {
394 printf("%s: corrupt: 0x%lx >= 0x%lx\n",
395 name, trtmp->start, tmp->start);
396 goto error;
397 }
398 n++;
399
400 trtmp = tmp;
401 }
402
403 if (n != map->nentries) {
404 printf("%s: nentries: %d vs %d\n",
405 name, n, map->nentries);
406 goto error;
407 }
408
409 for (tmp = map->header.next; tmp && tmp != &map->header;
410 tmp = tmp->next, i++) {
411 trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
412 if (trtmp != tmp) {
413 printf("%s: lookup: %d: %p - %p: %p\n",
414 name, i, tmp, trtmp,
415 RB_PARENT(tmp, rb_entry));
416 goto error;
417 }
418 }
419
420 return (0);
421 error:
422 #ifdef DDB
423 /* handy breakpoint location for error case */
424 __asm(".globl treesanity_label\ntreesanity_label:");
425 #endif
426 return (-1);
427 }
428
429 /*
430 * local inlines
431 */
432
433 static __inline struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
434
435 /*
436 * uvm_mapent_alloc: allocate a map entry
437 */
438
439 static __inline struct vm_map_entry *
440 uvm_mapent_alloc(struct vm_map *map, int flags)
441 {
442 struct vm_map_entry *me;
443 int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
444 UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
445
446 if (VM_MAP_USE_KMAPENT(map)) {
447 me = uvm_kmapent_alloc(map, flags);
448 } else {
449 me = pool_get(&uvm_map_entry_pool, pflags);
450 if (__predict_false(me == NULL))
451 return NULL;
452 me->flags = 0;
453 }
454
455 UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
456 ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
457 return (me);
458 }
459
460 /*
461 * uvm_mapent_alloc_split: allocate a map entry for clipping.
462 */
463
464 static __inline struct vm_map_entry *
465 uvm_mapent_alloc_split(struct vm_map *map,
466 const struct vm_map_entry *old_entry, int flags,
467 struct uvm_mapent_reservation *umr)
468 {
469 struct vm_map_entry *me;
470
471 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
472 (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
473
474 if (old_entry->flags & UVM_MAP_QUANTUM) {
475 int s;
476 struct vm_map_kernel *vmk = vm_map_to_kernel(map);
477
478 s = splvm();
479 simple_lock(&uvm.kentry_lock);
480 me = vmk->vmk_merged_entries;
481 KASSERT(me);
482 vmk->vmk_merged_entries = me->next;
483 simple_unlock(&uvm.kentry_lock);
484 splx(s);
485 KASSERT(me->flags & UVM_MAP_QUANTUM);
486 } else {
487 me = uvm_mapent_alloc(map, flags);
488 }
489
490 return me;
491 }
492
493 /*
494 * uvm_mapent_free: free map entry
495 */
496
497 static __inline void
498 uvm_mapent_free(struct vm_map_entry *me)
499 {
500 UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
501
502 UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
503 me, me->flags, 0, 0);
504 if (me->flags & UVM_MAP_KERNEL) {
505 uvm_kmapent_free(me);
506 } else {
507 pool_put(&uvm_map_entry_pool, me);
508 }
509 }
510
511 /*
512 * uvm_mapent_free_merged: free merged map entry
513 *
514 * => keep the entry if needed.
515 * => caller shouldn't hold map locked.
516 */
517
518 static __inline void
519 uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
520 {
521
522 KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);
523
524 if (me->flags & UVM_MAP_QUANTUM) {
525 /*
526 * keep this entry for later splitting.
527 */
528 struct vm_map_kernel *vmk;
529 int s;
530
531 KASSERT(VM_MAP_IS_KERNEL(map));
532 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
533 (me->flags & UVM_MAP_KERNEL));
534
535 vmk = vm_map_to_kernel(map);
536 s = splvm();
537 simple_lock(&uvm.kentry_lock);
538 me->next = vmk->vmk_merged_entries;
539 vmk->vmk_merged_entries = me;
540 simple_unlock(&uvm.kentry_lock);
541 splx(s);
542 } else {
543 uvm_mapent_free(me);
544 }
545 }
546
547 /*
548 * uvm_mapent_copy: copy a map entry, preserving flags
549 */
550
551 static __inline void
552 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
553 {
554
555 memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
556 ((char *)src));
557 }
558
559 /*
560 * uvm_map_entry_unwire: unwire a map entry
561 *
562 * => map should be locked by caller
563 */
564
565 static __inline void
566 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
567 {
568
569 entry->wired_count = 0;
570 uvm_fault_unwire_locked(map, entry->start, entry->end);
571 }
572
573
574 /*
575 * wrapper for calling amap_ref()
576 */
577 static __inline void
578 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
579 {
580
581 amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
582 (entry->end - entry->start) >> PAGE_SHIFT, flags);
583 }
584
585
586 /*
587 * wrapper for calling amap_unref()
588 */
589 static __inline void
590 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
591 {
592
593 amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
594 (entry->end - entry->start) >> PAGE_SHIFT, flags);
595 }
596
597
598 /*
599 * uvm_map_init: init mapping system at boot time.  note that the map
600 * entry and vmspace pools are statically initialized above (POOL_INIT).
601 */
602
603 void
604 uvm_map_init(void)
605 {
606 #if defined(UVMHIST)
607 static struct uvm_history_ent maphistbuf[100];
608 static struct uvm_history_ent pdhistbuf[100];
609 #endif
610
611 /*
612 * first, init logging system.
613 */
614
615 UVMHIST_FUNC("uvm_map_init");
616 UVMHIST_INIT_STATIC(maphist, maphistbuf);
617 UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
618 UVMHIST_CALLED(maphist);
619 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
620
621 /*
622 * initialize the global lock for kernel map entry.
623 *
624 * XXX is it worth having a per-map lock instead?
625 */
626
627 simple_lock_init(&uvm.kentry_lock);
628 }
629
630 /*
631 * clippers
632 */
633
634 /*
635 * uvm_map_clip_start: ensure that the entry begins at or after
636 * the starting address; if it doesn't, we split the entry.
637 *
638 * => caller should use UVM_MAP_CLIP_START macro rather than calling
639 * this directly
640 * => map must be locked by caller
641 */
642
643 void
644 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
645 vaddr_t start, struct uvm_mapent_reservation *umr)
646 {
647 struct vm_map_entry *new_entry;
648 vaddr_t new_adj;
649
650 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
651
652 uvm_tree_sanity(map, "clip_start entry");
653
654 /*
655 * Split off the front portion. note that we must insert the new
656 * entry BEFORE this one, so that this entry has the specified
657 * starting address.
658 */
659 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
660 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
661
662 new_entry->end = start;
663 new_adj = start - new_entry->start;
664 if (entry->object.uvm_obj)
665 entry->offset += new_adj; /* shift start over */
666
667 /* Does not change order for the RB tree */
668 entry->start = start;
669
670 if (new_entry->aref.ar_amap) {
671 amap_splitref(&new_entry->aref, &entry->aref, new_adj);
672 }
673
674 uvm_map_entry_link(map, entry->prev, new_entry);
675
676 if (UVM_ET_ISSUBMAP(entry)) {
677 /* ... unlikely to happen, but play it safe */
678 uvm_map_reference(new_entry->object.sub_map);
679 } else {
680 if (UVM_ET_ISOBJ(entry) &&
681 entry->object.uvm_obj->pgops &&
682 entry->object.uvm_obj->pgops->pgo_reference)
683 entry->object.uvm_obj->pgops->pgo_reference(
684 entry->object.uvm_obj);
685 }
686
687 uvm_tree_sanity(map, "clip_start leave");
688 }
689
690 /*
691 * uvm_map_clip_end: ensure that the entry ends at or before
692 * the ending address; if it doesn't, we split the entry.
693 *
694 * => caller should use UVM_MAP_CLIP_END macro rather than calling
695 * this directly
696 * => map must be locked by caller
697 */
698
699 void
700 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
701 struct uvm_mapent_reservation *umr)
702 {
703 struct vm_map_entry * new_entry;
704 vaddr_t new_adj; /* #bytes we move start forward */
705
706 uvm_tree_sanity(map, "clip_end entry");
707
708 /*
709 * Create a new entry and insert it
710 * AFTER the specified entry
711 */
712 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
713 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
714
715 new_entry->start = entry->end = end;
716 new_adj = end - entry->start;
717 if (new_entry->object.uvm_obj)
718 new_entry->offset += new_adj;
719
720 if (entry->aref.ar_amap)
721 amap_splitref(&entry->aref, &new_entry->aref, new_adj);
722
723 uvm_rb_fixup(map, entry);
724
725 uvm_map_entry_link(map, entry, new_entry);
726
727 if (UVM_ET_ISSUBMAP(entry)) {
728 /* ... unlikely to happen, but play it safe */
729 uvm_map_reference(new_entry->object.sub_map);
730 } else {
731 if (UVM_ET_ISOBJ(entry) &&
732 entry->object.uvm_obj->pgops &&
733 entry->object.uvm_obj->pgops->pgo_reference)
734 entry->object.uvm_obj->pgops->pgo_reference(
735 entry->object.uvm_obj);
736 }
737
738 uvm_tree_sanity(map, "clip_end leave");
739 }
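
/*
 * Illustrative sketch (not part of the build): range operations typically
 * use the UVM_MAP_CLIP_START/UVM_MAP_CLIP_END macros like this (cf.
 * uvm_unmap_remove() below), with the map locked, "entry" the first entry
 * overlapping "start", and "umr" a reservation set up by the caller.
 */
#if 0
	UVM_MAP_CLIP_START(map, entry, start, umr);
	while (entry != &map->header && entry->start < end) {
		UVM_MAP_CLIP_END(map, entry, end, umr);
		/* ... operate on [entry->start, entry->end) ... */
		entry = entry->next;
	}
#endif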
740
741
742 /*
743 * M A P - m a i n e n t r y p o i n t
744 */
745 /*
746 * uvm_map: establish a valid mapping in a map
747 *
748 * => assume startp is page aligned.
749 * => assume size is a multiple of PAGE_SIZE.
750 * => assume sys_mmap provides enough of a "hint" to have us skip
751 * over text/data/bss area.
752 * => map must be unlocked (we will lock it)
753 * => <uobj,uoffset> value meanings (4 cases):
754 * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
755 * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
756 * [3] <uobj,uoffset> == normal mapping
757 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
758 *
759 * case [4] is for kernel mappings where we don't know the offset until
760 * we've found a virtual address. note that kernel object offsets are
761 * always relative to vm_map_min(kernel_map).
762 *
763 * => if `align' is non-zero, we align the virtual address to the specified
764 * alignment.
765 * this is provided as a mechanism for large pages.
766 *
767 * => XXXCDC: need way to map in external amap?
768 */
769
770 int
771 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
772 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
773 {
774 struct uvm_map_args args;
775 struct vm_map_entry *new_entry;
776 int error;
777
778 KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
779
780 /*
781 * for pager_map, allocate the new entry first to avoid sleeping
782 * for memory while we have the map locked.
783 *
784 * besides, because we allocate entries for in-kernel maps
785 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
786 * allocate them before locking the map.
787 */
788
789 new_entry = NULL;
790 if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
791 map == pager_map) {
792 new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
793 if (__predict_false(new_entry == NULL))
794 return ENOMEM;
795 if (flags & UVM_FLAG_QUANTUM)
796 new_entry->flags |= UVM_MAP_QUANTUM;
797 }
798 if (map == pager_map)
799 flags |= UVM_FLAG_NOMERGE;
800
801 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
802 flags, &args);
803 if (!error) {
804 error = uvm_map_enter(map, &args, new_entry);
805 *startp = args.uma_start;
806 }
807
808 return error;
809 }
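
/*
 * Illustrative sketch (not part of the build): one plausible way a caller
 * might establish an anonymous, copy-on-write mapping via uvm_map().  The
 * helper name and the flag combination are made up for illustration; see
 * sys_mmap() and friends for the real callers.
 */
#if 0
static int
example_map_anon(struct proc *p, vaddr_t *vap, vsize_t len)
{

	/* <NULL, UVM_UNKNOWN_OFFSET>: case [2] in the comment above */
	return uvm_map(&p->p_vmspace->vm_map, vap, round_page(len),
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
}
#endif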
810
811 int
812 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
813 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
814 struct uvm_map_args *args)
815 {
816 struct vm_map_entry *prev_entry;
817 vm_prot_t prot = UVM_PROTECTION(flags);
818 vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
819
820 UVMHIST_FUNC("uvm_map_prepare");
821 UVMHIST_CALLED(maphist);
822
823 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
824 map, start, size, flags);
825 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
826
827 /*
828 * detect a popular device driver bug.
829 */
830
831 KASSERT(doing_shutdown || curlwp != NULL ||
832 (map->flags & VM_MAP_INTRSAFE));
833
834 /*
835 * zero-sized mapping doesn't make any sense.
836 */
837 KASSERT(size > 0);
838
839 KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
840
841 uvm_tree_sanity(map, "map entry");
842
843 /*
844 * check sanity of protection code
845 */
846
847 if ((prot & maxprot) != prot) {
848 UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
849 prot, maxprot,0,0);
850 return EACCES;
851 }
852
853 /*
854 * figure out where to put new VM range
855 */
856
857 retry:
858 if (vm_map_lock_try(map) == FALSE) {
859 if (flags & UVM_FLAG_TRYLOCK) {
860 return EAGAIN;
861 }
862 vm_map_lock(map); /* could sleep here */
863 }
864 if ((prev_entry = uvm_map_findspace(map, start, size, &start,
865 uobj, uoffset, align, flags)) == NULL) {
866 unsigned int timestamp;
867
868 if ((flags & UVM_FLAG_WAITVA) == 0) {
869 UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",
870 0,0,0,0);
871 vm_map_unlock(map);
872 return ENOMEM;
873 }
874 timestamp = map->timestamp;
875 UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
876 timestamp,0,0,0);
877 simple_lock(&map->flags_lock);
878 map->flags |= VM_MAP_WANTVA;
879 simple_unlock(&map->flags_lock);
880 vm_map_unlock(map);
881
882 /*
883 * wait until someone does unmap.
884 * XXX fragile locking
885 */
886
887 simple_lock(&map->flags_lock);
888 while ((map->flags & VM_MAP_WANTVA) != 0 &&
889 map->timestamp == timestamp) {
890 ltsleep(&map->header, PVM, "vmmapva", 0,
891 &map->flags_lock);
892 }
893 simple_unlock(&map->flags_lock);
894 goto retry;
895 }
896
897 #ifdef PMAP_GROWKERNEL
898 /*
899 * If the kernel pmap can't map the requested space,
900 * then allocate more resources for it.
901 */
902 if (map == kernel_map && uvm_maxkaddr < (start + size))
903 uvm_maxkaddr = pmap_growkernel(start + size);
904 #endif
905
906 UVMCNT_INCR(uvm_map_call);
907
908 /*
909 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
910 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
911 * either case we want to zero it before storing it in the map entry
912 * (because it looks strange and confusing when debugging...)
913 *
914 * if uobj is not null
915 * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
916 * and we do not need to change uoffset.
917 * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
918 * now (based on the starting address of the map). this case is
919 * for kernel object mappings where we don't know the offset until
920 * the virtual address is found (with uvm_map_findspace). the
921 * offset is the distance we are from the start of the map.
922 */
923
924 if (uobj == NULL) {
925 uoffset = 0;
926 } else {
927 if (uoffset == UVM_UNKNOWN_OFFSET) {
928 KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
929 uoffset = start - vm_map_min(kernel_map);
930 }
931 }
932
933 args->uma_flags = flags;
934 args->uma_prev = prev_entry;
935 args->uma_start = start;
936 args->uma_size = size;
937 args->uma_uobj = uobj;
938 args->uma_uoffset = uoffset;
939
940 return 0;
941 }
942
943 int
944 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
945 struct vm_map_entry *new_entry)
946 {
947 struct vm_map_entry *prev_entry = args->uma_prev;
948 struct vm_map_entry *dead = NULL;
949
950 const uvm_flag_t flags = args->uma_flags;
951 const vm_prot_t prot = UVM_PROTECTION(flags);
952 const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
953 const vm_inherit_t inherit = UVM_INHERIT(flags);
954 const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
955 AMAP_EXTEND_NOWAIT : 0;
956 const int advice = UVM_ADVICE(flags);
957 const int meflagmask = UVM_MAP_NOMERGE | UVM_MAP_QUANTUM;
958 const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
959 UVM_MAP_QUANTUM : 0;
960
961 vaddr_t start = args->uma_start;
962 vsize_t size = args->uma_size;
963 struct uvm_object *uobj = args->uma_uobj;
964 voff_t uoffset = args->uma_uoffset;
965
966 const int kmap = (vm_map_pmap(map) == pmap_kernel());
967 int merged = 0;
968 int error;
969 int newetype;
970
971 UVMHIST_FUNC("uvm_map_enter");
972 UVMHIST_CALLED(maphist);
973
974 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
975 map, start, size, flags);
976 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
977
978 if (flags & UVM_FLAG_QUANTUM) {
979 KASSERT(new_entry);
980 KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
981 }
982
983 if (uobj)
984 newetype = UVM_ET_OBJ;
985 else
986 newetype = 0;
987
988 if (flags & UVM_FLAG_COPYONW) {
989 newetype |= UVM_ET_COPYONWRITE;
990 if ((flags & UVM_FLAG_OVERLAY) == 0)
991 newetype |= UVM_ET_NEEDSCOPY;
992 }
993
994 /*
995 * try and insert in map by extending previous entry, if possible.
996 * XXX: we don't try and pull back the next entry. might be useful
997 * for a stack, but we are currently allocating our stack in advance.
998 */
999
1000 if (flags & UVM_FLAG_NOMERGE)
1001 goto nomerge;
1002
1003 if (prev_entry->etype == newetype &&
1004 prev_entry->end == start &&
1005 prev_entry != &map->header &&
1006 prev_entry->object.uvm_obj == uobj) {
1007
1008 if ((prev_entry->flags & meflagmask) != meflagval)
1009 goto forwardmerge;
1010
1011 if (uobj && prev_entry->offset +
1012 (prev_entry->end - prev_entry->start) != uoffset)
1013 goto forwardmerge;
1014
1015 if (prev_entry->protection != prot ||
1016 prev_entry->max_protection != maxprot)
1017 goto forwardmerge;
1018
1019 if (prev_entry->inheritance != inherit ||
1020 prev_entry->advice != advice)
1021 goto forwardmerge;
1022
1023 /* wiring status must match (new area is unwired) */
1024 if (VM_MAPENT_ISWIRED(prev_entry))
1025 goto forwardmerge;
1026
1027 /*
1028 * can't extend a shared amap. note: no need to lock amap to
1029 * look at refs since we don't care about its exact value.
1030 * if it is one (i.e. we have the only reference) it will stay there
1031 */
1032
1033 if (prev_entry->aref.ar_amap &&
1034 amap_refs(prev_entry->aref.ar_amap) != 1) {
1035 goto forwardmerge;
1036 }
1037
1038 if (prev_entry->aref.ar_amap) {
1039 error = amap_extend(prev_entry, size,
1040 amapwaitflag | AMAP_EXTEND_FORWARDS);
1041 if (error)
1042 goto done;
1043 }
1044
1045 if (kmap)
1046 UVMCNT_INCR(map_kbackmerge);
1047 else
1048 UVMCNT_INCR(map_ubackmerge);
1049 UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
1050
1051 /*
1052 * drop our reference to uobj since we are extending a reference
1053 * that we already have (the ref count can not drop to zero).
1054 */
1055
1056 if (uobj && uobj->pgops->pgo_detach)
1057 uobj->pgops->pgo_detach(uobj);
1058
1059 prev_entry->end += size;
1060 uvm_rb_fixup(map, prev_entry);
1061
1062 uvm_tree_sanity(map, "map backmerged");
1063
1064 UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1065 merged++;
1066 }
1067
1068 forwardmerge:
1069 if (prev_entry->next->etype == newetype &&
1070 prev_entry->next->start == (start + size) &&
1071 prev_entry->next != &map->header &&
1072 prev_entry->next->object.uvm_obj == uobj) {
1073
1074 if ((prev_entry->next->flags & meflagmask) != meflagval)
1075 goto nomerge;
1076
1077 if (uobj && prev_entry->next->offset != uoffset + size)
1078 goto nomerge;
1079
1080 if (prev_entry->next->protection != prot ||
1081 prev_entry->next->max_protection != maxprot)
1082 goto nomerge;
1083
1084 if (prev_entry->next->inheritance != inherit ||
1085 prev_entry->next->advice != advice)
1086 goto nomerge;
1087
1088 /* wiring status must match (new area is unwired) */
1089 if (VM_MAPENT_ISWIRED(prev_entry->next))
1090 goto nomerge;
1091
1092 /*
1093 * can't extend a shared amap. note: no need to lock amap to
1094 * look at refs since we don't care about its exact value.
1095 * if it is one (i.e. we have the only reference) it will stay there.
1096 *
1097 * note that we also can't merge two amaps, so if we
1098 * merged with the previous entry which has an amap,
1099 * and the next entry also has an amap, we give up.
1100 *
1101 * Interesting cases:
1102 * amap, new, amap -> give up second merge (single fwd extend)
1103 * amap, new, none -> double forward extend (extend again here)
1104 * none, new, amap -> double backward extend (done here)
1105 * uobj, new, amap -> single backward extend (done here)
1106 *
1107 * XXX should we attempt to deal with someone refilling
1108 * the deallocated region between two entries that are
1109 * backed by the same amap (ie, arefs is 2, "prev" and
1110 * "next" refer to it, and adding this allocation will
1111 * close the hole, thus restoring arefs to 1 and
1112 * deallocating the "next" vm_map_entry)? -- @@@
1113 */
1114
1115 if (prev_entry->next->aref.ar_amap &&
1116 (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1117 (merged && prev_entry->aref.ar_amap))) {
1118 goto nomerge;
1119 }
1120
1121 if (merged) {
1122 /*
1123 * Try to extend the amap of the previous entry to
1124 * cover the next entry as well. If it doesn't work
1125 * just skip on, don't actually give up, since we've
1126 * already completed the back merge.
1127 */
1128 if (prev_entry->aref.ar_amap) {
1129 if (amap_extend(prev_entry,
1130 prev_entry->next->end -
1131 prev_entry->next->start,
1132 amapwaitflag | AMAP_EXTEND_FORWARDS))
1133 goto nomerge;
1134 }
1135
1136 /*
1137 * Try to extend the amap of the *next* entry
1138 * back to cover the new allocation *and* the
1139 * previous entry as well (the previous merge
1140 * didn't have an amap already otherwise we
1141 * wouldn't be checking here for an amap). If
1142 * it doesn't work just skip on, again, don't
1143 * actually give up, since we've already
1144 * completed the back merge.
1145 */
1146 else if (prev_entry->next->aref.ar_amap) {
1147 if (amap_extend(prev_entry->next,
1148 prev_entry->end -
1149 prev_entry->start,
1150 amapwaitflag | AMAP_EXTEND_BACKWARDS))
1151 goto nomerge;
1152 }
1153 } else {
1154 /*
1155 * Pull the next entry's amap backwards to cover this
1156 * new allocation.
1157 */
1158 if (prev_entry->next->aref.ar_amap) {
1159 error = amap_extend(prev_entry->next, size,
1160 amapwaitflag | AMAP_EXTEND_BACKWARDS);
1161 if (error)
1162 goto done;
1163 }
1164 }
1165
1166 if (merged) {
1167 if (kmap) {
1168 UVMCNT_DECR(map_kbackmerge);
1169 UVMCNT_INCR(map_kbimerge);
1170 } else {
1171 UVMCNT_DECR(map_ubackmerge);
1172 UVMCNT_INCR(map_ubimerge);
1173 }
1174 } else {
1175 if (kmap)
1176 UVMCNT_INCR(map_kforwmerge);
1177 else
1178 UVMCNT_INCR(map_uforwmerge);
1179 }
1180 UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);
1181
1182 /*
1183 * drop our reference to uobj since we are extending a reference
1184 * that we already have (the ref count can not drop to zero).
1185 * (if merged, we've already detached)
1186 */
1187 if (uobj && uobj->pgops->pgo_detach && !merged)
1188 uobj->pgops->pgo_detach(uobj);
1189
1190 if (merged) {
1191 dead = prev_entry->next;
1192 prev_entry->end = dead->end;
1193 uvm_map_entry_unlink(map, dead);
1194 if (dead->aref.ar_amap != NULL) {
1195 prev_entry->aref = dead->aref;
1196 dead->aref.ar_amap = NULL;
1197 }
1198 } else {
1199 prev_entry->next->start -= size;
1200 if (prev_entry != &map->header)
1201 uvm_rb_fixup(map, prev_entry);
1202 if (uobj)
1203 prev_entry->next->offset = uoffset;
1204 }
1205
1206 uvm_tree_sanity(map, "map forwardmerged");
1207
1208 UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1209 merged++;
1210 }
1211
1212 nomerge:
1213 if (!merged) {
1214 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1215 if (kmap)
1216 UVMCNT_INCR(map_knomerge);
1217 else
1218 UVMCNT_INCR(map_unomerge);
1219
1220 /*
1221 * allocate new entry and link it in.
1222 */
1223
1224 if (new_entry == NULL) {
1225 new_entry = uvm_mapent_alloc(map,
1226 (flags & UVM_FLAG_NOWAIT));
1227 if (__predict_false(new_entry == NULL)) {
1228 error = ENOMEM;
1229 goto done;
1230 }
1231 }
1232 new_entry->start = start;
1233 new_entry->end = new_entry->start + size;
1234 new_entry->object.uvm_obj = uobj;
1235 new_entry->offset = uoffset;
1236
1237 new_entry->etype = newetype;
1238
1239 if (flags & UVM_FLAG_NOMERGE) {
1240 new_entry->flags |= UVM_MAP_NOMERGE;
1241 }
1242
1243 new_entry->protection = prot;
1244 new_entry->max_protection = maxprot;
1245 new_entry->inheritance = inherit;
1246 new_entry->wired_count = 0;
1247 new_entry->advice = advice;
1248 if (flags & UVM_FLAG_OVERLAY) {
1249
1250 /*
1251 * to_add: for BSS we overallocate a little since we
1252 * are likely to extend
1253 */
1254
1255 vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1256 UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1257 struct vm_amap *amap = amap_alloc(size, to_add,
1258 (flags & UVM_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK);
1259 if (__predict_false(amap == NULL)) {
1260 error = ENOMEM;
1261 goto done;
1262 }
1263 new_entry->aref.ar_pageoff = 0;
1264 new_entry->aref.ar_amap = amap;
1265 } else {
1266 new_entry->aref.ar_pageoff = 0;
1267 new_entry->aref.ar_amap = NULL;
1268 }
1269 uvm_map_entry_link(map, prev_entry, new_entry);
1270
1271 /*
1272 * Update the free space hint
1273 */
1274
1275 if ((map->first_free == prev_entry) &&
1276 (prev_entry->end >= new_entry->start))
1277 map->first_free = new_entry;
1278
1279 new_entry = NULL;
1280 }
1281
1282 map->size += size;
1283
1284 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1285
1286 error = 0;
1287 done:
1288 vm_map_unlock(map);
1289 if (new_entry) {
1290 if (error == 0) {
1291 KDASSERT(merged);
1292 uvm_mapent_free_merged(map, new_entry);
1293 } else {
1294 uvm_mapent_free(new_entry);
1295 }
1296 }
1297 if (dead) {
1298 KDASSERT(merged);
1299 uvm_mapent_free_merged(map, dead);
1300 }
1301 return error;
1302 }
1303
1304 /*
1305 * uvm_map_lookup_entry: find map entry at or before an address
1306 *
1307 * => map must at least be read-locked by caller
1308 * => entry is returned in "entry"
1309 * => return value is true if address is in the returned entry
1310 */
1311
1312 boolean_t
1313 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1314 struct vm_map_entry **entry /* OUT */)
1315 {
1316 struct vm_map_entry *cur;
1317 boolean_t use_tree = FALSE;
1318 UVMHIST_FUNC("uvm_map_lookup_entry");
1319 UVMHIST_CALLED(maphist);
1320
1321 UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1322 map, address, entry, 0);
1323
1324 /*
1325 * start looking either from the head of the
1326 * list, or from the hint.
1327 */
1328
1329 simple_lock(&map->hint_lock);
1330 cur = map->hint;
1331 simple_unlock(&map->hint_lock);
1332
1333 if (cur == &map->header)
1334 cur = cur->next;
1335
1336 UVMCNT_INCR(uvm_mlk_call);
1337 if (address >= cur->start) {
1338
1339 /*
1340 * go from hint to end of list.
1341 *
1342 * but first, make a quick check to see if
1343 * we are already looking at the entry we
1344 * want (which is usually the case).
1345 * note also that we don't need to save the hint
1346 * here... it is the same hint (unless we are
1347 * at the header, in which case the hint didn't
1348 * buy us anything anyway).
1349 */
1350
1351 if (cur != &map->header && cur->end > address) {
1352 UVMCNT_INCR(uvm_mlk_hint);
1353 *entry = cur;
1354 UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1355 cur, 0, 0, 0);
1356 return (TRUE);
1357 }
1358
1359 if (map->nentries > 30)
1360 use_tree = TRUE;
1361 } else {
1362
1363 /*
1364 * invalid hint. use tree.
1365 */
1366 use_tree = TRUE;
1367 }
1368
1369 uvm_tree_sanity(map, __func__);
1370
1371 if (use_tree) {
1372 struct vm_map_entry *prev = &map->header;
1373 cur = RB_ROOT(&map->rbhead);
1374
1375 /*
1376 * Simple lookup in the tree. Happens when the hint is
1377 * invalid, or nentries reach a threshold.
1378 */
1379 while (cur) {
1380 if (address >= cur->start) {
1381 if (address < cur->end) {
1382 *entry = cur;
1383 goto got;
1384 }
1385 prev = cur;
1386 cur = RB_RIGHT(cur, rb_entry);
1387 } else
1388 cur = RB_LEFT(cur, rb_entry);
1389 }
1390 *entry = prev;
1391 goto failed;
1392 }
1393
1394 /*
1395 * search linearly
1396 */
1397
1398 while (cur != &map->header) {
1399 if (cur->end > address) {
1400 if (address >= cur->start) {
1401 /*
1402 * save this lookup for future
1403 * hints, and return
1404 */
1405
1406 *entry = cur;
1407 got:
1408 SAVE_HINT(map, map->hint, *entry);
1409 UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1410 cur, 0, 0, 0);
1411 KDASSERT((*entry)->start <= address);
1412 KDASSERT(address < (*entry)->end);
1413 return (TRUE);
1414 }
1415 break;
1416 }
1417 cur = cur->next;
1418 }
1419 *entry = cur->prev;
1420 failed:
1421 SAVE_HINT(map, map->hint, *entry);
1422 UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1423 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1424 KDASSERT((*entry)->next == &map->header ||
1425 address < (*entry)->next->start);
1426 return (FALSE);
1427 }
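
/*
 * Illustrative sketch (not part of the build): how a caller typically
 * interprets the result of uvm_map_lookup_entry().  The map must be at
 * least read-locked and "va" is the address being looked up.
 */
#if 0
	struct vm_map_entry *entry;

	if (uvm_map_lookup_entry(map, va, &entry)) {
		/* va lies within [entry->start, entry->end) */
	} else {
		/*
		 * va is in the gap following "entry" ("entry" is
		 * &map->header if va precedes the first entry).
		 */
	}
#endif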
1428
1429 /*
1430 * See if the range between start and start + length fits in the gap
1431 * between entry->end and entry->next->start.  Returns 1 if it fits,
1432 * 0 if it doesn't, and -1 if the address wraps around.
1433 */
1434 static __inline int
1435 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1436 vsize_t align, int topdown, struct vm_map_entry *entry)
1437 {
1438 vaddr_t end;
1439
1440 #ifdef PMAP_PREFER
1441 /*
1442 * push start address forward as needed to avoid VAC alias problems.
1443 * we only do this if a valid offset is specified.
1444 */
1445
1446 if (uoffset != UVM_UNKNOWN_OFFSET)
1447 PMAP_PREFER(uoffset, start, length, topdown);
1448 #endif
1449 if (align != 0) {
1450 if ((*start & (align - 1)) != 0) {
1451 if (topdown)
1452 *start &= ~(align - 1);
1453 else
1454 *start = roundup(*start, align);
1455 }
1456 /*
1457 * XXX Should we PMAP_PREFER() here again?
1458 * eh...i think we're okay
1459 */
1460 }
1461
1462 /*
1463 * Find the end of the proposed new region. Be sure we didn't
1464 * wrap around the address; if so, we lose. Otherwise, if the
1465 * proposed new region fits before the next entry, we win.
1466 */
1467
1468 end = *start + length;
1469 if (end < *start)
1470 return (-1);
1471
1472 if (entry->next->start >= end && *start >= entry->end)
1473 return (1);
1474
1475 return (0);
1476 }
1477
1478 /*
1479 * uvm_map_findspace: find "length" sized space in "map".
1480 *
1481 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1482 * set in "flags" (in which case we insist on using "hint").
1483 * => "result" is VA returned
1484 * => uobj/uoffset are to be used to handle VAC alignment, if required
1485 * => if "align" is non-zero, we attempt to align to that value.
1486 * => caller must at least have read-locked map
1487 * => returns NULL on failure, or pointer to prev. map entry if success
1488 * => note this is a cross between the old vm_map_findspace and vm_map_find
1489 */
1490
1491 struct vm_map_entry *
1492 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1493 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1494 vsize_t align, int flags)
1495 {
1496 struct vm_map_entry *entry;
1497 struct vm_map_entry *child, *prev, *tmp;
1498 vaddr_t orig_hint;
1499 const int topdown = map->flags & VM_MAP_TOPDOWN;
1500 UVMHIST_FUNC("uvm_map_findspace");
1501 UVMHIST_CALLED(maphist);
1502
1503 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1504 map, hint, length, flags);
1505 KASSERT((align & (align - 1)) == 0);
1506 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1507
1508 uvm_tree_sanity(map, "map_findspace entry");
1509
1510 /*
1511 * remember the original hint. if we are aligning, then we
1512 * may have to try again with no alignment constraint if
1513 * we fail the first time.
1514 */
1515
1516 orig_hint = hint;
1517 if (hint < map->min_offset) { /* check ranges ... */
1518 if (flags & UVM_FLAG_FIXED) {
1519 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1520 return (NULL);
1521 }
1522 hint = map->min_offset;
1523 }
1524 if (hint > map->max_offset) {
1525 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1526 hint, map->min_offset, map->max_offset, 0);
1527 return (NULL);
1528 }
1529
1530 /*
1531 * Look for the first possible address; if there's already
1532 * something at this address, we have to start after it.
1533 */
1534
1535 /*
1536 * @@@: there are four, no, eight cases to consider.
1537 *
1538 * 0: found, fixed, bottom up -> fail
1539 * 1: found, fixed, top down -> fail
1540 * 2: found, not fixed, bottom up -> start after entry->end,
1541 * loop up
1542 * 3: found, not fixed, top down -> start before entry->start,
1543 * loop down
1544 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1545 * 5: not found, fixed, top down -> check entry->next->start, fail
1546 * 6: not found, not fixed, bottom up -> check entry->next->start,
1547 * loop up
1548 * 7: not found, not fixed, top down -> check entry->next->start,
1549 * loop down
1550 *
1551 * as you can see, it reduces to roughly five cases, and that
1552 * adding top down mapping only adds one unique case (without
1553 * it, there would be four cases).
1554 */
1555
1556 if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
1557 entry = map->first_free;
1558 } else {
1559 if (uvm_map_lookup_entry(map, hint, &entry)) {
1560 /* "hint" address already in use ... */
1561 if (flags & UVM_FLAG_FIXED) {
1562 UVMHIST_LOG(maphist, "<- fixed & VA in use",
1563 0, 0, 0, 0);
1564 return (NULL);
1565 }
1566 if (topdown)
1567 /* Start from lower gap. */
1568 entry = entry->prev;
1569 } else if (flags & UVM_FLAG_FIXED) {
1570 if (entry->next->start >= hint + length &&
1571 hint + length > hint)
1572 goto found;
1573
1574 /* "hint" address is gap but too small */
1575 UVMHIST_LOG(maphist, "<- fixed mapping failed",
1576 0, 0, 0, 0);
1577 return (NULL); /* only one shot at it ... */
1578 } else {
1579 /*
1580 * See if given hint fits in this gap.
1581 */
1582 switch (uvm_map_space_avail(&hint, length,
1583 uoffset, align, topdown, entry)) {
1584 case 1:
1585 goto found;
1586 case -1:
1587 goto wraparound;
1588 }
1589
1590 if (topdown) {
1591 /*
1592 * Still there is a chance to fit
1593 * if hint > entry->end.
1594 */
1595 } else {
1596 /* Start from higher gap. */
1597 entry = entry->next;
1598 if (entry == &map->header)
1599 goto notfound;
1600 goto nextgap;
1601 }
1602 }
1603 }
1604
1605 /*
1606 * Note that the UVM_FLAG_FIXED case is already handled.
1607 */
1608 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1609
1610 /* Try to find the space in the red-black tree */
1611
1612 /* Check slot before any entry */
1613 hint = topdown ? entry->next->start - length : entry->end;
1614 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1615 topdown, entry)) {
1616 case 1:
1617 goto found;
1618 case -1:
1619 goto wraparound;
1620 }
1621
1622 nextgap:
1623 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1624 /* If there is not enough space in the whole tree, we fail */
1625 tmp = RB_ROOT(&map->rbhead);
1626 if (tmp == NULL || tmp->space < length)
1627 goto notfound;
1628
1629 prev = NULL; /* previous candidate */
1630
1631 /* Find an entry close to hint that has enough space */
1632 for (; tmp;) {
1633 KASSERT(tmp->next->start == tmp->end + tmp->ownspace);
1634 if (topdown) {
1635 if (tmp->next->start < hint + length &&
1636 (prev == NULL || tmp->end > prev->end)) {
1637 if (tmp->ownspace >= length)
1638 prev = tmp;
1639 else if ((child = RB_LEFT(tmp, rb_entry))
1640 != NULL && child->space >= length)
1641 prev = tmp;
1642 }
1643 } else {
1644 if (tmp->end >= hint &&
1645 (prev == NULL || tmp->end < prev->end)) {
1646 if (tmp->ownspace >= length)
1647 prev = tmp;
1648 else if ((child = RB_RIGHT(tmp, rb_entry))
1649 != NULL && child->space >= length)
1650 prev = tmp;
1651 }
1652 }
1653 if (tmp->next->start < hint + length)
1654 child = RB_RIGHT(tmp, rb_entry);
1655 else if (tmp->end > hint)
1656 child = RB_LEFT(tmp, rb_entry);
1657 else {
1658 if (tmp->ownspace >= length)
1659 break;
1660 if (topdown)
1661 child = RB_LEFT(tmp, rb_entry);
1662 else
1663 child = RB_RIGHT(tmp, rb_entry);
1664 }
1665 if (child == NULL || child->space < length)
1666 break;
1667 tmp = child;
1668 }
1669
1670 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1671 /*
1672 * Check if the entry that we found satisfies the
1673 * space requirement
1674 */
1675 if (topdown) {
1676 if (hint > tmp->next->start - length)
1677 hint = tmp->next->start - length;
1678 } else {
1679 if (hint < tmp->end)
1680 hint = tmp->end;
1681 }
1682 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1683 topdown, tmp)) {
1684 case 1:
1685 entry = tmp;
1686 goto found;
1687 case -1:
1688 goto wraparound;
1689 }
1690 if (tmp->ownspace >= length)
1691 goto listsearch;
1692 }
1693 if (prev == NULL)
1694 goto notfound;
1695
1696 if (topdown) {
1697 KASSERT(orig_hint >= prev->next->start - length ||
1698 prev->next->start - length > prev->next->start);
1699 hint = prev->next->start - length;
1700 } else {
1701 KASSERT(orig_hint <= prev->end);
1702 hint = prev->end;
1703 }
1704 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1705 topdown, prev)) {
1706 case 1:
1707 entry = prev;
1708 goto found;
1709 case -1:
1710 goto wraparound;
1711 }
1712 if (prev->ownspace >= length)
1713 goto listsearch;
1714
1715 if (topdown)
1716 tmp = RB_LEFT(prev, rb_entry);
1717 else
1718 tmp = RB_RIGHT(prev, rb_entry);
1719 for (;;) {
1720 KASSERT(tmp && tmp->space >= length);
1721 if (topdown)
1722 child = RB_RIGHT(tmp, rb_entry);
1723 else
1724 child = RB_LEFT(tmp, rb_entry);
1725 if (child && child->space >= length) {
1726 tmp = child;
1727 continue;
1728 }
1729 if (tmp->ownspace >= length)
1730 break;
1731 if (topdown)
1732 tmp = RB_LEFT(tmp, rb_entry);
1733 else
1734 tmp = RB_RIGHT(tmp, rb_entry);
1735 }
1736
1737 if (topdown) {
1738 KASSERT(orig_hint >= tmp->next->start - length ||
1739 tmp->next->start - length > tmp->next->start);
1740 hint = tmp->next->start - length;
1741 } else {
1742 KASSERT(orig_hint <= tmp->end);
1743 hint = tmp->end;
1744 }
1745 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1746 topdown, tmp)) {
1747 case 1:
1748 entry = tmp;
1749 goto found;
1750 case -1:
1751 goto wraparound;
1752 }
1753
1754 /*
1755 * The tree fails to find an entry because of offset or alignment
1756 * restrictions. Search the list instead.
1757 */
1758 listsearch:
1759 /*
1760 * Look through the rest of the map, trying to fit a new region in
1761 * the gap between existing regions, or after the very last region.
1762 * note: entry->end = base VA of current gap,
1763 * entry->next->start = VA of end of current gap
1764 */
1765
1766 for (;;) {
1767 /* Update hint for current gap. */
1768 hint = topdown ? entry->next->start - length : entry->end;
1769
1770 /* See if it fits. */
1771 switch (uvm_map_space_avail(&hint, length, uoffset, align,
1772 topdown, entry)) {
1773 case 1:
1774 goto found;
1775 case -1:
1776 goto wraparound;
1777 }
1778
1779 /* Advance to next/previous gap */
1780 if (topdown) {
1781 if (entry == &map->header) {
1782 UVMHIST_LOG(maphist, "<- failed (off start)",
1783 0,0,0,0);
1784 goto notfound;
1785 }
1786 entry = entry->prev;
1787 } else {
1788 entry = entry->next;
1789 if (entry == &map->header) {
1790 UVMHIST_LOG(maphist, "<- failed (off end)",
1791 0,0,0,0);
1792 goto notfound;
1793 }
1794 }
1795 }
1796
1797 found:
1798 SAVE_HINT(map, map->hint, entry);
1799 *result = hint;
1800 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
1801 KASSERT( topdown || hint >= orig_hint);
1802 KASSERT(!topdown || hint <= orig_hint);
1803 KASSERT(entry->end <= hint);
1804 KASSERT(hint + length <= entry->next->start);
1805 return (entry);
1806
1807 wraparound:
1808 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
1809
1810 return (NULL);
1811
1812 notfound:
1813 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
1814
1815 return (NULL);
1816 }
1817
1818 /*
1819 * U N M A P - m a i n h e l p e r f u n c t i o n s
1820 */
1821
1822 /*
1823 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
1824 *
1825 * => caller must check alignment and size
1826 * => map must be locked by caller
1827 * => we return a list of map entries that we've removed from the map
1828 * in "entry_list"
1829 */
1830
1831 void
1832 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
1833 struct vm_map_entry **entry_list /* OUT */,
1834 struct uvm_mapent_reservation *umr)
1835 {
1836 struct vm_map_entry *entry, *first_entry, *next;
1837 vaddr_t len;
1838 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
1839
1840 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
1841 map, start, end, 0);
1842 VM_MAP_RANGE_CHECK(map, start, end);
1843
1844 uvm_tree_sanity(map, "unmap_remove entry");
1845
1846 /*
1847 * find first entry
1848 */
1849
1850 if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
1851 /* clip and go... */
1852 entry = first_entry;
1853 UVM_MAP_CLIP_START(map, entry, start, umr);
1854 /* critical! prevents stale hint */
1855 SAVE_HINT(map, entry, entry->prev);
1856 } else {
1857 entry = first_entry->next;
1858 }
1859
1860 /*
1861 * Save the free space hint
1862 */
1863
1864 if (map->first_free->start >= start)
1865 map->first_free = entry->prev;
1866
1867 /*
1868 * note: we now re-use first_entry for a different task. we remove
1869 * a number of map entries from the map and save them in a linked
1870 * list headed by "first_entry". once we remove them from the map
1871 * the caller should unlock the map and drop the references to the
1872 * backing objects [c.f. uvm_unmap_detach]. the object is to
1873 * separate unmapping from reference dropping. why?
1874 * [1] the map has to be locked for unmapping
1875 * [2] the map need not be locked for reference dropping
1876 * [3] dropping references may trigger pager I/O, and if we hit
1877 * a pager that does synchronous I/O we may have to wait for it.
1878 * [4] we would like all waiting for I/O to occur with maps unlocked
1879 * so that we don't block other threads.
1880 */
1881
1882 first_entry = NULL;
1883 *entry_list = NULL;
1884
1885 /*
1886 * break up the area into map entry sized regions and unmap. note
1887 * that all mappings have to be removed before we can even consider
1888 * dropping references to amaps or VM objects (otherwise we could end
1889 * up with a mapping to a page on the free list which would be very bad)
1890 */
1891
1892 while ((entry != &map->header) && (entry->start < end)) {
1893 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
1894
1895 UVM_MAP_CLIP_END(map, entry, end, umr);
1896 next = entry->next;
1897 len = entry->end - entry->start;
1898
1899 /*
1900 * unwire before removing addresses from the pmap; otherwise
1901 * unwiring will put the entries back into the pmap (XXX).
1902 */
1903
1904 if (VM_MAPENT_ISWIRED(entry)) {
1905 uvm_map_entry_unwire(map, entry);
1906 }
1907 if ((map->flags & VM_MAP_PAGEABLE) == 0) {
1908
1909 /*
1910 * if the map is non-pageable, any pages mapped there
1911 * must be wired and entered with pmap_kenter_pa(),
1912 * and we should free any such pages immediately.
1913 * this is mostly used for kmem_map and mb_map.
1914 */
1915
1916 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
1917 uvm_km_pgremove_intrsafe(entry->start,
1918 entry->end);
1919 pmap_kremove(entry->start, len);
1920 }
1921 } else if (UVM_ET_ISOBJ(entry) &&
1922 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
1923 KASSERT(vm_map_pmap(map) == pmap_kernel());
1924
1925 /*
1926 * note: kernel object mappings are currently used in
1927 * two ways:
1928 * [1] "normal" mappings of pages in the kernel object
1929 * [2] uvm_km_valloc'd allocations in which we
1930 * pmap_enter in some non-kernel-object page
1931 * (e.g. vmapbuf).
1932 *
1933 * for case [1], we need to remove the mapping from
1934 * the pmap and then remove the page from the kernel
1935 * object (because, once pages in a kernel object are
1936 * unmapped they are no longer needed, unlike, say,
1937 * a vnode where you might want the data to persist
1938 * until flushed out of a queue).
1939 *
1940 * for case [2], we need to remove the mapping from
1941 * the pmap. there shouldn't be any pages at the
1942 * specified offset in the kernel object [but it
1943 * doesn't hurt to call uvm_km_pgremove just to be
1944 * safe?]
1945 *
1946 * uvm_km_pgremove currently does the following:
1947 * for pages in the kernel object in range:
1948 * - drops the swap slot
1949 * - uvm_pagefree the page
1950 */
1951
1952 /*
1953 * remove mappings from pmap and drop the pages
1954 * from the object. offsets are always relative
1955 * to vm_map_min(kernel_map).
1956 */
1957
1958 pmap_remove(pmap_kernel(), entry->start,
1959 entry->start + len);
1960 uvm_km_pgremove(entry->object.uvm_obj,
1961 entry->start - vm_map_min(kernel_map),
1962 entry->end - vm_map_min(kernel_map));
1963
1964 /*
1965 * null out kernel_object reference, we've just
1966 * dropped it
1967 */
1968
1969 entry->etype &= ~UVM_ET_OBJ;
1970 entry->object.uvm_obj = NULL;
1971 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
1972
1973 /*
1974 * remove mappings the standard way.
1975 */
1976
1977 pmap_remove(map->pmap, entry->start, entry->end);
1978 }
1979
1980 #if defined(DEBUG)
1981 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
1982
1983 /*
1984 			 * check if there are any remaining mappings,
1985 			 * which would indicate a bug in the caller.
1986 */
1987
1988 vaddr_t va;
1989 for (va = entry->start; va < entry->end;
1990 va += PAGE_SIZE) {
1991 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
1992 panic("uvm_unmap_remove: has mapping");
1993 }
1994 }
1995 }
1996 #endif /* defined(DEBUG) */
1997
1998 /*
1999 * remove entry from map and put it on our list of entries
2000 * that we've nuked. then go to next entry.
2001 */
2002
2003 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2004
2005 /* critical! prevents stale hint */
2006 SAVE_HINT(map, entry, entry->prev);
2007
2008 uvm_map_entry_unlink(map, entry);
2009 KASSERT(map->size >= len);
2010 map->size -= len;
2011 entry->prev = NULL;
2012 entry->next = first_entry;
2013 first_entry = entry;
2014 entry = next;
2015 }
2016 if ((map->flags & VM_MAP_DYING) == 0) {
2017 pmap_update(vm_map_pmap(map));
2018 }
2019
2020 uvm_tree_sanity(map, "unmap_remove leave");
2021
2022 /*
2023 * now we've cleaned up the map and are ready for the caller to drop
2024 * references to the mapped objects.
2025 */
2026
2027 *entry_list = first_entry;
2028 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2029
2030 simple_lock(&map->flags_lock);
2031 if (map->flags & VM_MAP_WANTVA) {
2032 map->flags &= ~VM_MAP_WANTVA;
2033 wakeup(&map->header);
2034 }
2035 simple_unlock(&map->flags_lock);
2036 }
2037
2038 /*
2039 * uvm_unmap_detach: drop references in a chain of map entries
2040 *
2041 * => we will free the map entries as we traverse the list.
2042 */
2043
2044 void
2045 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2046 {
2047 struct vm_map_entry *next_entry;
2048 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2049
2050 while (first_entry) {
2051 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2052 UVMHIST_LOG(maphist,
2053 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2054 first_entry, first_entry->aref.ar_amap,
2055 first_entry->object.uvm_obj,
2056 UVM_ET_ISSUBMAP(first_entry));
2057
2058 /*
2059 * drop reference to amap, if we've got one
2060 */
2061
2062 if (first_entry->aref.ar_amap)
2063 uvm_map_unreference_amap(first_entry, flags);
2064
2065 /*
2066 * drop reference to our backing object, if we've got one
2067 */
2068
2069 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2070 if (UVM_ET_ISOBJ(first_entry) &&
2071 first_entry->object.uvm_obj->pgops->pgo_detach) {
2072 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2073 (first_entry->object.uvm_obj);
2074 }
2075 next_entry = first_entry->next;
2076 uvm_mapent_free(first_entry);
2077 first_entry = next_entry;
2078 }
2079 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2080 }
2081
2082 /*
2083 * E X T R A C T I O N F U N C T I O N S
2084 */
2085
2086 /*
2087 * uvm_map_reserve: reserve space in a vm_map for future use.
2088 *
2089 * => we reserve space in a map by putting a dummy map entry in the
2090 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2091 * => map should be unlocked (we will write lock it)
2092 * => we return true if we were able to reserve space
2093 * => XXXCDC: should be inline?
2094 */
2095
2096 int
2097 uvm_map_reserve(struct vm_map *map, vsize_t size,
2098 vaddr_t offset /* hint for pmap_prefer */,
2099 vsize_t align /* alignment hint */,
2100 vaddr_t *raddr /* IN:hint, OUT: reserved VA */)
2101 {
2102 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2103
2104 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2105 map,size,offset,raddr);
2106
2107 size = round_page(size);
2108 if (*raddr < vm_map_min(map))
2109 *raddr = vm_map_min(map); /* hint */
2110
2111 /*
2112 * reserve some virtual space.
2113 */
2114
2115 if (uvm_map(map, raddr, size, NULL, offset, 0,
2116 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2117 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
2118 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2119 return (FALSE);
2120 }
2121
2122 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2123 return (TRUE);
2124 }
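
/*
 * illustrative sketch (not part of the original source): a minimal use
 * of uvm_map_reserve(), assuming "len" is already page-rounded and
 * "hint" is a preferred (but not required) address:
 *
 *	vaddr_t va = hint;
 *
 *	if (uvm_map_reserve(kernel_map, len, 0, 0, &va) == FALSE)
 *		return ENOMEM;
 *
 * the reserved range is a blank entry (no object, no amap,
 * VM_PROT_NONE) which can later be filled in, e.g. by
 * uvm_map_replace() or uvm_map_extract().
 */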
2125
2126 /*
2127 * uvm_map_replace: replace a reserved (blank) area of memory with
2128 * real mappings.
2129 *
2130 * => caller must WRITE-LOCK the map
2131 * => we return TRUE if replacement was a success
2132  * => we expect the newents chain to have nnewents entries on it and
2133 * we expect newents->prev to point to the last entry on the list
2134 * => note newents is allowed to be NULL
2135 */
2136
2137 int
2138 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2139 struct vm_map_entry *newents, int nnewents)
2140 {
2141 struct vm_map_entry *oldent, *last;
2142
2143 uvm_tree_sanity(map, "map_replace entry");
2144
2145 /*
2146 * first find the blank map entry at the specified address
2147 */
2148
2149 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2150 return (FALSE);
2151 }
2152
2153 /*
2154 * check to make sure we have a proper blank entry
2155 */
2156
2157 if (oldent->start != start || oldent->end != end ||
2158 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2159 return (FALSE);
2160 }
2161
2162 #ifdef DIAGNOSTIC
2163
2164 /*
2165 * sanity check the newents chain
2166 */
2167
2168 {
2169 struct vm_map_entry *tmpent = newents;
2170 int nent = 0;
2171 vaddr_t cur = start;
2172
2173 while (tmpent) {
2174 nent++;
2175 if (tmpent->start < cur)
2176 panic("uvm_map_replace1");
2177 if (tmpent->start > tmpent->end || tmpent->end > end) {
2178 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2179 tmpent->start, tmpent->end, end);
2180 panic("uvm_map_replace2");
2181 }
2182 cur = tmpent->end;
2183 if (tmpent->next) {
2184 if (tmpent->next->prev != tmpent)
2185 panic("uvm_map_replace3");
2186 } else {
2187 if (newents->prev != tmpent)
2188 panic("uvm_map_replace4");
2189 }
2190 tmpent = tmpent->next;
2191 }
2192 if (nent != nnewents)
2193 panic("uvm_map_replace5");
2194 }
2195 #endif
2196
2197 /*
2198 * map entry is a valid blank! replace it. (this does all the
2199 * work of map entry link/unlink...).
2200 */
2201
2202 if (newents) {
2203 last = newents->prev;
2204
2205 /* critical: flush stale hints out of map */
2206 SAVE_HINT(map, map->hint, newents);
2207 if (map->first_free == oldent)
2208 map->first_free = last;
2209
2210 last->next = oldent->next;
2211 last->next->prev = last;
2212
2213 /* Fix RB tree */
2214 uvm_rb_remove(map, oldent);
2215
2216 newents->prev = oldent->prev;
2217 newents->prev->next = newents;
2218 map->nentries = map->nentries + (nnewents - 1);
2219
2220 /* Fixup the RB tree */
2221 {
2222 int i;
2223 struct vm_map_entry *tmp;
2224
2225 tmp = newents;
2226 for (i = 0; i < nnewents && tmp; i++) {
2227 uvm_rb_insert(map, tmp);
2228 tmp = tmp->next;
2229 }
2230 }
2231 } else {
2232
2233 /* critical: flush stale hints out of map */
2234 SAVE_HINT(map, map->hint, oldent->prev);
2235 if (map->first_free == oldent)
2236 map->first_free = oldent->prev;
2237
2238 /* NULL list of new entries: just remove the old one */
2239 uvm_map_entry_unlink(map, oldent);
2240 }
2241
2242 uvm_tree_sanity(map, "map_replace leave");
2243
2244 /*
2245 * now we can free the old blank entry, unlock the map and return.
2246 */
2247
2248 uvm_mapent_free(oldent);
2249 return (TRUE);
2250 }
2251
2252 /*
2253 * uvm_map_extract: extract a mapping from a map and put it somewhere
2254 * (maybe removing the old mapping)
2255 *
2256 * => maps should be unlocked (we will write lock them)
2257 * => returns 0 on success, error code otherwise
2258 * => start must be page aligned
2259 * => len must be page sized
2260 * => flags:
2261 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2262 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2263 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2264 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2265 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2266 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2267 * be used from within the kernel in a kernel level map <<<
2268 */
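
/*
 * illustrative sketch (not part of the original source): one plausible
 * kernel-internal use is to temporarily mirror part of a user map into
 * the kernel map; "p", "uva" and "len" are hypothetical, page-aligned
 * caller-supplied values:
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, uva, len,
 *	    kernel_map, &kva, UVM_EXTRACT_QREF | UVM_EXTRACT_FIXPROT);
 *	if (error)
 *		return error;
 *	...access the data through kva...
 *
 * as noted above, such quick references must later be torn down via the
 * QREF path, i.e. uvm_unmap_remove() on the kernel map followed by
 * uvm_unmap_detach() with AMAP_REFALL.
 */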
2269
2270 int
2271 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2272 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2273 {
2274 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2275 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2276 *deadentry, *oldentry;
2277 vsize_t elen;
2278 int nchain, error, copy_ok;
2279 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2280
2281 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2282 len,0);
2283 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2284
2285 uvm_tree_sanity(srcmap, "map_extract src enter");
2286 uvm_tree_sanity(dstmap, "map_extract dst enter");
2287
2288 /*
2289 * step 0: sanity check: start must be on a page boundary, length
2290 * must be page sized. can't ask for CONTIG/QREF if you asked for
2291 * REMOVE.
2292 */
2293
2294 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2295 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2296 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2297
2298 /*
2299 * step 1: reserve space in the target map for the extracted area
2300 */
2301
2302 dstaddr = vm_map_min(dstmap);
2303 if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
2304 return (ENOMEM);
2305 *dstaddrp = dstaddr; /* pass address back to caller */
2306 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2307
2308 /*
2309 * step 2: setup for the extraction process loop by init'ing the
2310 * map entry chain, locking src map, and looking up the first useful
2311 * entry in the map.
2312 */
2313
2314 end = start + len;
2315 newend = dstaddr + len;
2316 chain = endchain = NULL;
2317 nchain = 0;
2318 vm_map_lock(srcmap);
2319
2320 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2321
2322 /* "start" is within an entry */
2323 if (flags & UVM_EXTRACT_QREF) {
2324
2325 /*
2326 * for quick references we don't clip the entry, so
2327 * the entry may map space "before" the starting
2328 * virtual address... this is the "fudge" factor
2329 * (which can be non-zero only the first time
2330 * through the "while" loop in step 3).
2331 */
2332
2333 fudge = start - entry->start;
2334 } else {
2335
2336 /*
2337 * normal reference: we clip the map to fit (thus
2338 * fudge is zero)
2339 */
2340
2341 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2342 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2343 fudge = 0;
2344 }
2345 } else {
2346
2347 /* "start" is not within an entry ... skip to next entry */
2348 if (flags & UVM_EXTRACT_CONTIG) {
2349 error = EINVAL;
2350 goto bad; /* definite hole here ... */
2351 }
2352
2353 entry = entry->next;
2354 fudge = 0;
2355 }
2356
2357 /* save values from srcmap for step 6 */
2358 orig_entry = entry;
2359 orig_fudge = fudge;
2360
2361 /*
2362 * step 3: now start looping through the map entries, extracting
2363 * as we go.
2364 */
2365
2366 while (entry->start < end && entry != &srcmap->header) {
2367
2368 /* if we are not doing a quick reference, clip it */
2369 if ((flags & UVM_EXTRACT_QREF) == 0)
2370 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2371
2372 /* clear needs_copy (allow chunking) */
2373 if (UVM_ET_ISNEEDSCOPY(entry)) {
2374 amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
2375 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2376 error = ENOMEM;
2377 goto bad;
2378 }
2379
2380 /* amap_copy could clip (during chunk)! update fudge */
2381 if (fudge) {
2382 fudge = start - entry->start;
2383 orig_fudge = fudge;
2384 }
2385 }
2386
2387 /* calculate the offset of this from "start" */
2388 oldoffset = (entry->start + fudge) - start;
2389
2390 /* allocate a new map entry */
2391 newentry = uvm_mapent_alloc(dstmap, 0);
2392 if (newentry == NULL) {
2393 error = ENOMEM;
2394 goto bad;
2395 }
2396
2397 /* set up new map entry */
2398 newentry->next = NULL;
2399 newentry->prev = endchain;
2400 newentry->start = dstaddr + oldoffset;
2401 newentry->end =
2402 newentry->start + (entry->end - (entry->start + fudge));
2403 if (newentry->end > newend || newentry->end < newentry->start)
2404 newentry->end = newend;
2405 newentry->object.uvm_obj = entry->object.uvm_obj;
2406 if (newentry->object.uvm_obj) {
2407 if (newentry->object.uvm_obj->pgops->pgo_reference)
2408 newentry->object.uvm_obj->pgops->
2409 pgo_reference(newentry->object.uvm_obj);
2410 newentry->offset = entry->offset + fudge;
2411 } else {
2412 newentry->offset = 0;
2413 }
2414 newentry->etype = entry->etype;
2415 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2416 entry->max_protection : entry->protection;
2417 newentry->max_protection = entry->max_protection;
2418 newentry->inheritance = entry->inheritance;
2419 newentry->wired_count = 0;
2420 newentry->aref.ar_amap = entry->aref.ar_amap;
2421 if (newentry->aref.ar_amap) {
2422 newentry->aref.ar_pageoff =
2423 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2424 uvm_map_reference_amap(newentry, AMAP_SHARED |
2425 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2426 } else {
2427 newentry->aref.ar_pageoff = 0;
2428 }
2429 newentry->advice = entry->advice;
2430
2431 /* now link it on the chain */
2432 nchain++;
2433 if (endchain == NULL) {
2434 chain = endchain = newentry;
2435 } else {
2436 endchain->next = newentry;
2437 endchain = newentry;
2438 }
2439
2440 /* end of 'while' loop! */
2441 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2442 (entry->next == &srcmap->header ||
2443 entry->next->start != entry->end)) {
2444 error = EINVAL;
2445 goto bad;
2446 }
2447 entry = entry->next;
2448 fudge = 0;
2449 }
2450
2451 /*
2452 * step 4: close off chain (in format expected by uvm_map_replace)
2453 */
2454
2455 if (chain)
2456 chain->prev = endchain;
2457
2458 /*
2459 * step 5: attempt to lock the dest map so we can pmap_copy.
2460 * note usage of copy_ok:
2461 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2462 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2463 */
2464
2465 if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
2466 copy_ok = 1;
2467 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2468 nchain)) {
2469 if (srcmap != dstmap)
2470 vm_map_unlock(dstmap);
2471 error = EIO;
2472 goto bad;
2473 }
2474 } else {
2475 copy_ok = 0;
2476 		/* replace deferred until step 7 */
2477 }
2478
2479 /*
2480 * step 6: traverse the srcmap a second time to do the following:
2481 * - if we got a lock on the dstmap do pmap_copy
2482 * - if UVM_EXTRACT_REMOVE remove the entries
2483 * we make use of orig_entry and orig_fudge (saved in step 2)
2484 */
2485
2486 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2487
2488 /* purge possible stale hints from srcmap */
2489 if (flags & UVM_EXTRACT_REMOVE) {
2490 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2491 if (srcmap->first_free->start >= start)
2492 srcmap->first_free = orig_entry->prev;
2493 }
2494
2495 entry = orig_entry;
2496 fudge = orig_fudge;
2497 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2498
2499 while (entry->start < end && entry != &srcmap->header) {
2500 if (copy_ok) {
2501 oldoffset = (entry->start + fudge) - start;
2502 elen = MIN(end, entry->end) -
2503 (entry->start + fudge);
2504 pmap_copy(dstmap->pmap, srcmap->pmap,
2505 dstaddr + oldoffset, elen,
2506 entry->start + fudge);
2507 }
2508
2509 /* we advance "entry" in the following if statement */
2510 if (flags & UVM_EXTRACT_REMOVE) {
2511 pmap_remove(srcmap->pmap, entry->start,
2512 entry->end);
2513 oldentry = entry; /* save entry */
2514 entry = entry->next; /* advance */
2515 uvm_map_entry_unlink(srcmap, oldentry);
2516 /* add to dead list */
2517 oldentry->next = deadentry;
2518 deadentry = oldentry;
2519 } else {
2520 entry = entry->next; /* advance */
2521 }
2522
2523 /* end of 'while' loop */
2524 fudge = 0;
2525 }
2526 pmap_update(srcmap->pmap);
2527
2528 /*
2529 * unlock dstmap. we will dispose of deadentry in
2530 * step 7 if needed
2531 */
2532
2533 if (copy_ok && srcmap != dstmap)
2534 vm_map_unlock(dstmap);
2535
2536 } else {
2537 deadentry = NULL;
2538 }
2539
2540 /*
2541 * step 7: we are done with the source map, unlock. if copy_ok
2542 * is 0 then we have not replaced the dummy mapping in dstmap yet
2543 * and we need to do so now.
2544 */
2545
2546 vm_map_unlock(srcmap);
2547 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2548 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2549
2550 /* now do the replacement if we didn't do it in step 5 */
2551 if (copy_ok == 0) {
2552 vm_map_lock(dstmap);
2553 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2554 nchain);
2555 vm_map_unlock(dstmap);
2556
2557 if (error == FALSE) {
2558 error = EIO;
2559 goto bad2;
2560 }
2561 }
2562
2563 uvm_tree_sanity(srcmap, "map_extract src leave");
2564 uvm_tree_sanity(dstmap, "map_extract dst leave");
2565
2566 return (0);
2567
2568 /*
2569 * bad: failure recovery
2570 */
2571 bad:
2572 vm_map_unlock(srcmap);
2573 bad2: /* src already unlocked */
2574 if (chain)
2575 uvm_unmap_detach(chain,
2576 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2577
2578 uvm_tree_sanity(srcmap, "map_extract src err leave");
2579 uvm_tree_sanity(dstmap, "map_extract dst err leave");
2580
2581 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2582 return (error);
2583 }
2584
2585 /* end of extraction functions */
2586
2587 /*
2588 * uvm_map_submap: punch down part of a map into a submap
2589 *
2590 * => only the kernel_map is allowed to be submapped
2591 * => the purpose of submapping is to break up the locking granularity
2592 * of a larger map
2593 * => the range specified must have been mapped previously with a uvm_map()
2594 * call [with uobj==NULL] to create a blank map entry in the main map.
2595 * [And it had better still be blank!]
2596 * => maps which contain submaps should never be copied or forked.
2597 * => to remove a submap, use uvm_unmap() on the main map
2598 * and then uvm_map_deallocate() the submap.
2599 * => main map must be unlocked.
2600 * => submap must have been init'd and have a zero reference count.
2601 * [need not be locked as we don't actually reference it]
2602 */
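
/*
 * illustrative sketch (not part of the original source): punching a
 * submap into the kernel map might look roughly like this, where
 * "submap" has been set up with uvm_map_setup(), "size" is page-rounded
 * and uvm_km_suballoc() is the usual wrapper for the whole sequence:
 *
 *	vaddr_t va = 0;
 *
 *	if (uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0)
 *		return ENOMEM;
 *	if (uvm_map_submap(kernel_map, va, va + size, submap) != 0)
 *		return EINVAL;
 */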
2603
2604 int
2605 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2606 struct vm_map *submap)
2607 {
2608 struct vm_map_entry *entry;
2609 struct uvm_mapent_reservation umr;
2610 int error;
2611
2612 uvm_mapent_reserve(map, &umr, 2, 0);
2613
2614 vm_map_lock(map);
2615 VM_MAP_RANGE_CHECK(map, start, end);
2616
2617 if (uvm_map_lookup_entry(map, start, &entry)) {
2618 UVM_MAP_CLIP_START(map, entry, start, &umr);
2619 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
2620 } else {
2621 entry = NULL;
2622 }
2623
2624 if (entry != NULL &&
2625 entry->start == start && entry->end == end &&
2626 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2627 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2628 entry->etype |= UVM_ET_SUBMAP;
2629 entry->object.sub_map = submap;
2630 entry->offset = 0;
2631 uvm_map_reference(submap);
2632 error = 0;
2633 } else {
2634 error = EINVAL;
2635 }
2636 vm_map_unlock(map);
2637
2638 uvm_mapent_unreserve(map, &umr);
2639
2640 return error;
2641 }
2642
2643 /*
2644 * uvm_map_setup_kernel: init in-kernel map
2645 *
2646 * => map must not be in service yet.
2647 */
2648
2649 void
2650 uvm_map_setup_kernel(struct vm_map_kernel *map,
2651 vaddr_t min, vaddr_t max, int flags)
2652 {
2653
2654 uvm_map_setup(&map->vmk_map, min, max, flags);
2655
2656 LIST_INIT(&map->vmk_kentry_free);
2657 map->vmk_merged_entries = NULL;
2658 }
2659
2660
2661 /*
2662 * uvm_map_protect: change map protection
2663 *
2664 * => set_max means set max_protection.
2665 * => map must be unlocked.
2666 */
2667
2668 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
2669 ~VM_PROT_WRITE : VM_PROT_ALL)
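
/*
 * illustrative note (not part of the original source): for a
 * copy-on-write entry MASK() strips VM_PROT_WRITE, so pmap_protect()
 * below is called with the write bit removed; e.g. an entry whose
 * protection is VM_PROT_READ|VM_PROT_WRITE stays read-only in the pmap
 * until the copy-on-write fault has been resolved.  for all other
 * entries MASK() is VM_PROT_ALL and the protection passes through
 * unchanged.
 */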
2670
2671 int
2672 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2673 vm_prot_t new_prot, boolean_t set_max)
2674 {
2675 struct vm_map_entry *current, *entry;
2676 int error = 0;
2677 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2678 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
2679 map, start, end, new_prot);
2680
2681 vm_map_lock(map);
2682 VM_MAP_RANGE_CHECK(map, start, end);
2683 if (uvm_map_lookup_entry(map, start, &entry)) {
2684 UVM_MAP_CLIP_START(map, entry, start, NULL);
2685 } else {
2686 entry = entry->next;
2687 }
2688
2689 /*
2690 * make a first pass to check for protection violations.
2691 */
2692
2693 current = entry;
2694 while ((current != &map->header) && (current->start < end)) {
2695 if (UVM_ET_ISSUBMAP(current)) {
2696 error = EINVAL;
2697 goto out;
2698 }
2699 if ((new_prot & current->max_protection) != new_prot) {
2700 error = EACCES;
2701 goto out;
2702 }
2703 /*
2704 * Don't allow VM_PROT_EXECUTE to be set on entries that
2705 * point to vnodes that are associated with a NOEXEC file
2706 * system.
2707 */
2708 if (UVM_ET_ISOBJ(current) &&
2709 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
2710 struct vnode *vp =
2711 (struct vnode *) current->object.uvm_obj;
2712
2713 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
2714 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
2715 error = EACCES;
2716 goto out;
2717 }
2718 }
2719 current = current->next;
2720 }
2721
2722 /* go back and fix up protections (no need to clip this time). */
2723
2724 current = entry;
2725 while ((current != &map->header) && (current->start < end)) {
2726 vm_prot_t old_prot;
2727
2728 UVM_MAP_CLIP_END(map, current, end, NULL);
2729 old_prot = current->protection;
2730 if (set_max)
2731 current->protection =
2732 (current->max_protection = new_prot) & old_prot;
2733 else
2734 current->protection = new_prot;
2735
2736 /*
2737 * update physical map if necessary. worry about copy-on-write
2738 * here -- CHECK THIS XXX
2739 */
2740
2741 if (current->protection != old_prot) {
2742 /* update pmap! */
2743 pmap_protect(map->pmap, current->start, current->end,
2744 current->protection & MASK(entry));
2745
2746 /*
2747 * If this entry points at a vnode, and the
2748 * protection includes VM_PROT_EXECUTE, mark
2749 * the vnode as VEXECMAP.
2750 */
2751 if (UVM_ET_ISOBJ(current)) {
2752 struct uvm_object *uobj =
2753 current->object.uvm_obj;
2754
2755 if (UVM_OBJ_IS_VNODE(uobj) &&
2756 (current->protection & VM_PROT_EXECUTE))
2757 vn_markexec((struct vnode *) uobj);
2758 }
2759 }
2760
2761 /*
2762 * If the map is configured to lock any future mappings,
2763 * wire this entry now if the old protection was VM_PROT_NONE
2764 * and the new protection is not VM_PROT_NONE.
2765 */
2766
2767 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
2768 VM_MAPENT_ISWIRED(entry) == 0 &&
2769 old_prot == VM_PROT_NONE &&
2770 new_prot != VM_PROT_NONE) {
2771 if (uvm_map_pageable(map, entry->start,
2772 entry->end, FALSE,
2773 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
2774
2775 /*
2776 * If locking the entry fails, remember the
2777 * error if it's the first one. Note we
2778 * still continue setting the protection in
2779 * the map, but will return the error
2780 * condition regardless.
2781 *
2782 * XXX Ignore what the actual error is,
2783 * XXX just call it a resource shortage
2784 * XXX so that it doesn't get confused
2785 * XXX what uvm_map_protect() itself would
2786 * XXX normally return.
2787 */
2788
2789 error = ENOMEM;
2790 }
2791 }
2792 current = current->next;
2793 }
2794 pmap_update(map->pmap);
2795
2796 out:
2797 vm_map_unlock(map);
2798
2799 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
2800 return error;
2801 }
2802
2803 #undef MASK
2804
2805 /*
2806 * uvm_map_inherit: set inheritance code for range of addrs in map.
2807 *
2808 * => map must be unlocked
2809 * => note that the inherit code is used during a "fork". see fork
2810 * code for details.
2811 */
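
/*
 * illustrative sketch (not part of the original source): this is
 * roughly what minherit(2) reduces to, e.g. to share a region with
 * future children ("p", "start" and "end" are the caller's values):
 *
 *	error = uvm_map_inherit(&p->p_vmspace->vm_map, start, end,
 *	    MAP_INHERIT_SHARE);
 */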
2812
2813 int
2814 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
2815 vm_inherit_t new_inheritance)
2816 {
2817 struct vm_map_entry *entry, *temp_entry;
2818 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
2819 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
2820 map, start, end, new_inheritance);
2821
2822 switch (new_inheritance) {
2823 case MAP_INHERIT_NONE:
2824 case MAP_INHERIT_COPY:
2825 case MAP_INHERIT_SHARE:
2826 break;
2827 default:
2828 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2829 return EINVAL;
2830 }
2831
2832 vm_map_lock(map);
2833 VM_MAP_RANGE_CHECK(map, start, end);
2834 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2835 entry = temp_entry;
2836 UVM_MAP_CLIP_START(map, entry, start, NULL);
2837 } else {
2838 entry = temp_entry->next;
2839 }
2840 while ((entry != &map->header) && (entry->start < end)) {
2841 UVM_MAP_CLIP_END(map, entry, end, NULL);
2842 entry->inheritance = new_inheritance;
2843 entry = entry->next;
2844 }
2845 vm_map_unlock(map);
2846 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2847 return 0;
2848 }
2849
2850 /*
2851 * uvm_map_advice: set advice code for range of addrs in map.
2852 *
2853 * => map must be unlocked
2854 */
2855
2856 int
2857 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
2858 {
2859 struct vm_map_entry *entry, *temp_entry;
2860 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
2861 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
2862 map, start, end, new_advice);
2863
2864 vm_map_lock(map);
2865 VM_MAP_RANGE_CHECK(map, start, end);
2866 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2867 entry = temp_entry;
2868 UVM_MAP_CLIP_START(map, entry, start, NULL);
2869 } else {
2870 entry = temp_entry->next;
2871 }
2872
2873 /*
2874 * XXXJRT: disallow holes?
2875 */
2876
2877 while ((entry != &map->header) && (entry->start < end)) {
2878 UVM_MAP_CLIP_END(map, entry, end, NULL);
2879
2880 switch (new_advice) {
2881 case MADV_NORMAL:
2882 case MADV_RANDOM:
2883 case MADV_SEQUENTIAL:
2884 /* nothing special here */
2885 break;
2886
2887 default:
2888 vm_map_unlock(map);
2889 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2890 return EINVAL;
2891 }
2892 entry->advice = new_advice;
2893 entry = entry->next;
2894 }
2895
2896 vm_map_unlock(map);
2897 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2898 return 0;
2899 }
2900
2901 /*
2902 * uvm_map_pageable: sets the pageability of a range in a map.
2903 *
2904 * => wires map entries. should not be used for transient page locking.
2905 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
2906  * => regions specified as not pageable require lock-down (wired) memory
2907 * and page tables.
2908 * => map must never be read-locked
2909 * => if islocked is TRUE, map is already write-locked
2910 * => we always unlock the map, since we must downgrade to a read-lock
2911 * to call uvm_fault_wire()
2912 * => XXXCDC: check this and try and clean it up.
2913 */
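
/*
 * illustrative sketch (not part of the original source): wiring and
 * then unwiring a user range, roughly what mlock(2) and munlock(2)
 * reduce to; "new_pageable" FALSE wires, TRUE unwires:
 *
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end,
 *	    FALSE, 0);
 *	...
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end,
 *	    TRUE, 0);
 */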
2914
2915 int
2916 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
2917 boolean_t new_pageable, int lockflags)
2918 {
2919 struct vm_map_entry *entry, *start_entry, *failed_entry;
2920 int rv;
2921 #ifdef DIAGNOSTIC
2922 u_int timestamp_save;
2923 #endif
2924 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
2925 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
2926 map, start, end, new_pageable);
2927 KASSERT(map->flags & VM_MAP_PAGEABLE);
2928
2929 if ((lockflags & UVM_LK_ENTER) == 0)
2930 vm_map_lock(map);
2931 VM_MAP_RANGE_CHECK(map, start, end);
2932
2933 /*
2934 * only one pageability change may take place at one time, since
2935 * uvm_fault_wire assumes it will be called only once for each
2936 * wiring/unwiring. therefore, we have to make sure we're actually
2937 * changing the pageability for the entire region. we do so before
2938 * making any changes.
2939 */
2940
2941 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
2942 if ((lockflags & UVM_LK_EXIT) == 0)
2943 vm_map_unlock(map);
2944
2945 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
2946 return EFAULT;
2947 }
2948 entry = start_entry;
2949
2950 /*
2951 * handle wiring and unwiring separately.
2952 */
2953
2954 if (new_pageable) { /* unwire */
2955 UVM_MAP_CLIP_START(map, entry, start, NULL);
2956
2957 /*
2958 * unwiring. first ensure that the range to be unwired is
2959 * really wired down and that there are no holes.
2960 */
2961
2962 while ((entry != &map->header) && (entry->start < end)) {
2963 if (entry->wired_count == 0 ||
2964 (entry->end < end &&
2965 (entry->next == &map->header ||
2966 entry->next->start > entry->end))) {
2967 if ((lockflags & UVM_LK_EXIT) == 0)
2968 vm_map_unlock(map);
2969 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
2970 return EINVAL;
2971 }
2972 entry = entry->next;
2973 }
2974
2975 /*
2976 * POSIX 1003.1b - a single munlock call unlocks a region,
2977 * regardless of the number of mlock calls made on that
2978 * region.
2979 */
2980
2981 entry = start_entry;
2982 while ((entry != &map->header) && (entry->start < end)) {
2983 UVM_MAP_CLIP_END(map, entry, end, NULL);
2984 if (VM_MAPENT_ISWIRED(entry))
2985 uvm_map_entry_unwire(map, entry);
2986 entry = entry->next;
2987 }
2988 if ((lockflags & UVM_LK_EXIT) == 0)
2989 vm_map_unlock(map);
2990 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2991 return 0;
2992 }
2993
2994 /*
2995 * wire case: in two passes [XXXCDC: ugly block of code here]
2996 *
2997 * 1: holding the write lock, we create any anonymous maps that need
2998 * to be created. then we clip each map entry to the region to
2999 * be wired and increment its wiring count.
3000 *
3001 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3002 * in the pages for any newly wired area (wired_count == 1).
3003 *
3004 * downgrading to a read lock for uvm_fault_wire avoids a possible
3005 * deadlock with another thread that may have faulted on one of
3006 * the pages to be wired (it would mark the page busy, blocking
3007 * us, then in turn block on the map lock that we hold). because
3008 * of problems in the recursive lock package, we cannot upgrade
3009 * to a write lock in vm_map_lookup. thus, any actions that
3010 * require the write lock must be done beforehand. because we
3011 * keep the read lock on the map, the copy-on-write status of the
3012 * entries we modify here cannot change.
3013 */
3014
3015 while ((entry != &map->header) && (entry->start < end)) {
3016 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3017
3018 /*
3019 * perform actions of vm_map_lookup that need the
3020 * write lock on the map: create an anonymous map
3021 * for a copy-on-write region, or an anonymous map
3022 * for a zero-fill region. (XXXCDC: submap case
3023 * ok?)
3024 */
3025
3026 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3027 if (UVM_ET_ISNEEDSCOPY(entry) &&
3028 ((entry->max_protection & VM_PROT_WRITE) ||
3029 (entry->object.uvm_obj == NULL))) {
3030 amap_copy(map, entry, M_WAITOK, TRUE,
3031 start, end);
3032 /* XXXCDC: wait OK? */
3033 }
3034 }
3035 }
3036 UVM_MAP_CLIP_START(map, entry, start, NULL);
3037 UVM_MAP_CLIP_END(map, entry, end, NULL);
3038 entry->wired_count++;
3039
3040 /*
3041 * Check for holes
3042 */
3043
3044 if (entry->protection == VM_PROT_NONE ||
3045 (entry->end < end &&
3046 (entry->next == &map->header ||
3047 entry->next->start > entry->end))) {
3048
3049 /*
3050 * found one. amap creation actions do not need to
3051 * be undone, but the wired counts need to be restored.
3052 */
3053
3054 while (entry != &map->header && entry->end > start) {
3055 entry->wired_count--;
3056 entry = entry->prev;
3057 }
3058 if ((lockflags & UVM_LK_EXIT) == 0)
3059 vm_map_unlock(map);
3060 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3061 return EINVAL;
3062 }
3063 entry = entry->next;
3064 }
3065
3066 /*
3067 * Pass 2.
3068 */
3069
3070 #ifdef DIAGNOSTIC
3071 timestamp_save = map->timestamp;
3072 #endif
3073 vm_map_busy(map);
3074 vm_map_downgrade(map);
3075
3076 rv = 0;
3077 entry = start_entry;
3078 while (entry != &map->header && entry->start < end) {
3079 if (entry->wired_count == 1) {
3080 rv = uvm_fault_wire(map, entry->start, entry->end,
3081 VM_FAULT_WIREMAX, entry->max_protection);
3082 if (rv) {
3083
3084 /*
3085 * wiring failed. break out of the loop.
3086 * we'll clean up the map below, once we
3087 * have a write lock again.
3088 */
3089
3090 break;
3091 }
3092 }
3093 entry = entry->next;
3094 }
3095
3096 if (rv) { /* failed? */
3097
3098 /*
3099 * Get back to an exclusive (write) lock.
3100 */
3101
3102 vm_map_upgrade(map);
3103 vm_map_unbusy(map);
3104
3105 #ifdef DIAGNOSTIC
3106 if (timestamp_save != map->timestamp)
3107 panic("uvm_map_pageable: stale map");
3108 #endif
3109
3110 /*
3111 * first drop the wiring count on all the entries
3112 * which haven't actually been wired yet.
3113 */
3114
3115 failed_entry = entry;
3116 while (entry != &map->header && entry->start < end) {
3117 entry->wired_count--;
3118 entry = entry->next;
3119 }
3120
3121 /*
3122 * now, unwire all the entries that were successfully
3123 * wired above.
3124 */
3125
3126 entry = start_entry;
3127 while (entry != failed_entry) {
3128 entry->wired_count--;
3129 if (VM_MAPENT_ISWIRED(entry) == 0)
3130 uvm_map_entry_unwire(map, entry);
3131 entry = entry->next;
3132 }
3133 if ((lockflags & UVM_LK_EXIT) == 0)
3134 vm_map_unlock(map);
3135 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3136 return (rv);
3137 }
3138
3139 /* We are holding a read lock here. */
3140 if ((lockflags & UVM_LK_EXIT) == 0) {
3141 vm_map_unbusy(map);
3142 vm_map_unlock_read(map);
3143 } else {
3144
3145 /*
3146 * Get back to an exclusive (write) lock.
3147 */
3148
3149 vm_map_upgrade(map);
3150 vm_map_unbusy(map);
3151 }
3152
3153 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3154 return 0;
3155 }
3156
3157 /*
3158 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3159 * all mapped regions.
3160 *
3161 * => map must not be locked.
3162 * => if no flags are specified, all regions are unwired.
3163 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3164 */
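
/*
 * illustrative sketch (not part of the original source): mlockall(2)
 * and munlockall(2) reduce to calls along these lines, where "limit"
 * is the caller's wired-memory resource limit:
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE, limit);
 *	...
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
 */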
3165
3166 int
3167 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3168 {
3169 struct vm_map_entry *entry, *failed_entry;
3170 vsize_t size;
3171 int rv;
3172 #ifdef DIAGNOSTIC
3173 u_int timestamp_save;
3174 #endif
3175 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3176 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3177
3178 KASSERT(map->flags & VM_MAP_PAGEABLE);
3179
3180 vm_map_lock(map);
3181
3182 /*
3183 * handle wiring and unwiring separately.
3184 */
3185
3186 if (flags == 0) { /* unwire */
3187
3188 /*
3189 * POSIX 1003.1b -- munlockall unlocks all regions,
3190 * regardless of how many times mlockall has been called.
3191 */
3192
3193 for (entry = map->header.next; entry != &map->header;
3194 entry = entry->next) {
3195 if (VM_MAPENT_ISWIRED(entry))
3196 uvm_map_entry_unwire(map, entry);
3197 }
3198 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3199 vm_map_unlock(map);
3200 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3201 return 0;
3202 }
3203
3204 if (flags & MCL_FUTURE) {
3205
3206 /*
3207 * must wire all future mappings; remember this.
3208 */
3209
3210 vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
3211 }
3212
3213 if ((flags & MCL_CURRENT) == 0) {
3214
3215 /*
3216 * no more work to do!
3217 */
3218
3219 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3220 vm_map_unlock(map);
3221 return 0;
3222 }
3223
3224 /*
3225 * wire case: in three passes [XXXCDC: ugly block of code here]
3226 *
3227 * 1: holding the write lock, count all pages mapped by non-wired
3228 * entries. if this would cause us to go over our limit, we fail.
3229 *
3230 * 2: still holding the write lock, we create any anonymous maps that
3231  *    need to be created. then we increment the wiring count of each entry.
3232 *
3233 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3234 * in the pages for any newly wired area (wired_count == 1).
3235 *
3236 * downgrading to a read lock for uvm_fault_wire avoids a possible
3237 * deadlock with another thread that may have faulted on one of
3238 * the pages to be wired (it would mark the page busy, blocking
3239 * us, then in turn block on the map lock that we hold). because
3240 * of problems in the recursive lock package, we cannot upgrade
3241 * to a write lock in vm_map_lookup. thus, any actions that
3242 * require the write lock must be done beforehand. because we
3243 * keep the read lock on the map, the copy-on-write status of the
3244 * entries we modify here cannot change.
3245 */
3246
3247 for (size = 0, entry = map->header.next; entry != &map->header;
3248 entry = entry->next) {
3249 if (entry->protection != VM_PROT_NONE &&
3250 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3251 size += entry->end - entry->start;
3252 }
3253 }
3254
3255 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3256 vm_map_unlock(map);
3257 return ENOMEM;
3258 }
3259
3260 /* XXX non-pmap_wired_count case must be handled by caller */
3261 #ifdef pmap_wired_count
3262 if (limit != 0 &&
3263 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3264 vm_map_unlock(map);
3265 return ENOMEM;
3266 }
3267 #endif
3268
3269 /*
3270 * Pass 2.
3271 */
3272
3273 for (entry = map->header.next; entry != &map->header;
3274 entry = entry->next) {
3275 if (entry->protection == VM_PROT_NONE)
3276 continue;
3277 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3278
3279 /*
3280 * perform actions of vm_map_lookup that need the
3281 * write lock on the map: create an anonymous map
3282 * for a copy-on-write region, or an anonymous map
3283 * for a zero-fill region. (XXXCDC: submap case
3284 * ok?)
3285 */
3286
3287 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3288 if (UVM_ET_ISNEEDSCOPY(entry) &&
3289 ((entry->max_protection & VM_PROT_WRITE) ||
3290 (entry->object.uvm_obj == NULL))) {
3291 amap_copy(map, entry, M_WAITOK, TRUE,
3292 entry->start, entry->end);
3293 /* XXXCDC: wait OK? */
3294 }
3295 }
3296 }
3297 entry->wired_count++;
3298 }
3299
3300 /*
3301 * Pass 3.
3302 */
3303
3304 #ifdef DIAGNOSTIC
3305 timestamp_save = map->timestamp;
3306 #endif
3307 vm_map_busy(map);
3308 vm_map_downgrade(map);
3309
3310 rv = 0;
3311 for (entry = map->header.next; entry != &map->header;
3312 entry = entry->next) {
3313 if (entry->wired_count == 1) {
3314 rv = uvm_fault_wire(map, entry->start, entry->end,
3315 VM_FAULT_WIREMAX, entry->max_protection);
3316 if (rv) {
3317
3318 /*
3319 * wiring failed. break out of the loop.
3320 * we'll clean up the map below, once we
3321 * have a write lock again.
3322 */
3323
3324 break;
3325 }
3326 }
3327 }
3328
3329 if (rv) {
3330
3331 /*
3332 * Get back an exclusive (write) lock.
3333 */
3334
3335 vm_map_upgrade(map);
3336 vm_map_unbusy(map);
3337
3338 #ifdef DIAGNOSTIC
3339 if (timestamp_save != map->timestamp)
3340 panic("uvm_map_pageable_all: stale map");
3341 #endif
3342
3343 /*
3344 * first drop the wiring count on all the entries
3345 * which haven't actually been wired yet.
3346 *
3347 * Skip VM_PROT_NONE entries like we did above.
3348 */
3349
3350 failed_entry = entry;
3351 for (/* nothing */; entry != &map->header;
3352 entry = entry->next) {
3353 if (entry->protection == VM_PROT_NONE)
3354 continue;
3355 entry->wired_count--;
3356 }
3357
3358 /*
3359 * now, unwire all the entries that were successfully
3360 * wired above.
3361 *
3362 * Skip VM_PROT_NONE entries like we did above.
3363 */
3364
3365 for (entry = map->header.next; entry != failed_entry;
3366 entry = entry->next) {
3367 if (entry->protection == VM_PROT_NONE)
3368 continue;
3369 entry->wired_count--;
3370 if (VM_MAPENT_ISWIRED(entry))
3371 uvm_map_entry_unwire(map, entry);
3372 }
3373 vm_map_unlock(map);
3374 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3375 return (rv);
3376 }
3377
3378 /* We are holding a read lock here. */
3379 vm_map_unbusy(map);
3380 vm_map_unlock_read(map);
3381
3382 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3383 return 0;
3384 }
3385
3386 /*
3387 * uvm_map_clean: clean out a map range
3388 *
3389 * => valid flags:
3390 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3391 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3392 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3393 * if (flags & PGO_FREE): any cached pages are freed after clean
3394 * => returns an error if any part of the specified range isn't mapped
3395 * => never a need to flush amap layer since the anonymous memory has
3396 * no permanent home, but may deactivate pages there
3397 * => called from sys_msync() and sys_madvise()
3398 * => caller must not write-lock map (read OK).
3399 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3400 */
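
/*
 * illustrative sketch (not part of the original source): a synchronous
 * msync(2)-style flush of a user range would be requested roughly as:
 *
 *	error = uvm_map_clean(&p->p_vmspace->vm_map, start, end,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *
 * passing PGO_CLEANIT | PGO_FREE instead would also discard the
 * cleaned pages, as described by the flag list above.
 */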
3401
3402 int
3403 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3404 {
3405 struct vm_map_entry *current, *entry;
3406 struct uvm_object *uobj;
3407 struct vm_amap *amap;
3408 struct vm_anon *anon;
3409 struct vm_page *pg;
3410 vaddr_t offset;
3411 vsize_t size;
3412 int error, refs;
3413 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3414
3415 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3416 map, start, end, flags);
3417 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3418 (PGO_FREE|PGO_DEACTIVATE));
3419
3420 vm_map_lock_read(map);
3421 VM_MAP_RANGE_CHECK(map, start, end);
3422 if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
3423 vm_map_unlock_read(map);
3424 return EFAULT;
3425 }
3426
3427 /*
3428 * Make a first pass to check for holes.
3429 */
3430
3431 for (current = entry; current->start < end; current = current->next) {
3432 if (UVM_ET_ISSUBMAP(current)) {
3433 vm_map_unlock_read(map);
3434 return EINVAL;
3435 }
3436 if (end <= current->end) {
3437 break;
3438 }
3439 if (current->end != current->next->start) {
3440 vm_map_unlock_read(map);
3441 return EFAULT;
3442 }
3443 }
3444
3445 error = 0;
3446 for (current = entry; start < end; current = current->next) {
3447 amap = current->aref.ar_amap; /* top layer */
3448 uobj = current->object.uvm_obj; /* bottom layer */
3449 KASSERT(start >= current->start);
3450
3451 /*
3452 * No amap cleaning necessary if:
3453 *
3454 * (1) There's no amap.
3455 *
3456 * (2) We're not deactivating or freeing pages.
3457 */
3458
3459 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3460 goto flush_object;
3461
3462 amap_lock(amap);
3463 offset = start - current->start;
3464 size = MIN(end, current->end) - start;
3465 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3466 			anon = amap_lookup(&current->aref, offset);
3467 if (anon == NULL)
3468 continue;
3469
3470 simple_lock(&anon->an_lock);
3471 pg = anon->u.an_page;
3472 if (pg == NULL) {
3473 simple_unlock(&anon->an_lock);
3474 continue;
3475 }
3476
3477 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3478
3479 /*
3480 * In these first 3 cases, we just deactivate the page.
3481 */
3482
3483 case PGO_CLEANIT|PGO_FREE:
3484 case PGO_CLEANIT|PGO_DEACTIVATE:
3485 case PGO_DEACTIVATE:
3486 deactivate_it:
3487 /*
3488 * skip the page if it's loaned or wired,
3489 * since it shouldn't be on a paging queue
3490 * at all in these cases.
3491 */
3492
3493 uvm_lock_pageq();
3494 if (pg->loan_count != 0 ||
3495 pg->wire_count != 0) {
3496 uvm_unlock_pageq();
3497 simple_unlock(&anon->an_lock);
3498 continue;
3499 }
3500 KASSERT(pg->uanon == anon);
3501 pmap_clear_reference(pg);
3502 uvm_pagedeactivate(pg);
3503 uvm_unlock_pageq();
3504 simple_unlock(&anon->an_lock);
3505 continue;
3506
3507 case PGO_FREE:
3508
3509 /*
3510 * If there are multiple references to
3511 * the amap, just deactivate the page.
3512 */
3513
3514 if (amap_refs(amap) > 1)
3515 goto deactivate_it;
3516
3517 /* skip the page if it's wired */
3518 if (pg->wire_count != 0) {
3519 simple_unlock(&anon->an_lock);
3520 continue;
3521 }
3522 				amap_unadd(&current->aref, offset);
3523 refs = --anon->an_ref;
3524 simple_unlock(&anon->an_lock);
3525 if (refs == 0)
3526 uvm_anfree(anon);
3527 continue;
3528 }
3529 }
3530 amap_unlock(amap);
3531
3532 flush_object:
3533 /*
3534 * flush pages if we've got a valid backing object.
3535 * note that we must always clean object pages before
3536 * freeing them since otherwise we could reveal stale
3537 * data from files.
3538 */
3539
3540 offset = current->offset + (start - current->start);
3541 size = MIN(end, current->end) - start;
3542 if (uobj != NULL) {
3543 simple_lock(&uobj->vmobjlock);
3544 if (uobj->pgops->pgo_put != NULL)
3545 error = (uobj->pgops->pgo_put)(uobj, offset,
3546 offset + size, flags | PGO_CLEANIT);
3547 else
3548 error = 0;
3549 }
3550 start += size;
3551 }
3552 vm_map_unlock_read(map);
3553 return (error);
3554 }
3555
3556
3557 /*
3558 * uvm_map_checkprot: check protection in map
3559 *
3560 * => must allow specified protection in a fully allocated region.
3561 * => map must be read or write locked by caller.
3562 */
3563
3564 boolean_t
3565 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3566 vm_prot_t protection)
3567 {
3568 struct vm_map_entry *entry;
3569 struct vm_map_entry *tmp_entry;
3570
3571 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3572 return (FALSE);
3573 }
3574 entry = tmp_entry;
3575 while (start < end) {
3576 if (entry == &map->header) {
3577 return (FALSE);
3578 }
3579
3580 /*
3581 * no holes allowed
3582 */
3583
3584 if (start < entry->start) {
3585 return (FALSE);
3586 }
3587
3588 /*
3589 * check protection associated with entry
3590 */
3591
3592 if ((entry->protection & protection) != protection) {
3593 return (FALSE);
3594 }
3595 start = entry->end;
3596 entry = entry->next;
3597 }
3598 return (TRUE);
3599 }
3600
3601 /*
3602 * uvmspace_alloc: allocate a vmspace structure.
3603 *
3604 * - structure includes vm_map and pmap
3605 * - XXX: no locking on this structure
3606 * - refcnt set to 1, rest must be init'd by caller
3607 */
3608 struct vmspace *
3609 uvmspace_alloc(vaddr_t min, vaddr_t max)
3610 {
3611 struct vmspace *vm;
3612 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3613
3614 vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
3615 uvmspace_init(vm, NULL, min, max);
3616 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3617 return (vm);
3618 }
3619
3620 /*
3621 * uvmspace_init: initialize a vmspace structure.
3622 *
3623 * - XXX: no locking on this structure
3624 * - refcnt set to 1, rest must be init'd by caller
3625 */
3626 void
3627 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t min, vaddr_t max)
3628 {
3629 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3630
3631 memset(vm, 0, sizeof(*vm));
3632 uvm_map_setup(&vm->vm_map, min, max, VM_MAP_PAGEABLE
3633 #ifdef __USING_TOPDOWN_VM
3634 | VM_MAP_TOPDOWN
3635 #endif
3636 );
3637 if (pmap)
3638 pmap_reference(pmap);
3639 else
3640 pmap = pmap_create();
3641 vm->vm_map.pmap = pmap;
3642 vm->vm_refcnt = 1;
3643 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3644 }
3645
3646 /*
3647 * uvmspace_share: share a vmspace between two processes
3648 *
3649 * - used for vfork, threads(?)
3650 */
3651
3652 void
3653 uvmspace_share(struct proc *p1, struct proc *p2)
3654 {
3655 struct simplelock *slock = &p1->p_vmspace->vm_map.ref_lock;
3656
3657 p2->p_vmspace = p1->p_vmspace;
3658 simple_lock(slock);
3659 p1->p_vmspace->vm_refcnt++;
3660 simple_unlock(slock);
3661 }
3662
3663 /*
3664 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
3665 *
3666 * - XXX: no locking on vmspace
3667 */
3668
3669 void
3670 uvmspace_unshare(struct lwp *l)
3671 {
3672 struct proc *p = l->l_proc;
3673 struct vmspace *nvm, *ovm = p->p_vmspace;
3674
3675 if (ovm->vm_refcnt == 1)
3676 /* nothing to do: vmspace isn't shared in the first place */
3677 return;
3678
3679 /* make a new vmspace, still holding old one */
3680 nvm = uvmspace_fork(ovm);
3681
3682 pmap_deactivate(l); /* unbind old vmspace */
3683 p->p_vmspace = nvm;
3684 pmap_activate(l); /* switch to new vmspace */
3685
3686 uvmspace_free(ovm); /* drop reference to old vmspace */
3687 }
3688
3689 /*
3690 * uvmspace_exec: the process wants to exec a new program
3691 */
3692
3693 void
3694 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
3695 {
3696 struct proc *p = l->l_proc;
3697 struct vmspace *nvm, *ovm = p->p_vmspace;
3698 struct vm_map *map = &ovm->vm_map;
3699
3700 #ifdef __sparc__
3701 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
3702 kill_user_windows(l); /* before stack addresses go away */
3703 #endif
3704
3705 /*
3706 * see if more than one process is using this vmspace...
3707 */
3708
3709 if (ovm->vm_refcnt == 1) {
3710
3711 /*
3712 * if p is the only process using its vmspace then we can safely
3713 * recycle that vmspace for the program that is being exec'd.
3714 */
3715
3716 #ifdef SYSVSHM
3717 /*
3718 * SYSV SHM semantics require us to kill all segments on an exec
3719 */
3720
3721 if (ovm->vm_shm)
3722 shmexit(ovm);
3723 #endif
3724
3725 /*
3726 * POSIX 1003.1b -- "lock future mappings" is revoked
3727 * when a process execs another program image.
3728 */
3729
3730 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3731
3732 /*
3733 * now unmap the old program
3734 */
3735
3736 pmap_remove_all(map->pmap);
3737 uvm_unmap(map, map->min_offset, map->max_offset);
3738 KASSERT(map->header.prev == &map->header);
3739 KASSERT(map->nentries == 0);
3740
3741 /*
3742 * resize the map
3743 */
3744
3745 map->min_offset = start;
3746 map->max_offset = end;
3747 } else {
3748
3749 /*
3750 * p's vmspace is being shared, so we can't reuse it for p since
3751 * it is still being used for others. allocate a new vmspace
3752 * for p
3753 */
3754
3755 nvm = uvmspace_alloc(start, end);
3756
3757 /*
3758 * install new vmspace and drop our ref to the old one.
3759 */
3760
3761 pmap_deactivate(l);
3762 p->p_vmspace = nvm;
3763 pmap_activate(l);
3764
3765 uvmspace_free(ovm);
3766 }
3767 }
3768
3769 /*
3770 * uvmspace_free: free a vmspace data structure
3771 */
3772
3773 void
3774 uvmspace_free(struct vmspace *vm)
3775 {
3776 struct vm_map_entry *dead_entries;
3777 struct vm_map *map = &vm->vm_map;
3778 int n;
3779
3780 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
3781
3782 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
3783 simple_lock(&map->ref_lock);
3784 n = --vm->vm_refcnt;
3785 simple_unlock(&map->ref_lock);
3786 if (n > 0)
3787 return;
3788
3789 /*
3790 * at this point, there should be no other references to the map.
3791 * delete all of the mappings, then destroy the pmap.
3792 */
3793
3794 map->flags |= VM_MAP_DYING;
3795 pmap_remove_all(map->pmap);
3796 #ifdef SYSVSHM
3797 /* Get rid of any SYSV shared memory segments. */
3798 if (vm->vm_shm != NULL)
3799 shmexit(vm);
3800 #endif
3801 if (map->nentries) {
3802 uvm_unmap_remove(map, map->min_offset, map->max_offset,
3803 &dead_entries, NULL);
3804 if (dead_entries != NULL)
3805 uvm_unmap_detach(dead_entries, 0);
3806 }
3807 KASSERT(map->nentries == 0);
3808 KASSERT(map->size == 0);
3809 pmap_destroy(map->pmap);
3810 pool_put(&uvm_vmspace_pool, vm);
3811 }
3812
3813 /*
3814 * F O R K - m a i n e n t r y p o i n t
3815 */
3816 /*
3817 * uvmspace_fork: fork a process' main map
3818 *
3819 * => create a new vmspace for child process from parent.
3820 * => parent's map must not be locked.
3821 */
3822
3823 struct vmspace *
3824 uvmspace_fork(struct vmspace *vm1)
3825 {
3826 struct vmspace *vm2;
3827 struct vm_map *old_map = &vm1->vm_map;
3828 struct vm_map *new_map;
3829 struct vm_map_entry *old_entry;
3830 struct vm_map_entry *new_entry;
3831 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
3832
3833 vm_map_lock(old_map);
3834
3835 vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset);
3836 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
3837 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
3838 new_map = &vm2->vm_map; /* XXX */
3839
3840 old_entry = old_map->header.next;
3841 new_map->size = old_map->size;
3842
3843 /*
3844 * go entry-by-entry
3845 */
3846
3847 while (old_entry != &old_map->header) {
3848
3849 /*
3850 * first, some sanity checks on the old entry
3851 */
3852
3853 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
3854 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
3855 !UVM_ET_ISNEEDSCOPY(old_entry));
3856
3857 switch (old_entry->inheritance) {
3858 case MAP_INHERIT_NONE:
3859
3860 /*
3861 * drop the mapping, modify size
3862 */
3863 new_map->size -= old_entry->end - old_entry->start;
3864 break;
3865
3866 case MAP_INHERIT_SHARE:
3867
3868 /*
3869 * share the mapping: this means we want the old and
3870 * new entries to share amaps and backing objects.
3871 */
3872 /*
3873 * if the old_entry needs a new amap (due to prev fork)
3874 * then we need to allocate it now so that we have
3875 * something we own to share with the new_entry. [in
3876 * other words, we need to clear needs_copy]
3877 */
3878
3879 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
3880 /* get our own amap, clears needs_copy */
3881 amap_copy(old_map, old_entry, M_WAITOK, FALSE,
3882 0, 0);
3883 /* XXXCDC: WAITOK??? */
3884 }
3885
3886 new_entry = uvm_mapent_alloc(new_map, 0);
3887 /* old_entry -> new_entry */
3888 uvm_mapent_copy(old_entry, new_entry);
3889
3890 /* new pmap has nothing wired in it */
3891 new_entry->wired_count = 0;
3892
3893 /*
3894 * gain reference to object backing the map (can't
3895 * be a submap, already checked this case).
3896 */
3897
3898 if (new_entry->aref.ar_amap)
3899 uvm_map_reference_amap(new_entry, AMAP_SHARED);
3900
3901 if (new_entry->object.uvm_obj &&
3902 new_entry->object.uvm_obj->pgops->pgo_reference)
3903 new_entry->object.uvm_obj->
3904 pgops->pgo_reference(
3905 new_entry->object.uvm_obj);
3906
3907 /* insert entry at end of new_map's entry list */
3908 uvm_map_entry_link(new_map, new_map->header.prev,
3909 new_entry);
3910
3911 break;
3912
3913 case MAP_INHERIT_COPY:
3914
3915 /*
3916 * copy-on-write the mapping (using mmap's
3917 * MAP_PRIVATE semantics)
3918 *
3919 * allocate new_entry, adjust reference counts.
3920 * (note that new references are read-only).
3921 */
3922
3923 new_entry = uvm_mapent_alloc(new_map, 0);
3924 /* old_entry -> new_entry */
3925 uvm_mapent_copy(old_entry, new_entry);
3926
3927 if (new_entry->aref.ar_amap)
3928 uvm_map_reference_amap(new_entry, 0);
3929
3930 if (new_entry->object.uvm_obj &&
3931 new_entry->object.uvm_obj->pgops->pgo_reference)
3932 new_entry->object.uvm_obj->pgops->pgo_reference
3933 (new_entry->object.uvm_obj);
3934
3935 /* new pmap has nothing wired in it */
3936 new_entry->wired_count = 0;
3937
3938 new_entry->etype |=
3939 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
3940 uvm_map_entry_link(new_map, new_map->header.prev,
3941 new_entry);
3942
3943 /*
3944 * the new entry will need an amap. it will either
3945 * need to be copied from the old entry or created
3946 * from scratch (if the old entry does not have an
3947 * amap). can we defer this process until later
3948 * (by setting "needs_copy") or do we need to copy
3949 * the amap now?
3950 *
3951 * we must copy the amap now if any of the following
3952 * conditions hold:
3953 * 1. the old entry has an amap and that amap is
3954 * being shared. this means that the old (parent)
3955 * process is sharing the amap with another
3956 * process. if we do not clear needs_copy here
3957 * we will end up in a situation where both the
3958 			 * parent and child process are referring to the
3959 * same amap with "needs_copy" set. if the
3960 * parent write-faults, the fault routine will
3961 * clear "needs_copy" in the parent by allocating
3962 * a new amap. this is wrong because the
3963 * parent is supposed to be sharing the old amap
3964 * and the new amap will break that.
3965 *
3966 * 2. if the old entry has an amap and a non-zero
3967 * wire count then we are going to have to call
3968 * amap_cow_now to avoid page faults in the
3969 * parent process. since amap_cow_now requires
3970 * "needs_copy" to be clear we might as well
3971 * clear it here as well.
3972 *
3973 */
3974
3975 if (old_entry->aref.ar_amap != NULL) {
3976 if ((amap_flags(old_entry->aref.ar_amap) &
3977 AMAP_SHARED) != 0 ||
3978 VM_MAPENT_ISWIRED(old_entry)) {
3979
3980 amap_copy(new_map, new_entry, M_WAITOK,
3981 FALSE, 0, 0);
3982 /* XXXCDC: M_WAITOK ... ok? */
3983 }
3984 }
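			/*
			 * otherwise the amap copy stays deferred: new_entry
			 * keeps the UVM_ET_NEEDSCOPY set above and the fault
			 * routine will give it its own amap on first write.
			 */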
3985
3986 /*
3987 * if the parent's entry is wired down, then the
3988 * parent process does not want page faults on
3989 * access to that memory. this means that we
3990 * cannot do copy-on-write because we can't write
3991 * protect the old entry. in this case we
3992 * resolve all copy-on-write faults now, using
3993 * amap_cow_now. note that we have already
3994 * allocated any needed amap (above).
3995 */
3996
3997 if (VM_MAPENT_ISWIRED(old_entry)) {
3998
3999 /*
4000 * resolve all copy-on-write faults now
4001 * (note that there is nothing to do if
4002 * the old mapping does not have an amap).
4003 */
4004 if (old_entry->aref.ar_amap)
4005 amap_cow_now(new_map, new_entry);
4006
4007 } else {
4008
4009 /*
4010 * setup mappings to trigger copy-on-write faults
4011 * we must write-protect the parent if it has
4012 * an amap and it is not already "needs_copy"...
4013 * if it is already "needs_copy" then the parent
4014 * has already been write-protected by a previous
4015 * fork operation.
4016 */
4017
4018 if (old_entry->aref.ar_amap &&
4019 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4020 if (old_entry->max_protection & VM_PROT_WRITE) {
4021 pmap_protect(old_map->pmap,
4022 old_entry->start,
4023 old_entry->end,
4024 old_entry->protection &
4025 ~VM_PROT_WRITE);
4026 pmap_update(old_map->pmap);
4027 }
4028 old_entry->etype |= UVM_ET_NEEDSCOPY;
4029 }
4030 }
4031 break;
4032 } /* end of switch statement */
4033 old_entry = old_entry->next;
4034 }
4035
4036 vm_map_unlock(old_map);
4037
4038 #ifdef SYSVSHM
4039 if (vm1->vm_shm)
4040 shmfork(vm1, vm2);
4041 #endif
4042
4043 #ifdef PMAP_FORK
4044 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4045 #endif
4046
4047 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4048 return (vm2);
4049 }
4050
4051
4052 /*
4053 * in-kernel map entry allocation.
4054 */
4055
4056 int ukh_alloc, ukh_free;
4057 int uke_alloc, uke_free;
4058
4059 struct uvm_kmapent_hdr {
4060 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4061 int ukh_nused;
4062 struct vm_map_entry *ukh_freelist;
4063 struct vm_map *ukh_map;
4064 struct vm_map_entry ukh_entries[0];
4065 };
4066
4067 #define UVM_KMAPENT_CHUNK \
4068 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4069 / sizeof(struct vm_map_entry))
4070
4071 #define UVM_KHDR_FIND(entry) \
4072 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
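
/*
 * a sketch of the chunk layout implied by the definitions above: each
 * chunk occupies exactly one page, with the header at the front and the
 * entry array filling the rest of the page.
 *
 *	page boundary -> +--------------------------------------------+
 *	                 | struct uvm_kmapent_hdr                     |
 *	                 +--------------------------------------------+
 *	                 | ukh_entries[0]   (maps the chunk itself)   |
 *	                 | ukh_entries[1 .. UVM_KMAPENT_CHUNK - 1]    |
 *	                 |                  (handed out to callers)   |
 *	                 +--------------------------------------------+
 *
 * because the chunk is page aligned, UVM_KHDR_FIND() recovers the header
 * from any entry pointer by masking off the page offset.
 */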
4073
4074 static __inline struct vm_map_entry *uvm_kmapent_get(struct uvm_kmapent_hdr *);
4075 static __inline void uvm_kmapent_put(struct uvm_kmapent_hdr *,
4076 struct vm_map_entry *);
4077
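/*
 * uvm_kmapent_map: find the map that a kernel map entry belongs to,
 * via the chunk header sharing the entry's page.
 */
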
4078 static __inline struct vm_map *
4079 uvm_kmapent_map(struct vm_map_entry *entry)
4080 {
4081 const struct uvm_kmapent_hdr *ukh;
4082
4083 ukh = UVM_KHDR_FIND(entry);
4084 return ukh->ukh_map;
4085 }
4086
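/*
 * uvm_kmapent_get: take an entry off the chunk's freelist.
 * returns NULL if every entry in the chunk is already in use.
 */
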
4087 static __inline struct vm_map_entry *
4088 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4089 {
4090 struct vm_map_entry *entry;
4091
4092 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4093 KASSERT(ukh->ukh_nused >= 0);
4094
4095 entry = ukh->ukh_freelist;
4096 if (entry) {
4097 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4098 == UVM_MAP_KERNEL);
4099 ukh->ukh_freelist = entry->next;
4100 ukh->ukh_nused++;
4101 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4102 } else {
4103 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4104 }
4105
4106 return entry;
4107 }
4108
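/*
 * uvm_kmapent_put: return an entry to its chunk's freelist.
 */
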
4109 static __inline void
4110 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4111 {
4112
4113 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4114 == UVM_MAP_KERNEL);
4115 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4116 KASSERT(ukh->ukh_nused > 0);
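	/* invariant: the freelist is empty iff the chunk is fully in use */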
4117 KASSERT(ukh->ukh_freelist != NULL ||
4118 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4119 KASSERT(ukh->ukh_freelist == NULL ||
4120 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4121
4122 ukh->ukh_nused--;
4123 entry->next = ukh->ukh_freelist;
4124 ukh->ukh_freelist = entry;
4125 }
4126
4127 /*
4128 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4129 */
4130
4131 static struct vm_map_entry *
4132 uvm_kmapent_alloc(struct vm_map *map, int flags)
4133 {
4134 struct vm_page *pg;
4135 struct uvm_map_args args;
4136 struct uvm_kmapent_hdr *ukh;
4137 struct vm_map_entry *entry;
4138 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4139 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4140 vaddr_t va;
4141 int error;
4142 int i;
4143 int s;
4144
4145 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4146 KDASSERT(kernel_map != NULL);
4147 KASSERT(vm_map_pmap(map) == pmap_kernel());
4148
4149 uke_alloc++;
4150 entry = NULL;
4151 again:
4152 /*
4153 	 * try to grab an entry from the freelist.
4154 */
4155 s = splvm();
4156 simple_lock(&uvm.kentry_lock);
4157 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4158 if (ukh) {
4159 entry = uvm_kmapent_get(ukh);
4160 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4161 LIST_REMOVE(ukh, ukh_listq);
4162 }
4163 simple_unlock(&uvm.kentry_lock);
4164 splx(s);
4165
4166 if (entry)
4167 return entry;
4168
4169 /*
4170 * there's no free entry for this vm_map.
4171 	 * now we need to allocate more vm_map_entries.
4172 	 * for simplicity, always allocate a one-page chunk of them at once.
4173 */
4174
4175 pg = uvm_pagealloc(NULL, 0, NULL, 0);
4176 if (__predict_false(pg == NULL)) {
4177 if (flags & UVM_FLAG_NOWAIT)
4178 return NULL;
4179 uvm_wait("kme_alloc");
4180 goto again;
4181 }
4182
4183 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, 0, 0, mapflags, &args);
4184 if (error) {
4185 uvm_pagefree(pg);
4186 return NULL;
4187 }
4188
4189 va = args.uma_start;
4190
4191 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
4192 pmap_update(vm_map_pmap(map));
4193
4194 ukh = (void *)va;
4195
4196 /*
4197 	 * use the first entry for ukh itself.
4198 */
4199
4200 entry = &ukh->ukh_entries[0];
4201 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4202 error = uvm_map_enter(map, &args, entry);
4203 KASSERT(error == 0);
4204
4205 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4206 ukh->ukh_map = map;
4207 ukh->ukh_freelist = NULL;
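	/*
	 * seed the freelist with entries [2 .. UVM_KMAPENT_CHUNK - 1].
	 * entries [0] and [1] stay in use: [0] maps the chunk itself
	 * (see above) and [1] is returned to the caller below, hence
	 * the KASSERT of ukh_nused == 2 after the loop.
	 */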
4208 for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
4209 struct vm_map_entry *entry = &ukh->ukh_entries[i];
4210
4211 entry->flags = UVM_MAP_KERNEL;
4212 uvm_kmapent_put(ukh, entry);
4213 }
4214 KASSERT(ukh->ukh_nused == 2);
4215
4216 s = splvm();
4217 simple_lock(&uvm.kentry_lock);
4218 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4219 ukh, ukh_listq);
4220 simple_unlock(&uvm.kentry_lock);
4221 splx(s);
4222
4223 /*
4224 * return second entry.
4225 */
4226
4227 entry = &ukh->ukh_entries[1];
4228 entry->flags = UVM_MAP_KERNEL;
4229 ukh_alloc++;
4230 return entry;
4231 }
4232
4233 /*
4234  * uvm_kmapent_free: free map entry for in-kernel map
4235 */
4236
4237 static void
4238 uvm_kmapent_free(struct vm_map_entry *entry)
4239 {
4240 struct uvm_kmapent_hdr *ukh;
4241 struct vm_page *pg;
4242 struct vm_map *map;
4243 struct pmap *pmap;
4244 vaddr_t va;
4245 paddr_t pa;
4246 struct vm_map_entry *deadentry;
4247 int s;
4248
4249 uke_free++;
4250 ukh = UVM_KHDR_FIND(entry);
4251 map = ukh->ukh_map;
4252
4253 s = splvm();
4254 simple_lock(&uvm.kentry_lock);
4255 uvm_kmapent_put(ukh, entry);
4256 if (ukh->ukh_nused > 1) {
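		/*
		 * if the chunk was full before this free it was off the
		 * free list, so the entry we just returned makes it
		 * eligible again; re-link it.
		 */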
4257 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4258 LIST_INSERT_HEAD(
4259 &vm_map_to_kernel(map)->vmk_kentry_free,
4260 ukh, ukh_listq);
4261 simple_unlock(&uvm.kentry_lock);
4262 splx(s);
4263 return;
4264 }
4265
4266 /*
4267 * now we can free this ukh.
4268 *
4269 	 * however, keep one empty ukh to avoid alloc/free ping-pong.
4270 */
4271
4272 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4273 LIST_NEXT(ukh, ukh_listq) == NULL) {
4274 simple_unlock(&uvm.kentry_lock);
4275 splx(s);
4276 return;
4277 }
4278 LIST_REMOVE(ukh, ukh_listq);
4279 simple_unlock(&uvm.kentry_lock);
4280 splx(s);
4281
4282 KASSERT(ukh->ukh_nused == 1);
4283
4284 /*
4285 	 * remove map entry for ukh itself.
4286 */
4287
4288 va = (vaddr_t)ukh;
4289 KASSERT((va & PAGE_MASK) == 0);
4290 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL);
4291 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4292 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4293 KASSERT(deadentry->next == NULL);
4294 KASSERT(deadentry == &ukh->ukh_entries[0]);
4295
4296 /*
4297 * unmap the page from pmap and free it.
4298 */
4299
4300 pmap = vm_map_pmap(map);
4301 KASSERT(pmap == pmap_kernel());
4302 if (!pmap_extract(pmap, va, &pa))
4303 panic("%s: no mapping", __func__);
4304 pmap_kremove(va, PAGE_SIZE);
4305 pg = PHYS_TO_VM_PAGE(pa);
4306 uvm_pagefree(pg);
4307 ukh_free++;
4308 }
4309
4310 /*
4311 * map entry reservation
4312 */
4313
4314 /*
4315 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4316 *
4317 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4318 * => caller shouldn't hold map locked.
4319 */
4320 int
4321 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4322 int nentries, int flags)
4323 {
4324
4325 umr->umr_nentries = 0;
4326
4327 if ((flags & UVM_FLAG_QUANTUM) != 0)
4328 return 0;
4329
4330 if (!VM_MAP_USE_KMAPENT(map))
4331 return 0;
4332
4333 while (nentries--) {
4334 struct vm_map_entry *ent;
4335 ent = uvm_kmapent_alloc(map, flags);
4336 if (!ent) {
4337 uvm_mapent_unreserve(map, umr);
4338 return ENOMEM;
4339 }
4340 UMR_PUTENTRY(umr, ent);
4341 }
4342
4343 return 0;
4344 }
4345
4346 /*
4347 * uvm_mapent_unreserve:
4348 *
4349 * => caller shouldn't hold map locked.
4350  * => never fails or sleeps.
4351 */
4352 void
4353 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4354 {
4355
4356 while (!UMR_EMPTY(umr))
4357 uvm_kmapent_free(UMR_GETENTRY(umr));
4358 }
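
/*
 * illustrative usage (a hedged sketch added for documentation, not code
 * from this file; "example_unmap_range" and the entry count of 2 are
 * made-up placeholders): a caller that may need to clip kernel map
 * entries reserves them before taking the map lock, since allocation can
 * sleep or fail, and releases whatever is left over afterwards.
 *
 *	int
 *	example_unmap_range(struct vm_map *map, vaddr_t start, vaddr_t end)
 *	{
 *		struct uvm_mapent_reservation umr;
 *		int error;
 *
 *		error = uvm_mapent_reserve(map, &umr, 2, 0);
 *		if (error)
 *			return error;
 *		vm_map_lock(map);
 *		... unmap/clip work that may consume reserved entries ...
 *		vm_map_unlock(map);
 *		uvm_mapent_unreserve(map, &umr);
 *		return 0;
 *	}
 */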
4359
4360 #if defined(DDB)
4361
4362 /*
4363 * DDB hooks
4364 */
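
/*
 * these printing routines are intended to be called from the in-kernel
 * debugger; the actual command bindings live in ddb, not in this file.
 */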
4365
4366 /*
4367 * uvm_map_printit: actually prints the map
4368 */
4369
4370 void
4371 uvm_map_printit(struct vm_map *map, boolean_t full,
4372 void (*pr)(const char *, ...))
4373 {
4374 struct vm_map_entry *entry;
4375
4376 	(*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset, map->max_offset);
4377 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
4378 map->nentries, map->size, map->ref_count, map->timestamp,
4379 map->flags);
4380 #ifdef pmap_wired_count
4381 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4382 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4383 #else
4384 (*pr)("\tpmap=%p(resident=%ld)\n", map->pmap,
4385 pmap_resident_count(map->pmap));
4386 #endif
4387 if (!full)
4388 return;
4389 for (entry = map->header.next; entry != &map->header;
4390 entry = entry->next) {
4391 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
4392 entry, entry->start, entry->end, entry->object.uvm_obj,
4393 (long long)entry->offset, entry->aref.ar_amap,
4394 entry->aref.ar_pageoff);
4395 (*pr)(
4396 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4397 "wc=%d, adv=%d\n",
4398 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4399 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4400 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4401 entry->protection, entry->max_protection,
4402 entry->inheritance, entry->wired_count, entry->advice);
4403 }
4404 }
4405
4406 /*
4407 * uvm_object_printit: actually prints the object
4408 */
4409
4410 void
4411 uvm_object_printit(struct uvm_object *uobj, boolean_t full,
4412 void (*pr)(const char *, ...))
4413 {
4414 struct vm_page *pg;
4415 int cnt = 0;
4416
4417 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
4418 uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
4419 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
4420 (*pr)("refs=<SYSTEM>\n");
4421 else
4422 (*pr)("refs=%d\n", uobj->uo_refs);
4423
4424 if (!full) {
4425 return;
4426 }
4427 (*pr)(" PAGES <pg,offset>:\n ");
4428 TAILQ_FOREACH(pg, &uobj->memq, listq) {
4429 cnt++;
4430 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
4431 if ((cnt % 3) == 0) {
4432 (*pr)("\n ");
4433 }
4434 }
4435 if ((cnt % 3) != 0) {
4436 (*pr)("\n");
4437 }
4438 }
4439
4440 /*
4441 * uvm_page_printit: actually print the page
4442 */
4443
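/*
 * the strings below use the historic kernel "%b" format understood by
 * bitmask_snprintf(): the leading character encodes the output base for
 * the raw value (\20 == 16, i.e. hexadecimal), and each following pair
 * is a 1-origin bit number followed by that bit's name.
 */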
4444 static const char page_flagbits[] =
4445 "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY"
4446 "\11ZERO\15PAGER1";
4447 static const char page_pqflagbits[] =
4448 "\20\1FREE\2INACTIVE\3ACTIVE\5ANON\6AOBJ";
4449
4450 void
4451 uvm_page_printit(struct vm_page *pg, boolean_t full,
4452 void (*pr)(const char *, ...))
4453 {
4454 struct vm_page *tpg;
4455 struct uvm_object *uobj;
4456 struct pglist *pgl;
4457 char pgbuf[128];
4458 char pqbuf[128];
4459
4460 (*pr)("PAGE %p:\n", pg);
4461 bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf));
4462 bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf));
4463 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
4464 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
4465 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
4466 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
4467 #if defined(UVM_PAGE_TRKOWN)
4468 if (pg->flags & PG_BUSY)
4469 (*pr)(" owning process = %d, tag=%s\n",
4470 pg->owner, pg->owner_tag);
4471 else
4472 (*pr)(" page not busy, no owner\n");
4473 #else
4474 (*pr)(" [page ownership tracking disabled]\n");
4475 #endif
4476
4477 if (!full)
4478 return;
4479
4480 /* cross-verify object/anon */
4481 if ((pg->pqflags & PQ_FREE) == 0) {
4482 if (pg->pqflags & PQ_ANON) {
4483 if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
4484 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
4485 (pg->uanon) ? pg->uanon->u.an_page : NULL);
4486 else
4487 (*pr)(" anon backpointer is OK\n");
4488 } else {
4489 uobj = pg->uobject;
4490 if (uobj) {
4491 (*pr)(" checking object list\n");
4492 TAILQ_FOREACH(tpg, &uobj->memq, listq) {
4493 if (tpg == pg) {
4494 break;
4495 }
4496 }
4497 if (tpg)
4498 (*pr)(" page found on object list\n");
4499 else
4500 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
4501 }
4502 }
4503 }
4504
4505 /* cross-verify page queue */
4506 if (pg->pqflags & PQ_FREE) {
4507 int fl = uvm_page_lookup_freelist(pg);
4508 int color = VM_PGCOLOR_BUCKET(pg);
4509 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
4510 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
4511 } else if (pg->pqflags & PQ_INACTIVE) {
4512 pgl = &uvm.page_inactive;
4513 } else if (pg->pqflags & PQ_ACTIVE) {
4514 pgl = &uvm.page_active;
4515 } else {
4516 pgl = NULL;
4517 }
4518
4519 if (pgl) {
4520 (*pr)(" checking pageq list\n");
4521 TAILQ_FOREACH(tpg, pgl, pageq) {
4522 if (tpg == pg) {
4523 break;
4524 }
4525 }
4526 if (tpg)
4527 (*pr)(" page found on pageq list\n");
4528 else
4529 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
4530 }
4531 }
4532 #endif
4533