/*	$NetBSD: uvm_map.c,v 1.203 2005/06/28 01:07:56 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.203 2005/06/28 01:07:56 thorpej Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#define UVM_MAP
#include <uvm/uvm.h>
#undef RB_AUGMENT
#define	RB_AUGMENT(x)	uvm_rb_augment(x)

#ifdef DDB
#include <uvm/uvm_ddb.h>
#endif

#ifndef UVMMAP_NOCOUNTERS
#include <sys/device.h>
struct evcnt map_ubackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "ubackmerge");
struct evcnt map_uforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "uforwmerge");
struct evcnt map_ubimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "ubimerge");
struct evcnt map_unomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "unomerge");
struct evcnt map_kbackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "kbackmerge");
struct evcnt map_kforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "kforwmerge");
struct evcnt map_kbimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "kbimerge");
struct evcnt map_knomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "knomerge");
struct evcnt uvm_map_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "map_call");
struct evcnt uvm_mlk_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "mlk_call");
struct evcnt uvm_mlk_hint = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "mlk_hint");

EVCNT_ATTACH_STATIC(map_ubackmerge);
EVCNT_ATTACH_STATIC(map_uforwmerge);
EVCNT_ATTACH_STATIC(map_ubimerge);
EVCNT_ATTACH_STATIC(map_unomerge);
EVCNT_ATTACH_STATIC(map_kbackmerge);
EVCNT_ATTACH_STATIC(map_kforwmerge);
EVCNT_ATTACH_STATIC(map_kbimerge);
EVCNT_ATTACH_STATIC(map_knomerge);
EVCNT_ATTACH_STATIC(uvm_map_call);
EVCNT_ATTACH_STATIC(uvm_mlk_call);
EVCNT_ATTACH_STATIC(uvm_mlk_hint);

#define	UVMCNT_INCR(ev)		ev.ev_count++
#define	UVMCNT_DECR(ev)		ev.ev_count--
#else
#define	UVMCNT_INCR(ev)
#define	UVMCNT_DECR(ev)
#endif

const char vmmapbsy[] = "vmmapbsy";

/*
 * pool for vmspace structures.
 */

POOL_INIT(uvm_vmspace_pool, sizeof(struct vmspace), 0, 0, 0, "vmsppl",
    &pool_allocator_nointr);

/*
 * pool for dynamically-allocated map entries.
 */

POOL_INIT(uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0, "vmmpepl",
    &pool_allocator_nointr);

MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
 * for the vm_map.
 */
extern struct vm_map *pager_map; /* XXX */
#define	VM_MAP_USE_KMAPENT(map) \
	(((map)->flags & VM_MAP_INTRSAFE) || (map) == kernel_map)

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
	== 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))
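
/*
 * for example (illustrative): uvm_map_enter() always passes 0 for the
 * "wire" argument, so a wired neighbour (wired_count != 0) never merges
 * with a new mapping, and a UVM_MAP_NOMERGE or UVM_MAP_QUANTUM flag
 * mismatch between the two entries likewise defeats the merge.
 */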

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_link(map, after_where, entry) do { \
	KASSERT(entry->start < entry->end); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_unlink(map, entry) do { \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define	SAVE_HINT(map,check,value) do { \
	simple_lock(&(map)->hint_lock); \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock); \
} while (/*CONSTCOND*/ 0)

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define	VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)
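
/*
 * worked example (illustrative): with vm_map_min(map) == 0x1000 and a
 * caller-supplied range of [0x0, 0x800), start is clamped up to 0x1000,
 * which then exceeds end, so the final check collapses the range to the
 * empty range [0x800, 0x800).
 */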

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static struct vm_map_entry *
		uvm_mapent_alloc_split(struct vm_map *,
		    const struct vm_map_entry *, int,
		    struct uvm_mapent_reservation *);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
static struct vm_map_entry *
		uvm_kmapent_alloc(struct vm_map *, int);
static void	uvm_kmapent_free(struct vm_map_entry *);
static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_tree_sanity(struct vm_map *, const char *);
static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *);

static __inline int
uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
{

	if (a->start < b->start)
		return (-1);
	else if (a->start > b->start)
		return (1);

	return (0);
}

static __inline void
uvm_rb_augment(struct vm_map_entry *entry)
{

	entry->space = uvm_rb_subtree_space(entry);
}

RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);

RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);

static __inline vsize_t
uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry)
{
	/* XXX map is not used */

	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_subtree_space(const struct vm_map_entry *entry)
{
	vaddr_t space, tmp;

	space = entry->ownspace;
	if (RB_LEFT(entry, rb_entry)) {
		tmp = RB_LEFT(entry, rb_entry)->space;
		if (tmp > space)
			space = tmp;
	}

	if (RB_RIGHT(entry, rb_entry)) {
		tmp = RB_RIGHT(entry, rb_entry)->space;
		if (tmp > space)
			space = tmp;
	}

	return (space);
}
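
/*
 * illustrative example of the bookkeeping above: with adjacent entries
 * A = [0x1000, 0x2000) and B = [0x5000, 0x6000) on the linked list,
 * uvm_rb_space() gives A an ownspace of 0x3000 (the free gap between
 * A->end and B->start), while A->space, kept up to date through
 * uvm_rb_augment(), is the largest ownspace anywhere in A's subtree.
 * uvm_map_findspace() relies on this to prune subtrees that cannot
 * possibly hold a request of a given length.
 */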

static __inline void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	/* We need to traverse to the very top */
	do {
		entry->ownspace = uvm_rb_space(map, entry);
		entry->space = uvm_rb_subtree_space(entry);
	} while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	vaddr_t space = uvm_rb_space(map, entry);
	struct vm_map_entry *tmp;

	entry->ownspace = entry->space = space;
	tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
#ifdef DIAGNOSTIC
	if (tmp != NULL)
		panic("uvm_rb_insert: duplicate entry?");
#endif
	uvm_rb_fixup(map, entry);
	if (entry->prev != &map->header)
		uvm_rb_fixup(map, entry->prev);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	parent = RB_PARENT(entry, rb_entry);
	RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
	if (entry->prev != &map->header)
		uvm_rb_fixup(map, entry->prev);
	if (parent)
		uvm_rb_fixup(map, parent);
}

#ifdef DEBUG
int uvm_debug_check_rbtree = 0;
#define	uvm_tree_sanity(x,y) \
	if (uvm_debug_check_rbtree) \
		_uvm_tree_sanity(x,y)
#else
#define	uvm_tree_sanity(x,y)
#endif

int
_uvm_tree_sanity(struct vm_map *map, const char *name)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
		if (tmp->ownspace != uvm_rb_space(map, tmp)) {
			printf("%s: %d/%d ownspace %lx != %lx %s\n",
			    name, n + 1, map->nentries,
			    (ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
	}
	trtmp = NULL;
	RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
		if (tmp->space != uvm_rb_subtree_space(tmp)) {
			printf("%s: space %lx != %lx\n",
			    name, (ulong)tmp->space,
			    (ulong)uvm_rb_subtree_space(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("%s: corrupt: 0x%lx >= 0x%lx\n",
			    name, trtmp->start, tmp->start);
			goto error;
		}
		n++;

		trtmp = tmp;
	}

	if (n != map->nentries) {
		printf("%s: nentries: %d vs %d\n",
		    name, n, map->nentries);
		goto error;
	}

	for (tmp = map->header.next; tmp && tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
		if (trtmp != tmp) {
			printf("%s: lookup: %d: %p - %p: %p\n",
			    name, i, tmp, trtmp,
			    RB_PARENT(tmp, rb_entry));
			goto error;
		}
	}

	return (0);
error:
#if defined(DDB) && __GNUC__ < 4
	/* handy breakpoint location for error case */
	__asm(".globl treesanity_label\ntreesanity_label:");
#endif
	return (-1);
}

#ifdef DIAGNOSTIC
static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
#endif

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (VM_MAP_USE_KMAPENT(map)) {
		me = uvm_kmapent_alloc(map, flags);
	} else {
		me = pool_get(&uvm_map_entry_pool, pflags);
		if (__predict_false(me == NULL))
			return NULL;
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return (me);
}

/*
 * uvm_mapent_alloc_split: allocate a map entry for clipping.
 */

static struct vm_map_entry *
uvm_mapent_alloc_split(struct vm_map *map,
    const struct vm_map_entry *old_entry, int flags,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *me;

	KASSERT(!VM_MAP_USE_KMAPENT(map) ||
	    (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));

	if (old_entry->flags & UVM_MAP_QUANTUM) {
		int s;
		struct vm_map_kernel *vmk = vm_map_to_kernel(map);

		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me = vmk->vmk_merged_entries;
		KASSERT(me);
		vmk->vmk_merged_entries = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		KASSERT(me->flags & UVM_MAP_QUANTUM);
	} else {
		me = uvm_mapent_alloc(map, flags);
	}

	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_KERNEL) {
		uvm_kmapent_free(me);
	} else {
		pool_put(&uvm_map_entry_pool, me);
	}
}

/*
 * uvm_mapent_free_merged: free merged map entry
 *
 * => keep the entry if needed.
 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
 */

static void
uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
{

	KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);

	if (me->flags & UVM_MAP_QUANTUM) {
		/*
		 * keep this entry for later splitting.
		 */
		struct vm_map_kernel *vmk;
		int s;

		KASSERT(VM_MAP_IS_KERNEL(map));
		KASSERT(!VM_MAP_USE_KMAPENT(map) ||
		    (me->flags & UVM_MAP_KERNEL));

		vmk = vm_map_to_kernel(map);
		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me->next = vmk->vmk_merged_entries;
		vmk->vmk_merged_entries = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	} else {
		uvm_mapent_free(me);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static __inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

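	/*
	 * note: this copies every member that precedes
	 * uvm_map_entry_stop_copy in the structure layout; the entry's
	 * flags live at or after that marker, so dst keeps its own flags,
	 * which is what "preserving flags" above refers to.
	 */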
	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static __inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static __inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static __inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.  note that we allocate
 * and init the static pool of struct vm_map_entry *'s for the kernel here.
 */

void
uvm_map_init(void)
{
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 *
	 * XXX is it worth having a per-map lock instead?
	 */

	simple_lock_init(&uvm.kentry_lock);
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 * the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start, struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;
	vaddr_t new_adj;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_tree_sanity(map, "clip_start entry");

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */

	/* Does not change order for the RB tree */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}

	uvm_tree_sanity(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 * the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;
	vaddr_t new_adj; /* #bytes we move start forward */

	uvm_tree_sanity(map, "clip_end entry");

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap)
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);

	uvm_rb_fixup(map, entry);

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}

	uvm_tree_sanity(map, "clip_end leave");
}


/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.  note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */
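
/*
 * example (an illustrative sketch, not taken from a real caller):
 * establishing an anonymous, copy-on-write mapping could look like:
 *
 *	vaddr_t va = 0;
 *	int error;
 *
 *	error = uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
 *
 * this is case [2] above: uobj is NULL and uoffset is
 * UVM_UNKNOWN_OFFSET, so no PMAP_PREFER hint is used and uoffset is
 * zeroed by uvm_map_prepare() before being stored in the map entry.
 */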

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
	KASSERT((size & PAGE_MASK) == 0);

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 *
	 * besides, because we allocate entries for in-kernel maps
	 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
	 * allocate them before locking the map.
	 */

	new_entry = NULL;
	if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
	    map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
		if (flags & UVM_FLAG_QUANTUM)
			new_entry->flags |= UVM_MAP_QUANTUM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map)) {
		uvm_km_check_empty(*startp, *startp + size,
		    (map->flags & VM_MAP_INTRSAFE) != 0);
	}
#endif /* defined(DEBUG) */

	return error;
}

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC("uvm_map_prepare");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL ||
	    (map->flags & VM_MAP_INTRSAFE));

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_tree_sanity(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */

retry:
	if (vm_map_lock_try(map) == FALSE) {
		if (flags & UVM_FLAG_TRYLOCK) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	if ((prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags)) == NULL) {
		unsigned int timestamp;

		if ((flags & UVM_FLAG_WAITVA) == 0) {
			UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",
			    0,0,0,0);
			vm_map_unlock(map);
			return ENOMEM;
		}
		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		simple_lock(&map->flags_lock);
		map->flags |= VM_MAP_WANTVA;
		simple_unlock(&map->flags_lock);
		vm_map_unlock(map);

		/*
		 * wait until someone does unmap.
		 * XXX fragile locking
		 */

		simple_lock(&map->flags_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			ltsleep(&map->header, PVM, "vmmapva", 0,
			    &map->flags_lock);
		}
		simple_unlock(&map->flags_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMCNT_INCR(uvm_map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.  in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).  this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).  the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	return 0;
}

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);
	const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
	    UVM_MAP_QUANTUM : 0;

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC("uvm_map_enter");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	if (flags & UVM_FLAG_QUANTUM) {
		KASSERT(new_entry);
		KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
	}

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.  might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap)
			UVMCNT_INCR(map_kbackmerge);
		else
			UVMCNT_INCR(map_ubackmerge);
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		prev_entry->end += size;
		uvm_rb_fixup(map, prev_entry);

		uvm_tree_sanity(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMCNT_DECR(map_kbackmerge);
				UVMCNT_INCR(map_kbimerge);
			} else {
				UVMCNT_DECR(map_ubackmerge);
				UVMCNT_INCR(map_ubimerge);
			}
		} else {
			if (kmap)
				UVMCNT_INCR(map_kforwmerge);
			else
				UVMCNT_INCR(map_uforwmerge);
		}
		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 * (if merged, we've already detached)
		 */
		if (uobj && uobj->pgops->pgo_detach && !merged)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header)
				uvm_rb_fixup(map, prev_entry);
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_tree_sanity(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
		if (kmap)
			UVMCNT_INCR(map_knomerge);
		else
			UVMCNT_INCR(map_unomerge);

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK);
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;
done:
	vm_map_unlock(map);
	if (new_entry) {
		if (error == 0) {
			KDASSERT(merged);
			uvm_mapent_free_merged(map, new_entry);
		} else {
			uvm_mapent_free(new_entry);
		}
	}
	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, dead);
	}
	return error;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */
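
/*
 * example (an illustrative sketch of a typical caller):
 *
 *	struct vm_map_entry *entry;
 *
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		... va lies within [entry->start, entry->end) ...
 *	} else {
 *		... va is unmapped; entry is the entry just before
 *		    the gap (or &map->header if there is none) ...
 *	}
 */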

boolean_t
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	boolean_t use_tree = FALSE;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	UVMCNT_INCR(uvm_mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		if (cur != &map->header && cur->end > address) {
			UVMCNT_INCR(uvm_mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			return (TRUE);
		}

		if (map->nentries > 30)
			use_tree = TRUE;
	} else {

		/*
		 * invalid hint.  use tree.
		 */
		use_tree = TRUE;
	}

	uvm_tree_sanity(map, __func__);

	if (use_tree) {
		struct vm_map_entry *prev = &map->header;
		cur = RB_ROOT(&map->rbhead);

		/*
		 * Simple lookup in the tree.  Happens when the hint is
		 * invalid, or nentries reach a threshold.
		 */
		while (cur) {
			if (address >= cur->start) {
				if (address < cur->end) {
					*entry = cur;
					goto got;
				}
				prev = cur;
				cur = RB_RIGHT(cur, rb_entry);
			} else
				cur = RB_LEFT(cur, rb_entry);
		}
		*entry = prev;
		goto failed;
	}

	/*
	 * search linearly
	 */

	while (cur != &map->header) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
got:
				SAVE_HINT(map, map->hint, *entry);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				KDASSERT((*entry)->start <= address);
				KDASSERT(address < (*entry)->end);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
failed:
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (FALSE);
}

/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits,
 * 0 if it doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if (align != 0) {
		if ((*start & (align - 1)) != 0) {
			if (topdown)
				*start &= ~(align - 1);
			else
				*start = roundup(*start, align);
		}
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set in "flags" (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if "align" is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

struct vm_map_entry *
uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
    vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
    vsize_t align, int flags)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *child, *prev, *tmp;
	vaddr_t orig_hint;
	const int topdown = map->flags & VM_MAP_TOPDOWN;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
	    map, hint, length, flags);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	uvm_tree_sanity(map, "map_findspace entry");

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < vm_map_min(map)) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return (NULL);
		}
		hint = vm_map_min(map);
	}
	if (hint > vm_map_max(map)) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, vm_map_min(map), vm_map_max(map), 0);
		return (NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	/*
	 * @@@: there are four, no, eight cases to consider.
	 *
	 * 0: found,     fixed,     bottom up -> fail
	 * 1: found,     fixed,     top down  -> fail
	 * 2: found,     not fixed, bottom up -> start after entry->end,
	 *	loop up
	 * 3: found,     not fixed, top down  -> start before entry->start,
	 *	loop down
	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
	 * 6: not found, not fixed, bottom up -> check entry->next->start,
	 *	loop up
	 * 7: not found, not fixed, top down  -> check entry->next->start,
	 *	loop down
	 *
	 * as you can see, it reduces to roughly five cases, and that
	 * adding top down mapping only adds one unique case (without
	 * it, there would be four cases).
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
		entry = map->first_free;
	} else {
		if (uvm_map_lookup_entry(map, hint, &entry)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist, "<- fixed & VA in use",
				    0, 0, 0, 0);
				return (NULL);
			}
			if (topdown)
				/* Start from lower gap. */
				entry = entry->prev;
		} else if (flags & UVM_FLAG_FIXED) {
			if (entry->next->start >= hint + length &&
			    hint + length > hint)
				goto found;

			/* "hint" address is gap but too small */
			UVMHIST_LOG(maphist, "<- fixed mapping failed",
			    0, 0, 0, 0);
			return (NULL); /* only one shot at it ... */
		} else {
			/*
			 * See if given hint fits in this gap.
			 */
			switch (uvm_map_space_avail(&hint, length,
			    uoffset, align, topdown, entry)) {
			case 1:
				goto found;
			case -1:
				goto wraparound;
			}

			if (topdown) {
				/*
				 * Still there is a chance to fit
				 * if hint > entry->end.
				 */
			} else {
				/* Start from higher gap. */
				entry = entry->next;
				if (entry == &map->header)
					goto notfound;
				goto nextgap;
			}
		}
	}

	/*
	 * Note that the UVM_FLAG_FIXED case is already handled above.
	 */
	KDASSERT((flags & UVM_FLAG_FIXED) == 0);

	/* Try to find the space in the red-black tree */

	/* Check slot before any entry */
	hint = topdown ? entry->next->start - length : entry->end;
	switch (uvm_map_space_avail(&hint, length, uoffset, align,
	    topdown, entry)) {
	case 1:
		goto found;
	case -1:
		goto wraparound;
	}

nextgap:
	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
	/* If there is not enough space in the whole tree, we fail */
	tmp = RB_ROOT(&map->rbhead);
	if (tmp == NULL || tmp->space < length)
		goto notfound;

	prev = NULL; /* previous candidate */

	/* Find an entry close to hint that has enough space */
	for (; tmp;) {
		KASSERT(tmp->next->start == tmp->end + tmp->ownspace);
		if (topdown) {
			if (tmp->next->start < hint + length &&
			    (prev == NULL || tmp->end > prev->end)) {
				if (tmp->ownspace >= length)
					prev = tmp;
				else if ((child = RB_LEFT(tmp, rb_entry))
				    != NULL && child->space >= length)
					prev = tmp;
			}
		} else {
			if (tmp->end >= hint &&
			    (prev == NULL || tmp->end < prev->end)) {
				if (tmp->ownspace >= length)
					prev = tmp;
				else if ((child = RB_RIGHT(tmp, rb_entry))
				    != NULL && child->space >= length)
					prev = tmp;
			}
		}
		if (tmp->next->start < hint + length)
			child = RB_RIGHT(tmp, rb_entry);
		else if (tmp->end > hint)
			child = RB_LEFT(tmp, rb_entry);
		else {
			if (tmp->ownspace >= length)
				break;
			if (topdown)
				child = RB_LEFT(tmp, rb_entry);
			else
				child = RB_RIGHT(tmp, rb_entry);
		}
		if (child == NULL || child->space < length)
			break;
		tmp = child;
	}

	if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
		/*
		 * Check if the entry that we found satisfies the
		 * space requirement.
		 */
		if (topdown) {
			if (hint > tmp->next->start - length)
				hint = tmp->next->start - length;
		} else {
			if (hint < tmp->end)
				hint = tmp->end;
		}
		switch (uvm_map_space_avail(&hint, length, uoffset, align,
		    topdown, tmp)) {
		case 1:
			entry = tmp;
			goto found;
		case -1:
			goto wraparound;
		}
		if (tmp->ownspace >= length)
			goto listsearch;
	}
	if (prev == NULL)
		goto notfound;

	if (topdown) {
		KASSERT(orig_hint >= prev->next->start - length ||
		    prev->next->start - length > prev->next->start);
		hint = prev->next->start - length;
	} else {
		KASSERT(orig_hint <= prev->end);
		hint = prev->end;
	}
	switch (uvm_map_space_avail(&hint, length, uoffset, align,
	    topdown, prev)) {
	case 1:
		entry = prev;
		goto found;
	case -1:
		goto wraparound;
	}
	if (prev->ownspace >= length)
		goto listsearch;

	if (topdown)
		tmp = RB_LEFT(prev, rb_entry);
	else
		tmp = RB_RIGHT(prev, rb_entry);
	for (;;) {
		KASSERT(tmp && tmp->space >= length);
		if (topdown)
			child = RB_RIGHT(tmp, rb_entry);
		else
			child = RB_LEFT(tmp, rb_entry);
		if (child && child->space >= length) {
			tmp = child;
			continue;
		}
		if (tmp->ownspace >= length)
			break;
		if (topdown)
			tmp = RB_LEFT(tmp, rb_entry);
		else
			tmp = RB_RIGHT(tmp, rb_entry);
	}

	if (topdown) {
		KASSERT(orig_hint >= tmp->next->start - length ||
		    tmp->next->start - length > tmp->next->start);
		hint = tmp->next->start - length;
	} else {
		KASSERT(orig_hint <= tmp->end);
		hint = tmp->end;
	}
	switch (uvm_map_space_avail(&hint, length, uoffset, align,
	    topdown, tmp)) {
	case 1:
		entry = tmp;
		goto found;
	case -1:
		goto wraparound;
	}

	/*
	 * The tree fails to find an entry because of offset or alignment
	 * restrictions.  Search the list instead.
	 */
listsearch:
	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end = base VA of current gap,
	 *	 entry->next->start = VA of end of current gap
	 */

	for (;;) {
		/* Update hint for current gap. */
		hint = topdown ? entry->next->start - length : entry->end;

		/* See if it fits. */
		switch (uvm_map_space_avail(&hint, length, uoffset, align,
		    topdown, entry)) {
		case 1:
			goto found;
		case -1:
			goto wraparound;
		}

		/* Advance to next/previous gap */
		if (topdown) {
			if (entry == &map->header) {
				UVMHIST_LOG(maphist, "<- failed (off start)",
				    0,0,0,0);
				goto notfound;
			}
			entry = entry->prev;
		} else {
			entry = entry->next;
			if (entry == &map->header) {
				UVMHIST_LOG(maphist, "<- failed (off end)",
				    0,0,0,0);
				goto notfound;
			}
		}
	}

found:
	SAVE_HINT(map, map->hint, entry);
	*result = hint;
	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
	KASSERT(topdown || hint >= orig_hint);
	KASSERT(!topdown || hint <= orig_hint);
	KASSERT(entry->end <= hint);
	KASSERT(hint + length <= entry->next->start);
	return (entry);

wraparound:
	UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);

	return (NULL);

notfound:
	UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);

	return (NULL);
}

/*
 *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => we return a list of map entries that we've removed from the map
 *    in "entry_list"
 */
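
/*
 * example (an illustrative sketch of the usual two-phase unmap
 * sequence; see the discussion of first_entry in the body below):
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 *
 * keeping the detach step outside the map lock is the point of the
 * scheme: reference dropping may sleep on pager I/O.
 */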
1823
1824 void
1825 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
1826 struct vm_map_entry **entry_list /* OUT */,
1827 struct uvm_mapent_reservation *umr, int flags)
1828 {
1829 struct vm_map_entry *entry, *first_entry, *next;
1830 vaddr_t len;
1831 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
1832
1833 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
1834 map, start, end, 0);
1835 VM_MAP_RANGE_CHECK(map, start, end);
1836
1837 uvm_tree_sanity(map, "unmap_remove entry");
1838
1839 /*
1840 * find first entry
1841 */
1842
1843 if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
1844 /* clip and go... */
1845 entry = first_entry;
1846 UVM_MAP_CLIP_START(map, entry, start, umr);
1847 /* critical! prevents stale hint */
1848 SAVE_HINT(map, entry, entry->prev);
1849 } else {
1850 entry = first_entry->next;
1851 }
1852
1853 /*
1854 * Save the free space hint
1855 */
1856
1857 if (map->first_free->start >= start)
1858 map->first_free = entry->prev;
1859
1860 /*
1861 * note: we now re-use first_entry for a different task. we remove
1862 * a number of map entries from the map and save them in a linked
1863 * list headed by "first_entry". once we remove them from the map
1864 * the caller should unlock the map and drop the references to the
1865 * backing objects [c.f. uvm_unmap_detach]. the object is to
1866 * separate unmapping from reference dropping. why?
1867 * [1] the map has to be locked for unmapping
1868 * [2] the map need not be locked for reference dropping
1869 * [3] dropping references may trigger pager I/O, and if we hit
1870 * a pager that does synchronous I/O we may have to wait for it.
1871 * [4] we would like all waiting for I/O to occur with maps unlocked
1872 * so that we don't block other threads.
1873 */
1874
1875 first_entry = NULL;
1876 *entry_list = NULL;
1877
1878 /*
1879 * break up the area into map entry sized regions and unmap. note
1880 * that all mappings have to be removed before we can even consider
1881 * dropping references to amaps or VM objects (otherwise we could end
1882 * up with a mapping to a page on the free list which would be very bad)
1883 */
1884
1885 while ((entry != &map->header) && (entry->start < end)) {
1886 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
1887
1888 UVM_MAP_CLIP_END(map, entry, end, umr);
1889 next = entry->next;
1890 len = entry->end - entry->start;
1891
1892 /*
1893 * unwire before removing addresses from the pmap; otherwise
1894 * unwiring will put the entries back into the pmap (XXX).
1895 */
1896
1897 if (VM_MAPENT_ISWIRED(entry)) {
1898 uvm_map_entry_unwire(map, entry);
1899 }
1900 if (flags & UVM_FLAG_VAONLY) {
1901
1902 /* nothing */
1903
1904 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
1905
1906 /*
1907 * if the map is non-pageable, any pages mapped there
1908 * must be wired and entered with pmap_kenter_pa(),
1909 * and we should free any such pages immediately.
1910 * this is mostly used for kmem_map and mb_map.
1911 */
1912
1913 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
1914 uvm_km_pgremove_intrsafe(entry->start,
1915 entry->end);
1916 pmap_kremove(entry->start, len);
1917 }
1918 } else if (UVM_ET_ISOBJ(entry) &&
1919 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
1920 KASSERT(vm_map_pmap(map) == pmap_kernel());
1921
1922 /*
1923 * note: kernel object mappings are currently used in
1924 * two ways:
1925 * [1] "normal" mappings of pages in the kernel object
1926 * [2] uvm_km_valloc'd allocations in which we
1927 * pmap_enter in some non-kernel-object page
1928 * (e.g. vmapbuf).
1929 *
1930 * for case [1], we need to remove the mapping from
1931 * the pmap and then remove the page from the kernel
1932 * object (because, once pages in a kernel object are
1933 * unmapped they are no longer needed, unlike, say,
1934 * a vnode where you might want the data to persist
1935 * until flushed out of a queue).
1936 *
1937 * for case [2], we need to remove the mapping from
1938 * the pmap. there shouldn't be any pages at the
1939 * specified offset in the kernel object [but it
1940 * doesn't hurt to call uvm_km_pgremove just to be
1941 * safe?]
1942 *
1943 * uvm_km_pgremove currently does the following:
1944 * for pages in the kernel object in range:
1945 * - drops the swap slot
1946 * - uvm_pagefree the page
1947 */
1948
1949 /*
1950 * remove mappings from pmap and drop the pages
1951 * from the object. offsets are always relative
1952 * to vm_map_min(kernel_map).
1953 */
1954
1955 pmap_remove(pmap_kernel(), entry->start,
1956 entry->start + len);
1957 uvm_km_pgremove(entry->start, entry->end);
1958
1959 /*
1960 * null out kernel_object reference, we've just
1961 * dropped it
1962 */
1963
1964 entry->etype &= ~UVM_ET_OBJ;
1965 entry->object.uvm_obj = NULL;
1966 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
1967
1968 /*
1969 * remove mappings the standard way.
1970 */
1971
1972 pmap_remove(map->pmap, entry->start, entry->end);
1973 }
1974
1975 #if defined(DEBUG)
1976 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
1977
1978 /*
1979 					 * check if there's any remaining mapping,
1980 					 * which would be a bug in the caller.
1981 */
1982
1983 vaddr_t va;
1984 for (va = entry->start; va < entry->end;
1985 va += PAGE_SIZE) {
1986 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
1987 panic("uvm_unmap_remove: has mapping");
1988 }
1989 }
1990
1991 if (VM_MAP_IS_KERNEL(map)) {
1992 uvm_km_check_empty(entry->start, entry->end,
1993 (map->flags & VM_MAP_INTRSAFE) != 0);
1994 }
1995 }
1996 #endif /* defined(DEBUG) */
1997
1998 /*
1999 * remove entry from map and put it on our list of entries
2000 * that we've nuked. then go to next entry.
2001 */
2002
2003 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2004
2005 /* critical! prevents stale hint */
2006 SAVE_HINT(map, entry, entry->prev);
2007
2008 uvm_map_entry_unlink(map, entry);
2009 KASSERT(map->size >= len);
2010 map->size -= len;
2011 entry->prev = NULL;
2012 entry->next = first_entry;
2013 first_entry = entry;
2014 entry = next;
2015 }
2016 if ((map->flags & VM_MAP_DYING) == 0) {
2017 pmap_update(vm_map_pmap(map));
2018 }
2019
2020 uvm_tree_sanity(map, "unmap_remove leave");
2021
2022 /*
2023 * now we've cleaned up the map and are ready for the caller to drop
2024 * references to the mapped objects.
2025 */
2026
2027 *entry_list = first_entry;
2028 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2029
2030 simple_lock(&map->flags_lock);
2031 if (map->flags & VM_MAP_WANTVA) {
2032 map->flags &= ~VM_MAP_WANTVA;
2033 wakeup(&map->header);
2034 }
2035 simple_unlock(&map->flags_lock);
2036 }
2037
2038 /*
2039 * uvm_unmap_detach: drop references in a chain of map entries
2040 *
2041 * => we will free the map entries as we traverse the list.
2042 */
2043
2044 void
2045 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2046 {
2047 struct vm_map_entry *next_entry;
2048 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2049
2050 while (first_entry) {
2051 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2052 UVMHIST_LOG(maphist,
2053 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2054 first_entry, first_entry->aref.ar_amap,
2055 first_entry->object.uvm_obj,
2056 UVM_ET_ISSUBMAP(first_entry));
2057
2058 /*
2059 * drop reference to amap, if we've got one
2060 */
2061
2062 if (first_entry->aref.ar_amap)
2063 uvm_map_unreference_amap(first_entry, flags);
2064
2065 /*
2066 * drop reference to our backing object, if we've got one
2067 */
2068
2069 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2070 if (UVM_ET_ISOBJ(first_entry) &&
2071 first_entry->object.uvm_obj->pgops->pgo_detach) {
2072 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2073 (first_entry->object.uvm_obj);
2074 }
2075 next_entry = first_entry->next;
2076 uvm_mapent_free(first_entry);
2077 first_entry = next_entry;
2078 }
2079 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2080 }
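
/*
 * usage sketch (illustrative only): the remove/detach split described
 * in the comments above.  "map", "start" and "end" are assumed to be
 * supplied by the caller.
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */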
2081
2082 /*
2083 * E X T R A C T I O N F U N C T I O N S
2084 */
2085
2086 /*
2087 * uvm_map_reserve: reserve space in a vm_map for future use.
2088 *
2089 * => we reserve space in a map by putting a dummy map entry in the
2090 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2091 * => map should be unlocked (we will write lock it)
2092 * => we return true if we were able to reserve space
2093 * => XXXCDC: should be inline?
2094 */
2095
2096 int
2097 uvm_map_reserve(struct vm_map *map, vsize_t size,
2098 vaddr_t offset /* hint for pmap_prefer */,
2099 vsize_t align /* alignment hint */,
2100 vaddr_t *raddr /* IN:hint, OUT: reserved VA */)
2101 {
2102 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2103
2104 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2105 map,size,offset,raddr);
2106
2107 size = round_page(size);
2108 if (*raddr < vm_map_min(map))
2109 *raddr = vm_map_min(map); /* hint */
2110
2111 /*
2112 * reserve some virtual space.
2113 */
2114
2115 if (uvm_map(map, raddr, size, NULL, offset, 0,
2116 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2117 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
2118 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2119 return (FALSE);
2120 }
2121
2122 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2123 return (TRUE);
2124 }
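
/*
 * usage sketch (illustrative; "map" and "size" are assumed to be
 * supplied by the caller): reserve "size" bytes of VA, getting the
 * chosen address back in "va".
 *
 *	vaddr_t va = vm_map_min(map);		(initial hint)
 *
 *	if (uvm_map_reserve(map, size, 0, 0, &va) == FALSE)
 *		return ENOMEM;
 *	("va" now names a blank, no-merge entry of "size" bytes)
 */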
2125
2126 /*
2127 * uvm_map_replace: replace a reserved (blank) area of memory with
2128 * real mappings.
2129 *
2130 * => caller must WRITE-LOCK the map
2131 * => we return TRUE if replacement was a success
2132  * => we expect the newents chain to have nnewents entries on it and
2133 * we expect newents->prev to point to the last entry on the list
2134 * => note newents is allowed to be NULL
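 * => chain format sketch (illustrative), for nnewents == 3:
 *	newents == e1, e1->next == e2, e2->next == e3, e3->next == NULL,
 *	and newents->prev == e3 (the head's prev names the tail)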
2135 */
2136
2137 int
2138 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2139 struct vm_map_entry *newents, int nnewents)
2140 {
2141 struct vm_map_entry *oldent, *last;
2142
2143 uvm_tree_sanity(map, "map_replace entry");
2144
2145 /*
2146 * first find the blank map entry at the specified address
2147 */
2148
2149 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2150 return (FALSE);
2151 }
2152
2153 /*
2154 * check to make sure we have a proper blank entry
2155 */
2156
2157 if (oldent->start != start || oldent->end != end ||
2158 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2159 return (FALSE);
2160 }
2161
2162 #ifdef DIAGNOSTIC
2163
2164 /*
2165 * sanity check the newents chain
2166 */
2167
2168 {
2169 struct vm_map_entry *tmpent = newents;
2170 int nent = 0;
2171 vaddr_t cur = start;
2172
2173 while (tmpent) {
2174 nent++;
2175 if (tmpent->start < cur)
2176 panic("uvm_map_replace1");
2177 if (tmpent->start > tmpent->end || tmpent->end > end) {
2178 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2179 tmpent->start, tmpent->end, end);
2180 panic("uvm_map_replace2");
2181 }
2182 cur = tmpent->end;
2183 if (tmpent->next) {
2184 if (tmpent->next->prev != tmpent)
2185 panic("uvm_map_replace3");
2186 } else {
2187 if (newents->prev != tmpent)
2188 panic("uvm_map_replace4");
2189 }
2190 tmpent = tmpent->next;
2191 }
2192 if (nent != nnewents)
2193 panic("uvm_map_replace5");
2194 }
2195 #endif
2196
2197 /*
2198 * map entry is a valid blank! replace it. (this does all the
2199 * work of map entry link/unlink...).
2200 */
2201
2202 if (newents) {
2203 last = newents->prev;
2204
2205 /* critical: flush stale hints out of map */
2206 SAVE_HINT(map, map->hint, newents);
2207 if (map->first_free == oldent)
2208 map->first_free = last;
2209
2210 last->next = oldent->next;
2211 last->next->prev = last;
2212
2213 /* Fix RB tree */
2214 uvm_rb_remove(map, oldent);
2215
2216 newents->prev = oldent->prev;
2217 newents->prev->next = newents;
2218 map->nentries = map->nentries + (nnewents - 1);
2219
2220 /* Fixup the RB tree */
2221 {
2222 int i;
2223 struct vm_map_entry *tmp;
2224
2225 tmp = newents;
2226 for (i = 0; i < nnewents && tmp; i++) {
2227 uvm_rb_insert(map, tmp);
2228 tmp = tmp->next;
2229 }
2230 }
2231 } else {
2232
2233 /* critical: flush stale hints out of map */
2234 SAVE_HINT(map, map->hint, oldent->prev);
2235 if (map->first_free == oldent)
2236 map->first_free = oldent->prev;
2237
2238 /* NULL list of new entries: just remove the old one */
2239 uvm_map_entry_unlink(map, oldent);
2240 }
2241
2242 uvm_tree_sanity(map, "map_replace leave");
2243
2244 /*
2245 * now we can free the old blank entry, unlock the map and return.
2246 */
2247
2248 uvm_mapent_free(oldent);
2249 return (TRUE);
2250 }
2251
2252 /*
2253 * uvm_map_extract: extract a mapping from a map and put it somewhere
2254 * (maybe removing the old mapping)
2255 *
2256 * => maps should be unlocked (we will write lock them)
2257 * => returns 0 on success, error code otherwise
2258 * => start must be page aligned
2259 * => len must be page sized
2260 * => flags:
2261 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2262 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2263 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2264 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2265 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2266 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2267 * be used from within the kernel in a kernel level map <<<
2268 */
2269
2270 int
2271 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2272 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2273 {
2274 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2275 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2276 *deadentry, *oldentry;
2277 vsize_t elen;
2278 int nchain, error, copy_ok;
2279 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2280
2281 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2282 len,0);
2283 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2284
2285 uvm_tree_sanity(srcmap, "map_extract src enter");
2286 uvm_tree_sanity(dstmap, "map_extract dst enter");
2287
2288 /*
2289 * step 0: sanity check: start must be on a page boundary, length
2290 * must be page sized. can't ask for CONTIG/QREF if you asked for
2291 * REMOVE.
2292 */
2293
2294 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2295 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2296 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2297
2298 /*
2299 * step 1: reserve space in the target map for the extracted area
2300 */
2301
2302 dstaddr = vm_map_min(dstmap);
2303 if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
2304 return (ENOMEM);
2305 *dstaddrp = dstaddr; /* pass address back to caller */
2306 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2307
2308 /*
2309 * step 2: setup for the extraction process loop by init'ing the
2310 * map entry chain, locking src map, and looking up the first useful
2311 * entry in the map.
2312 */
2313
2314 end = start + len;
2315 newend = dstaddr + len;
2316 chain = endchain = NULL;
2317 nchain = 0;
2318 vm_map_lock(srcmap);
2319
2320 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2321
2322 /* "start" is within an entry */
2323 if (flags & UVM_EXTRACT_QREF) {
2324
2325 /*
2326 * for quick references we don't clip the entry, so
2327 * the entry may map space "before" the starting
2328 * virtual address... this is the "fudge" factor
2329 * (which can be non-zero only the first time
2330 * through the "while" loop in step 3).
2331 */
2332
2333 fudge = start - entry->start;
2334 } else {
2335
2336 /*
2337 			 * normal reference: we clip the entry to fit (thus
2338 * fudge is zero)
2339 */
2340
2341 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2342 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2343 fudge = 0;
2344 }
2345 } else {
2346
2347 /* "start" is not within an entry ... skip to next entry */
2348 if (flags & UVM_EXTRACT_CONTIG) {
2349 error = EINVAL;
2350 goto bad; /* definite hole here ... */
2351 }
2352
2353 entry = entry->next;
2354 fudge = 0;
2355 }
2356
2357 /* save values from srcmap for step 6 */
2358 orig_entry = entry;
2359 orig_fudge = fudge;
2360
2361 /*
2362 * step 3: now start looping through the map entries, extracting
2363 * as we go.
2364 */
2365
2366 while (entry->start < end && entry != &srcmap->header) {
2367
2368 /* if we are not doing a quick reference, clip it */
2369 if ((flags & UVM_EXTRACT_QREF) == 0)
2370 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2371
2372 /* clear needs_copy (allow chunking) */
2373 if (UVM_ET_ISNEEDSCOPY(entry)) {
2374 amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
2375 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2376 error = ENOMEM;
2377 goto bad;
2378 }
2379
2380 /* amap_copy could clip (during chunk)! update fudge */
2381 if (fudge) {
2382 fudge = start - entry->start;
2383 orig_fudge = fudge;
2384 }
2385 }
2386
2387 /* calculate the offset of this from "start" */
2388 oldoffset = (entry->start + fudge) - start;
2389
2390 /* allocate a new map entry */
2391 newentry = uvm_mapent_alloc(dstmap, 0);
2392 if (newentry == NULL) {
2393 error = ENOMEM;
2394 goto bad;
2395 }
2396
2397 /* set up new map entry */
2398 newentry->next = NULL;
2399 newentry->prev = endchain;
2400 newentry->start = dstaddr + oldoffset;
2401 newentry->end =
2402 newentry->start + (entry->end - (entry->start + fudge));
2403 if (newentry->end > newend || newentry->end < newentry->start)
2404 newentry->end = newend;
2405 newentry->object.uvm_obj = entry->object.uvm_obj;
2406 if (newentry->object.uvm_obj) {
2407 if (newentry->object.uvm_obj->pgops->pgo_reference)
2408 newentry->object.uvm_obj->pgops->
2409 pgo_reference(newentry->object.uvm_obj);
2410 newentry->offset = entry->offset + fudge;
2411 } else {
2412 newentry->offset = 0;
2413 }
2414 newentry->etype = entry->etype;
2415 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2416 entry->max_protection : entry->protection;
2417 newentry->max_protection = entry->max_protection;
2418 newentry->inheritance = entry->inheritance;
2419 newentry->wired_count = 0;
2420 newentry->aref.ar_amap = entry->aref.ar_amap;
2421 if (newentry->aref.ar_amap) {
2422 newentry->aref.ar_pageoff =
2423 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2424 uvm_map_reference_amap(newentry, AMAP_SHARED |
2425 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2426 } else {
2427 newentry->aref.ar_pageoff = 0;
2428 }
2429 newentry->advice = entry->advice;
2430
2431 /* now link it on the chain */
2432 nchain++;
2433 if (endchain == NULL) {
2434 chain = endchain = newentry;
2435 } else {
2436 endchain->next = newentry;
2437 endchain = newentry;
2438 }
2439
2440 /* end of 'while' loop! */
2441 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2442 (entry->next == &srcmap->header ||
2443 entry->next->start != entry->end)) {
2444 error = EINVAL;
2445 goto bad;
2446 }
2447 entry = entry->next;
2448 fudge = 0;
2449 }
2450
2451 /*
2452 * step 4: close off chain (in format expected by uvm_map_replace)
2453 */
2454
2455 if (chain)
2456 chain->prev = endchain;
2457
2458 /*
2459 * step 5: attempt to lock the dest map so we can pmap_copy.
2460 * note usage of copy_ok:
2461 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2462 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2463 */
2464
2465 if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
2466 copy_ok = 1;
2467 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2468 nchain)) {
2469 if (srcmap != dstmap)
2470 vm_map_unlock(dstmap);
2471 error = EIO;
2472 goto bad;
2473 }
2474 } else {
2475 copy_ok = 0;
2476 		/* replace deferred until step 7 */
2477 }
2478
2479 /*
2480 * step 6: traverse the srcmap a second time to do the following:
2481 * - if we got a lock on the dstmap do pmap_copy
2482 * - if UVM_EXTRACT_REMOVE remove the entries
2483 * we make use of orig_entry and orig_fudge (saved in step 2)
2484 */
2485
2486 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2487
2488 /* purge possible stale hints from srcmap */
2489 if (flags & UVM_EXTRACT_REMOVE) {
2490 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2491 if (srcmap->first_free->start >= start)
2492 srcmap->first_free = orig_entry->prev;
2493 }
2494
2495 entry = orig_entry;
2496 fudge = orig_fudge;
2497 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2498
2499 while (entry->start < end && entry != &srcmap->header) {
2500 if (copy_ok) {
2501 oldoffset = (entry->start + fudge) - start;
2502 elen = MIN(end, entry->end) -
2503 (entry->start + fudge);
2504 pmap_copy(dstmap->pmap, srcmap->pmap,
2505 dstaddr + oldoffset, elen,
2506 entry->start + fudge);
2507 }
2508
2509 /* we advance "entry" in the following if statement */
2510 if (flags & UVM_EXTRACT_REMOVE) {
2511 pmap_remove(srcmap->pmap, entry->start,
2512 entry->end);
2513 oldentry = entry; /* save entry */
2514 entry = entry->next; /* advance */
2515 uvm_map_entry_unlink(srcmap, oldentry);
2516 /* add to dead list */
2517 oldentry->next = deadentry;
2518 deadentry = oldentry;
2519 } else {
2520 entry = entry->next; /* advance */
2521 }
2522
2523 /* end of 'while' loop */
2524 fudge = 0;
2525 }
2526 pmap_update(srcmap->pmap);
2527
2528 /*
2529 * unlock dstmap. we will dispose of deadentry in
2530 * step 7 if needed
2531 */
2532
2533 if (copy_ok && srcmap != dstmap)
2534 vm_map_unlock(dstmap);
2535
2536 } else {
2537 deadentry = NULL;
2538 }
2539
2540 /*
2541 * step 7: we are done with the source map, unlock. if copy_ok
2542 * is 0 then we have not replaced the dummy mapping in dstmap yet
2543 * and we need to do so now.
2544 */
2545
2546 vm_map_unlock(srcmap);
2547 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2548 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2549
2550 /* now do the replacement if we didn't do it in step 5 */
2551 if (copy_ok == 0) {
2552 vm_map_lock(dstmap);
2553 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2554 nchain);
2555 vm_map_unlock(dstmap);
2556
2557 if (error == FALSE) {
2558 error = EIO;
2559 goto bad2;
2560 }
2561 }
2562
2563 uvm_tree_sanity(srcmap, "map_extract src leave");
2564 uvm_tree_sanity(dstmap, "map_extract dst leave");
2565
2566 return (0);
2567
2568 /*
2569 * bad: failure recovery
2570 */
2571 bad:
2572 vm_map_unlock(srcmap);
2573 bad2: /* src already unlocked */
2574 if (chain)
2575 uvm_unmap_detach(chain,
2576 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2577
2578 uvm_tree_sanity(srcmap, "map_extract src err leave");
2579 uvm_tree_sanity(dstmap, "map_extract dst err leave");
2580
2581 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2582 return (error);
2583 }
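
/*
 * usage sketch (illustrative): pull "len" bytes at "srcva" out of
 * "srcmap" and into kernel_map, removing the source mapping.  the
 * names are local to the example.
 *
 *	vaddr_t dstva;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcva, len, kernel_map,
 *	    &dstva, UVM_EXTRACT_REMOVE);
 *	if (error)
 *		return error;
 *	(remember: REMOVE may not be combined with CONTIG or QREF)
 */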
2584
2585 /* end of extraction functions */
2586
2587 /*
2588 * uvm_map_submap: punch down part of a map into a submap
2589 *
2590 * => only the kernel_map is allowed to be submapped
2591 * => the purpose of submapping is to break up the locking granularity
2592 * of a larger map
2593 * => the range specified must have been mapped previously with a uvm_map()
2594 * call [with uobj==NULL] to create a blank map entry in the main map.
2595 * [And it had better still be blank!]
2596 * => maps which contain submaps should never be copied or forked.
2597 * => to remove a submap, use uvm_unmap() on the main map
2598 * and then uvm_map_deallocate() the submap.
2599 * => main map must be unlocked.
2600 * => submap must have been init'd and have a zero reference count.
2601 * [need not be locked as we don't actually reference it]
2602 */
2603
2604 int
2605 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2606 struct vm_map *submap)
2607 {
2608 struct vm_map_entry *entry;
2609 struct uvm_mapent_reservation umr;
2610 int error;
2611
2612 uvm_mapent_reserve(map, &umr, 2, 0);
2613
2614 vm_map_lock(map);
2615 VM_MAP_RANGE_CHECK(map, start, end);
2616
2617 if (uvm_map_lookup_entry(map, start, &entry)) {
2618 UVM_MAP_CLIP_START(map, entry, start, &umr);
2619 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
2620 } else {
2621 entry = NULL;
2622 }
2623
2624 if (entry != NULL &&
2625 entry->start == start && entry->end == end &&
2626 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2627 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2628 entry->etype |= UVM_ET_SUBMAP;
2629 entry->object.sub_map = submap;
2630 entry->offset = 0;
2631 uvm_map_reference(submap);
2632 error = 0;
2633 } else {
2634 error = EINVAL;
2635 }
2636 vm_map_unlock(map);
2637
2638 uvm_mapent_unreserve(map, &umr);
2639
2640 return error;
2641 }
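
/*
 * usage sketch (illustrative), following the rules above: first map a
 * blank entry, then punch the submap in.  "size" and "submap" are
 * assumed to be supplied by the caller; error handling is omitted.
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *
 *	uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
 *	uvm_map_submap(kernel_map, va, va + size, submap);
 */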
2642
2643 /*
2644 * uvm_map_setup_kernel: init in-kernel map
2645 *
2646 * => map must not be in service yet.
2647 */
2648
2649 void
2650 uvm_map_setup_kernel(struct vm_map_kernel *map,
2651 vaddr_t vmin, vaddr_t vmax, int flags)
2652 {
2653
2654 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
2655
2656 LIST_INIT(&map->vmk_kentry_free);
2657 map->vmk_merged_entries = NULL;
2658 }
2659
2660
2661 /*
2662 * uvm_map_protect: change map protection
2663 *
2664 * => set_max means set max_protection.
2665 * => map must be unlocked.
2666 */
2667
2668 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
2669 ~VM_PROT_WRITE : VM_PROT_ALL)
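
/*
 * illustrative note: a copy-on-write entry whose protection is
 * VM_PROT_READ|VM_PROT_WRITE is entered into the pmap with
 * "prot & MASK(entry)", i.e. read-only, so the first write still
 * faults and the copy can be made.
 */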
2670
2671 int
2672 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2673 vm_prot_t new_prot, boolean_t set_max)
2674 {
2675 struct vm_map_entry *current, *entry;
2676 int error = 0;
2677 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2678 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
2679 map, start, end, new_prot);
2680
2681 vm_map_lock(map);
2682 VM_MAP_RANGE_CHECK(map, start, end);
2683 if (uvm_map_lookup_entry(map, start, &entry)) {
2684 UVM_MAP_CLIP_START(map, entry, start, NULL);
2685 } else {
2686 entry = entry->next;
2687 }
2688
2689 /*
2690 * make a first pass to check for protection violations.
2691 */
2692
2693 current = entry;
2694 while ((current != &map->header) && (current->start < end)) {
2695 if (UVM_ET_ISSUBMAP(current)) {
2696 error = EINVAL;
2697 goto out;
2698 }
2699 if ((new_prot & current->max_protection) != new_prot) {
2700 error = EACCES;
2701 goto out;
2702 }
2703 /*
2704 * Don't allow VM_PROT_EXECUTE to be set on entries that
2705 * point to vnodes that are associated with a NOEXEC file
2706 * system.
2707 */
2708 if (UVM_ET_ISOBJ(current) &&
2709 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
2710 struct vnode *vp =
2711 (struct vnode *) current->object.uvm_obj;
2712
2713 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
2714 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
2715 error = EACCES;
2716 goto out;
2717 }
2718 }
2719 current = current->next;
2720 }
2721
2722 /* go back and fix up protections (no need to clip this time). */
2723
2724 current = entry;
2725 while ((current != &map->header) && (current->start < end)) {
2726 vm_prot_t old_prot;
2727
2728 UVM_MAP_CLIP_END(map, current, end, NULL);
2729 old_prot = current->protection;
2730 if (set_max)
2731 current->protection =
2732 (current->max_protection = new_prot) & old_prot;
2733 else
2734 current->protection = new_prot;
2735
2736 /*
2737 * update physical map if necessary. worry about copy-on-write
2738 * here -- CHECK THIS XXX
2739 */
2740
2741 if (current->protection != old_prot) {
2742 /* update pmap! */
2743 pmap_protect(map->pmap, current->start, current->end,
2744 			    current->protection & MASK(current));
2745
2746 /*
2747 * If this entry points at a vnode, and the
2748 * protection includes VM_PROT_EXECUTE, mark
2749 * the vnode as VEXECMAP.
2750 */
2751 if (UVM_ET_ISOBJ(current)) {
2752 struct uvm_object *uobj =
2753 current->object.uvm_obj;
2754
2755 if (UVM_OBJ_IS_VNODE(uobj) &&
2756 (current->protection & VM_PROT_EXECUTE))
2757 vn_markexec((struct vnode *) uobj);
2758 }
2759 }
2760
2761 /*
2762 * If the map is configured to lock any future mappings,
2763 * wire this entry now if the old protection was VM_PROT_NONE
2764 * and the new protection is not VM_PROT_NONE.
2765 */
2766
2767 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
2768 		    VM_MAPENT_ISWIRED(current) == 0 &&
2769 old_prot == VM_PROT_NONE &&
2770 new_prot != VM_PROT_NONE) {
2771 			if (uvm_map_pageable(map, current->start,
2772 			    current->end, FALSE,
2773 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
2774
2775 /*
2776 * If locking the entry fails, remember the
2777 * error if it's the first one. Note we
2778 * still continue setting the protection in
2779 * the map, but will return the error
2780 * condition regardless.
2781 *
2782 * XXX Ignore what the actual error is,
2783 * XXX just call it a resource shortage
2784 * XXX so that it doesn't get confused
2785 * XXX what uvm_map_protect() itself would
2786 * XXX normally return.
2787 */
2788
2789 error = ENOMEM;
2790 }
2791 }
2792 current = current->next;
2793 }
2794 pmap_update(map->pmap);
2795
2796 out:
2797 vm_map_unlock(map);
2798
2799 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
2800 return error;
2801 }
2802
2803 #undef MASK
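
/*
 * usage sketch (illustrative): make [start, end) read-only in "map",
 * leaving max_protection alone.
 *
 *	error = uvm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 */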
2804
2805 /*
2806 * uvm_map_inherit: set inheritance code for range of addrs in map.
2807 *
2808 * => map must be unlocked
2809 * => note that the inherit code is used during a "fork". see fork
2810 * code for details.
2811 */
2812
2813 int
2814 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
2815 vm_inherit_t new_inheritance)
2816 {
2817 struct vm_map_entry *entry, *temp_entry;
2818 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
2819 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
2820 map, start, end, new_inheritance);
2821
2822 switch (new_inheritance) {
2823 case MAP_INHERIT_NONE:
2824 case MAP_INHERIT_COPY:
2825 case MAP_INHERIT_SHARE:
2826 break;
2827 default:
2828 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2829 return EINVAL;
2830 }
2831
2832 vm_map_lock(map);
2833 VM_MAP_RANGE_CHECK(map, start, end);
2834 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2835 entry = temp_entry;
2836 UVM_MAP_CLIP_START(map, entry, start, NULL);
2837 } else {
2838 entry = temp_entry->next;
2839 }
2840 while ((entry != &map->header) && (entry->start < end)) {
2841 UVM_MAP_CLIP_END(map, entry, end, NULL);
2842 entry->inheritance = new_inheritance;
2843 entry = entry->next;
2844 }
2845 vm_map_unlock(map);
2846 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2847 return 0;
2848 }
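
/*
 * usage sketch (illustrative): arrange for [start, end) to be shared
 * with the child across a fork.
 *
 *	error = uvm_map_inherit(map, start, end, MAP_INHERIT_SHARE);
 */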
2849
2850 /*
2851 * uvm_map_advice: set advice code for range of addrs in map.
2852 *
2853 * => map must be unlocked
2854 */
2855
2856 int
2857 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
2858 {
2859 struct vm_map_entry *entry, *temp_entry;
2860 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
2861 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
2862 map, start, end, new_advice);
2863
2864 vm_map_lock(map);
2865 VM_MAP_RANGE_CHECK(map, start, end);
2866 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2867 entry = temp_entry;
2868 UVM_MAP_CLIP_START(map, entry, start, NULL);
2869 } else {
2870 entry = temp_entry->next;
2871 }
2872
2873 /*
2874 * XXXJRT: disallow holes?
2875 */
2876
2877 while ((entry != &map->header) && (entry->start < end)) {
2878 UVM_MAP_CLIP_END(map, entry, end, NULL);
2879
2880 switch (new_advice) {
2881 case MADV_NORMAL:
2882 case MADV_RANDOM:
2883 case MADV_SEQUENTIAL:
2884 /* nothing special here */
2885 break;
2886
2887 default:
2888 vm_map_unlock(map);
2889 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2890 return EINVAL;
2891 }
2892 entry->advice = new_advice;
2893 entry = entry->next;
2894 }
2895
2896 vm_map_unlock(map);
2897 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2898 return 0;
2899 }
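
/*
 * usage sketch (illustrative): roughly what a sequential-access
 * madvise request turns into.
 *
 *	error = uvm_map_advice(map, start, end, MADV_SEQUENTIAL);
 */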
2900
2901 /*
2902 * uvm_map_pageable: sets the pageability of a range in a map.
2903 *
2904 * => wires map entries. should not be used for transient page locking.
2905 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
2906  * => regions specified as not pageable require lock-down (wired) memory
2907 * and page tables.
2908 * => map must never be read-locked
2909 * => if islocked is TRUE, map is already write-locked
2910 * => we always unlock the map, since we must downgrade to a read-lock
2911 * to call uvm_fault_wire()
2912 * => XXXCDC: check this and try and clean it up.
2913 */
2914
2915 int
2916 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
2917 boolean_t new_pageable, int lockflags)
2918 {
2919 struct vm_map_entry *entry, *start_entry, *failed_entry;
2920 int rv;
2921 #ifdef DIAGNOSTIC
2922 u_int timestamp_save;
2923 #endif
2924 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
2925 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
2926 map, start, end, new_pageable);
2927 KASSERT(map->flags & VM_MAP_PAGEABLE);
2928
2929 if ((lockflags & UVM_LK_ENTER) == 0)
2930 vm_map_lock(map);
2931 VM_MAP_RANGE_CHECK(map, start, end);
2932
2933 /*
2934 * only one pageability change may take place at one time, since
2935 * uvm_fault_wire assumes it will be called only once for each
2936 * wiring/unwiring. therefore, we have to make sure we're actually
2937 * changing the pageability for the entire region. we do so before
2938 * making any changes.
2939 */
2940
2941 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
2942 if ((lockflags & UVM_LK_EXIT) == 0)
2943 vm_map_unlock(map);
2944
2945 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
2946 return EFAULT;
2947 }
2948 entry = start_entry;
2949
2950 /*
2951 * handle wiring and unwiring separately.
2952 */
2953
2954 if (new_pageable) { /* unwire */
2955 UVM_MAP_CLIP_START(map, entry, start, NULL);
2956
2957 /*
2958 * unwiring. first ensure that the range to be unwired is
2959 * really wired down and that there are no holes.
2960 */
2961
2962 while ((entry != &map->header) && (entry->start < end)) {
2963 if (entry->wired_count == 0 ||
2964 (entry->end < end &&
2965 (entry->next == &map->header ||
2966 entry->next->start > entry->end))) {
2967 if ((lockflags & UVM_LK_EXIT) == 0)
2968 vm_map_unlock(map);
2969 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
2970 return EINVAL;
2971 }
2972 entry = entry->next;
2973 }
2974
2975 /*
2976 * POSIX 1003.1b - a single munlock call unlocks a region,
2977 * regardless of the number of mlock calls made on that
2978 * region.
2979 */
2980
2981 entry = start_entry;
2982 while ((entry != &map->header) && (entry->start < end)) {
2983 UVM_MAP_CLIP_END(map, entry, end, NULL);
2984 if (VM_MAPENT_ISWIRED(entry))
2985 uvm_map_entry_unwire(map, entry);
2986 entry = entry->next;
2987 }
2988 if ((lockflags & UVM_LK_EXIT) == 0)
2989 vm_map_unlock(map);
2990 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2991 return 0;
2992 }
2993
2994 /*
2995 * wire case: in two passes [XXXCDC: ugly block of code here]
2996 *
2997 * 1: holding the write lock, we create any anonymous maps that need
2998 * to be created. then we clip each map entry to the region to
2999 * be wired and increment its wiring count.
3000 *
3001 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3002 * in the pages for any newly wired area (wired_count == 1).
3003 *
3004 * downgrading to a read lock for uvm_fault_wire avoids a possible
3005 * deadlock with another thread that may have faulted on one of
3006 * the pages to be wired (it would mark the page busy, blocking
3007 * us, then in turn block on the map lock that we hold). because
3008 * of problems in the recursive lock package, we cannot upgrade
3009 * to a write lock in vm_map_lookup. thus, any actions that
3010 * require the write lock must be done beforehand. because we
3011 * keep the read lock on the map, the copy-on-write status of the
3012 * entries we modify here cannot change.
3013 */
3014
3015 while ((entry != &map->header) && (entry->start < end)) {
3016 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3017
3018 /*
3019 * perform actions of vm_map_lookup that need the
3020 * write lock on the map: create an anonymous map
3021 * for a copy-on-write region, or an anonymous map
3022 * for a zero-fill region. (XXXCDC: submap case
3023 * ok?)
3024 */
3025
3026 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3027 if (UVM_ET_ISNEEDSCOPY(entry) &&
3028 ((entry->max_protection & VM_PROT_WRITE) ||
3029 (entry->object.uvm_obj == NULL))) {
3030 amap_copy(map, entry, M_WAITOK, TRUE,
3031 start, end);
3032 /* XXXCDC: wait OK? */
3033 }
3034 }
3035 }
3036 UVM_MAP_CLIP_START(map, entry, start, NULL);
3037 UVM_MAP_CLIP_END(map, entry, end, NULL);
3038 entry->wired_count++;
3039
3040 /*
3041 * Check for holes
3042 */
3043
3044 if (entry->protection == VM_PROT_NONE ||
3045 (entry->end < end &&
3046 (entry->next == &map->header ||
3047 entry->next->start > entry->end))) {
3048
3049 /*
3050 * found one. amap creation actions do not need to
3051 * be undone, but the wired counts need to be restored.
3052 */
3053
3054 while (entry != &map->header && entry->end > start) {
3055 entry->wired_count--;
3056 entry = entry->prev;
3057 }
3058 if ((lockflags & UVM_LK_EXIT) == 0)
3059 vm_map_unlock(map);
3060 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3061 return EINVAL;
3062 }
3063 entry = entry->next;
3064 }
3065
3066 /*
3067 * Pass 2.
3068 */
3069
3070 #ifdef DIAGNOSTIC
3071 timestamp_save = map->timestamp;
3072 #endif
3073 vm_map_busy(map);
3074 vm_map_downgrade(map);
3075
3076 rv = 0;
3077 entry = start_entry;
3078 while (entry != &map->header && entry->start < end) {
3079 if (entry->wired_count == 1) {
3080 rv = uvm_fault_wire(map, entry->start, entry->end,
3081 VM_FAULT_WIREMAX, entry->max_protection);
3082 if (rv) {
3083
3084 /*
3085 * wiring failed. break out of the loop.
3086 * we'll clean up the map below, once we
3087 * have a write lock again.
3088 */
3089
3090 break;
3091 }
3092 }
3093 entry = entry->next;
3094 }
3095
3096 if (rv) { /* failed? */
3097
3098 /*
3099 * Get back to an exclusive (write) lock.
3100 */
3101
3102 vm_map_upgrade(map);
3103 vm_map_unbusy(map);
3104
3105 #ifdef DIAGNOSTIC
3106 if (timestamp_save != map->timestamp)
3107 panic("uvm_map_pageable: stale map");
3108 #endif
3109
3110 /*
3111 * first drop the wiring count on all the entries
3112 * which haven't actually been wired yet.
3113 */
3114
3115 failed_entry = entry;
3116 while (entry != &map->header && entry->start < end) {
3117 entry->wired_count--;
3118 entry = entry->next;
3119 }
3120
3121 /*
3122 * now, unwire all the entries that were successfully
3123 * wired above.
3124 */
3125
3126 entry = start_entry;
3127 while (entry != failed_entry) {
3128 entry->wired_count--;
3129 if (VM_MAPENT_ISWIRED(entry) == 0)
3130 uvm_map_entry_unwire(map, entry);
3131 entry = entry->next;
3132 }
3133 if ((lockflags & UVM_LK_EXIT) == 0)
3134 vm_map_unlock(map);
3135 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3136 return (rv);
3137 }
3138
3139 /* We are holding a read lock here. */
3140 if ((lockflags & UVM_LK_EXIT) == 0) {
3141 vm_map_unbusy(map);
3142 vm_map_unlock_read(map);
3143 } else {
3144
3145 /*
3146 * Get back to an exclusive (write) lock.
3147 */
3148
3149 vm_map_upgrade(map);
3150 vm_map_unbusy(map);
3151 }
3152
3153 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3154 return 0;
3155 }
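
/*
 * usage sketch (illustrative): wire [start, end) down and later undo
 * it; new_pageable == TRUE means "make it pageable again" (unwire).
 *
 *	error = uvm_map_pageable(map, start, end, FALSE, 0);	(wire)
 *	...
 *	error = uvm_map_pageable(map, start, end, TRUE, 0);	(unwire)
 */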
3156
3157 /*
3158 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3159 * all mapped regions.
3160 *
3161 * => map must not be locked.
3162 * => if no flags are specified, all regions are unwired.
3163 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3164 */
3165
3166 int
3167 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3168 {
3169 struct vm_map_entry *entry, *failed_entry;
3170 vsize_t size;
3171 int rv;
3172 #ifdef DIAGNOSTIC
3173 u_int timestamp_save;
3174 #endif
3175 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3176 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3177
3178 KASSERT(map->flags & VM_MAP_PAGEABLE);
3179
3180 vm_map_lock(map);
3181
3182 /*
3183 * handle wiring and unwiring separately.
3184 */
3185
3186 if (flags == 0) { /* unwire */
3187
3188 /*
3189 * POSIX 1003.1b -- munlockall unlocks all regions,
3190 * regardless of how many times mlockall has been called.
3191 */
3192
3193 for (entry = map->header.next; entry != &map->header;
3194 entry = entry->next) {
3195 if (VM_MAPENT_ISWIRED(entry))
3196 uvm_map_entry_unwire(map, entry);
3197 }
3198 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3199 vm_map_unlock(map);
3200 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3201 return 0;
3202 }
3203
3204 if (flags & MCL_FUTURE) {
3205
3206 /*
3207 * must wire all future mappings; remember this.
3208 */
3209
3210 vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
3211 }
3212
3213 if ((flags & MCL_CURRENT) == 0) {
3214
3215 /*
3216 * no more work to do!
3217 */
3218
3219 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3220 vm_map_unlock(map);
3221 return 0;
3222 }
3223
3224 /*
3225 * wire case: in three passes [XXXCDC: ugly block of code here]
3226 *
3227 * 1: holding the write lock, count all pages mapped by non-wired
3228 * entries. if this would cause us to go over our limit, we fail.
3229 *
3230 * 2: still holding the write lock, we create any anonymous maps that
3231  *    need to be created.  then we increment each entry's wiring count.
3232 *
3233 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3234 * in the pages for any newly wired area (wired_count == 1).
3235 *
3236 * downgrading to a read lock for uvm_fault_wire avoids a possible
3237 * deadlock with another thread that may have faulted on one of
3238 * the pages to be wired (it would mark the page busy, blocking
3239 * us, then in turn block on the map lock that we hold). because
3240 * of problems in the recursive lock package, we cannot upgrade
3241 * to a write lock in vm_map_lookup. thus, any actions that
3242 * require the write lock must be done beforehand. because we
3243 * keep the read lock on the map, the copy-on-write status of the
3244 * entries we modify here cannot change.
3245 */
3246
3247 for (size = 0, entry = map->header.next; entry != &map->header;
3248 entry = entry->next) {
3249 if (entry->protection != VM_PROT_NONE &&
3250 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3251 size += entry->end - entry->start;
3252 }
3253 }
3254
3255 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3256 vm_map_unlock(map);
3257 return ENOMEM;
3258 }
3259
3260 if (limit != 0 &&
3261 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3262 vm_map_unlock(map);
3263 return ENOMEM;
3264 }
3265
3266 /*
3267 * Pass 2.
3268 */
3269
3270 for (entry = map->header.next; entry != &map->header;
3271 entry = entry->next) {
3272 if (entry->protection == VM_PROT_NONE)
3273 continue;
3274 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3275
3276 /*
3277 * perform actions of vm_map_lookup that need the
3278 * write lock on the map: create an anonymous map
3279 * for a copy-on-write region, or an anonymous map
3280 * for a zero-fill region. (XXXCDC: submap case
3281 * ok?)
3282 */
3283
3284 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3285 if (UVM_ET_ISNEEDSCOPY(entry) &&
3286 ((entry->max_protection & VM_PROT_WRITE) ||
3287 (entry->object.uvm_obj == NULL))) {
3288 amap_copy(map, entry, M_WAITOK, TRUE,
3289 entry->start, entry->end);
3290 /* XXXCDC: wait OK? */
3291 }
3292 }
3293 }
3294 entry->wired_count++;
3295 }
3296
3297 /*
3298 * Pass 3.
3299 */
3300
3301 #ifdef DIAGNOSTIC
3302 timestamp_save = map->timestamp;
3303 #endif
3304 vm_map_busy(map);
3305 vm_map_downgrade(map);
3306
3307 rv = 0;
3308 for (entry = map->header.next; entry != &map->header;
3309 entry = entry->next) {
3310 if (entry->wired_count == 1) {
3311 rv = uvm_fault_wire(map, entry->start, entry->end,
3312 VM_FAULT_WIREMAX, entry->max_protection);
3313 if (rv) {
3314
3315 /*
3316 * wiring failed. break out of the loop.
3317 * we'll clean up the map below, once we
3318 * have a write lock again.
3319 */
3320
3321 break;
3322 }
3323 }
3324 }
3325
3326 if (rv) {
3327
3328 /*
3329 		 * Get back to an exclusive (write) lock.
3330 */
3331
3332 vm_map_upgrade(map);
3333 vm_map_unbusy(map);
3334
3335 #ifdef DIAGNOSTIC
3336 if (timestamp_save != map->timestamp)
3337 panic("uvm_map_pageable_all: stale map");
3338 #endif
3339
3340 /*
3341 * first drop the wiring count on all the entries
3342 * which haven't actually been wired yet.
3343 *
3344 * Skip VM_PROT_NONE entries like we did above.
3345 */
3346
3347 failed_entry = entry;
3348 for (/* nothing */; entry != &map->header;
3349 entry = entry->next) {
3350 if (entry->protection == VM_PROT_NONE)
3351 continue;
3352 entry->wired_count--;
3353 }
3354
3355 /*
3356 * now, unwire all the entries that were successfully
3357 * wired above.
3358 *
3359 * Skip VM_PROT_NONE entries like we did above.
3360 */
3361
3362 for (entry = map->header.next; entry != failed_entry;
3363 entry = entry->next) {
3364 if (entry->protection == VM_PROT_NONE)
3365 continue;
3366 entry->wired_count--;
3367 if (VM_MAPENT_ISWIRED(entry))
3368 uvm_map_entry_unwire(map, entry);
3369 }
3370 vm_map_unlock(map);
3371 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3372 return (rv);
3373 }
3374
3375 /* We are holding a read lock here. */
3376 vm_map_unbusy(map);
3377 vm_map_unlock_read(map);
3378
3379 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3380 return 0;
3381 }
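
/*
 * usage sketch (illustrative): roughly what mlockall/munlockall ask
 * for; "limit" caps the wired total and 0 means no extra limit.
 *
 *	error = uvm_map_pageable_all(map, MCL_CURRENT|MCL_FUTURE, limit);
 *	...
 *	error = uvm_map_pageable_all(map, 0, 0);	(unwire everything)
 */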
3382
3383 /*
3384 * uvm_map_clean: clean out a map range
3385 *
3386 * => valid flags:
3387 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3388 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3389 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3390 * if (flags & PGO_FREE): any cached pages are freed after clean
3391 * => returns an error if any part of the specified range isn't mapped
3392 * => never a need to flush amap layer since the anonymous memory has
3393 * no permanent home, but may deactivate pages there
3394 * => called from sys_msync() and sys_madvise()
3395 * => caller must not write-lock map (read OK).
3396 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3397 */
3398
3399 int
3400 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3401 {
3402 struct vm_map_entry *current, *entry;
3403 struct uvm_object *uobj;
3404 struct vm_amap *amap;
3405 struct vm_anon *anon;
3406 struct vm_page *pg;
3407 vaddr_t offset;
3408 vsize_t size;
3409 voff_t uoff;
3410 int error, refs;
3411 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3412
3413 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3414 map, start, end, flags);
3415 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3416 (PGO_FREE|PGO_DEACTIVATE));
3417
3418 vm_map_lock_read(map);
3419 VM_MAP_RANGE_CHECK(map, start, end);
3420 if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
3421 vm_map_unlock_read(map);
3422 return EFAULT;
3423 }
3424
3425 /*
3426 * Make a first pass to check for holes and wiring problems.
3427 */
3428
3429 for (current = entry; current->start < end; current = current->next) {
3430 if (UVM_ET_ISSUBMAP(current)) {
3431 vm_map_unlock_read(map);
3432 return EINVAL;
3433 }
3434 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3435 vm_map_unlock_read(map);
3436 return EBUSY;
3437 }
3438 if (end <= current->end) {
3439 break;
3440 }
3441 if (current->end != current->next->start) {
3442 vm_map_unlock_read(map);
3443 return EFAULT;
3444 }
3445 }
3446
3447 error = 0;
3448 for (current = entry; start < end; current = current->next) {
3449 amap = current->aref.ar_amap; /* top layer */
3450 uobj = current->object.uvm_obj; /* bottom layer */
3451 KASSERT(start >= current->start);
3452
3453 /*
3454 * No amap cleaning necessary if:
3455 *
3456 * (1) There's no amap.
3457 *
3458 * (2) We're not deactivating or freeing pages.
3459 */
3460
3461 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3462 goto flush_object;
3463
3464 amap_lock(amap);
3465 offset = start - current->start;
3466 size = MIN(end, current->end) - start;
3467 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3468 			anon = amap_lookup(&current->aref, offset);
3469 if (anon == NULL)
3470 continue;
3471
3472 simple_lock(&anon->an_lock);
3473 pg = anon->an_page;
3474 if (pg == NULL) {
3475 simple_unlock(&anon->an_lock);
3476 continue;
3477 }
3478
3479 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3480
3481 /*
3482 * In these first 3 cases, we just deactivate the page.
3483 */
3484
3485 case PGO_CLEANIT|PGO_FREE:
3486 case PGO_CLEANIT|PGO_DEACTIVATE:
3487 case PGO_DEACTIVATE:
3488 deactivate_it:
3489 /*
3490 * skip the page if it's loaned or wired,
3491 * since it shouldn't be on a paging queue
3492 * at all in these cases.
3493 */
3494
3495 uvm_lock_pageq();
3496 if (pg->loan_count != 0 ||
3497 pg->wire_count != 0) {
3498 uvm_unlock_pageq();
3499 simple_unlock(&anon->an_lock);
3500 continue;
3501 }
3502 KASSERT(pg->uanon == anon);
3503 pmap_clear_reference(pg);
3504 uvm_pagedeactivate(pg);
3505 uvm_unlock_pageq();
3506 simple_unlock(&anon->an_lock);
3507 continue;
3508
3509 case PGO_FREE:
3510
3511 /*
3512 * If there are multiple references to
3513 * the amap, just deactivate the page.
3514 */
3515
3516 if (amap_refs(amap) > 1)
3517 goto deactivate_it;
3518
3519 /* skip the page if it's wired */
3520 if (pg->wire_count != 0) {
3521 simple_unlock(&anon->an_lock);
3522 continue;
3523 }
3524 				amap_unadd(&current->aref, offset);
3525 refs = --anon->an_ref;
3526 simple_unlock(&anon->an_lock);
3527 if (refs == 0)
3528 uvm_anfree(anon);
3529 continue;
3530 }
3531 }
3532 amap_unlock(amap);
3533
3534 flush_object:
3535 /*
3536 * flush pages if we've got a valid backing object.
3537 * note that we must always clean object pages before
3538 * freeing them since otherwise we could reveal stale
3539 * data from files.
3540 */
3541
3542 uoff = current->offset + (start - current->start);
3543 size = MIN(end, current->end) - start;
3544 if (uobj != NULL) {
3545 simple_lock(&uobj->vmobjlock);
3546 if (uobj->pgops->pgo_put != NULL)
3547 error = (uobj->pgops->pgo_put)(uobj, uoff,
3548 uoff + size, flags | PGO_CLEANIT);
3549 else
3550 error = 0;
3551 }
3552 start += size;
3553 }
3554 vm_map_unlock_read(map);
3555 return (error);
3556 }
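
/*
 * usage sketch (illustrative): roughly what a synchronous msync and
 * an invalidating msync ask for, respectively.
 *
 *	error = uvm_map_clean(map, start, end, PGO_CLEANIT|PGO_SYNCIO);
 *	error = uvm_map_clean(map, start, end, PGO_FREE);
 */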
3557
3558
3559 /*
3560 * uvm_map_checkprot: check protection in map
3561 *
3562 * => must allow specified protection in a fully allocated region.
3563 * => map must be read or write locked by caller.
3564 */
3565
3566 boolean_t
3567 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3568 vm_prot_t protection)
3569 {
3570 struct vm_map_entry *entry;
3571 struct vm_map_entry *tmp_entry;
3572
3573 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3574 return (FALSE);
3575 }
3576 entry = tmp_entry;
3577 while (start < end) {
3578 if (entry == &map->header) {
3579 return (FALSE);
3580 }
3581
3582 /*
3583 * no holes allowed
3584 */
3585
3586 if (start < entry->start) {
3587 return (FALSE);
3588 }
3589
3590 /*
3591 * check protection associated with entry
3592 */
3593
3594 if ((entry->protection & protection) != protection) {
3595 return (FALSE);
3596 }
3597 start = entry->end;
3598 entry = entry->next;
3599 }
3600 return (TRUE);
3601 }
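
/*
 * usage sketch (illustrative; the map is already locked by the
 * caller, as required above):
 *
 *	if (!uvm_map_checkprot(map, start, end, VM_PROT_READ))
 *		return (EFAULT);
 */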
3602
3603 /*
3604 * uvmspace_alloc: allocate a vmspace structure.
3605 *
3606 * - structure includes vm_map and pmap
3607 * - XXX: no locking on this structure
3608 * - refcnt set to 1, rest must be init'd by caller
3609 */
3610 struct vmspace *
3611 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
3612 {
3613 struct vmspace *vm;
3614 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3615
3616 vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
3617 uvmspace_init(vm, NULL, vmin, vmax);
3618 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3619 return (vm);
3620 }
3621
3622 /*
3623 * uvmspace_init: initialize a vmspace structure.
3624 *
3625 * - XXX: no locking on this structure
3626 * - refcnt set to 1, rest must be init'd by caller
3627 */
3628 void
3629 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
3630 {
3631 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3632
3633 memset(vm, 0, sizeof(*vm));
3634 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
3635 #ifdef __USING_TOPDOWN_VM
3636 | VM_MAP_TOPDOWN
3637 #endif
3638 );
3639 if (pmap)
3640 pmap_reference(pmap);
3641 else
3642 pmap = pmap_create();
3643 vm->vm_map.pmap = pmap;
3644 vm->vm_refcnt = 1;
3645 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3646 }
3647
3648 /*
3649 * uvmspace_share: share a vmspace between two processes
3650 *
3651 * - used for vfork, threads(?)
3652 */
3653
3654 void
3655 uvmspace_share(struct proc *p1, struct proc *p2)
3656 {
3657 struct simplelock *slock = &p1->p_vmspace->vm_map.ref_lock;
3658
3659 p2->p_vmspace = p1->p_vmspace;
3660 simple_lock(slock);
3661 p1->p_vmspace->vm_refcnt++;
3662 simple_unlock(slock);
3663 }
3664
3665 /*
3666 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
3667 *
3668 * - XXX: no locking on vmspace
3669 */
3670
3671 void
3672 uvmspace_unshare(struct lwp *l)
3673 {
3674 struct proc *p = l->l_proc;
3675 struct vmspace *nvm, *ovm = p->p_vmspace;
3676
3677 if (ovm->vm_refcnt == 1)
3678 /* nothing to do: vmspace isn't shared in the first place */
3679 return;
3680
3681 /* make a new vmspace, still holding old one */
3682 nvm = uvmspace_fork(ovm);
3683
3684 pmap_deactivate(l); /* unbind old vmspace */
3685 p->p_vmspace = nvm;
3686 pmap_activate(l); /* switch to new vmspace */
3687
3688 uvmspace_free(ovm); /* drop reference to old vmspace */
3689 }
3690
3691 /*
3692 * uvmspace_exec: the process wants to exec a new program
3693 */
3694
3695 void
3696 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
3697 {
3698 struct proc *p = l->l_proc;
3699 struct vmspace *nvm, *ovm = p->p_vmspace;
3700 struct vm_map *map = &ovm->vm_map;
3701
3702 #ifdef __sparc__
3703 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
3704 kill_user_windows(l); /* before stack addresses go away */
3705 #endif
3706
3707 /*
3708 * see if more than one process is using this vmspace...
3709 */
3710
3711 if (ovm->vm_refcnt == 1) {
3712
3713 /*
3714 * if p is the only process using its vmspace then we can safely
3715 * recycle that vmspace for the program that is being exec'd.
3716 */
3717
3718 #ifdef SYSVSHM
3719 /*
3720 * SYSV SHM semantics require us to kill all segments on an exec
3721 */
3722
3723 if (ovm->vm_shm)
3724 shmexit(ovm);
3725 #endif
3726
3727 /*
3728 * POSIX 1003.1b -- "lock future mappings" is revoked
3729 * when a process execs another program image.
3730 */
3731
3732 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3733
3734 /*
3735 * now unmap the old program
3736 */
3737
3738 pmap_remove_all(map->pmap);
3739 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
3740 KASSERT(map->header.prev == &map->header);
3741 KASSERT(map->nentries == 0);
3742
3743 /*
3744 * resize the map
3745 */
3746
3747 vm_map_setmin(map, start);
3748 vm_map_setmax(map, end);
3749 } else {
3750
3751 /*
3752 * p's vmspace is being shared, so we can't reuse it for p since
3753 * it is still being used for others. allocate a new vmspace
3754 * for p
3755 */
3756
3757 nvm = uvmspace_alloc(start, end);
3758
3759 /*
3760 * install new vmspace and drop our ref to the old one.
3761 */
3762
3763 pmap_deactivate(l);
3764 p->p_vmspace = nvm;
3765 pmap_activate(l);
3766
3767 uvmspace_free(ovm);
3768 }
3769 }
3770
3771 /*
3772 * uvmspace_free: free a vmspace data structure
3773 */
3774
3775 void
3776 uvmspace_free(struct vmspace *vm)
3777 {
3778 struct vm_map_entry *dead_entries;
3779 struct vm_map *map = &vm->vm_map;
3780 int n;
3781
3782 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
3783
3784 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
3785 simple_lock(&map->ref_lock);
3786 n = --vm->vm_refcnt;
3787 simple_unlock(&map->ref_lock);
3788 if (n > 0)
3789 return;
3790
3791 /*
3792 * at this point, there should be no other references to the map.
3793 * delete all of the mappings, then destroy the pmap.
3794 */
3795
3796 map->flags |= VM_MAP_DYING;
3797 pmap_remove_all(map->pmap);
3798 #ifdef SYSVSHM
3799 /* Get rid of any SYSV shared memory segments. */
3800 if (vm->vm_shm != NULL)
3801 shmexit(vm);
3802 #endif
3803 if (map->nentries) {
3804 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
3805 &dead_entries, NULL, 0);
3806 if (dead_entries != NULL)
3807 uvm_unmap_detach(dead_entries, 0);
3808 }
3809 KASSERT(map->nentries == 0);
3810 KASSERT(map->size == 0);
3811 pmap_destroy(map->pmap);
3812 pool_put(&uvm_vmspace_pool, vm);
3813 }
3814
3815 /*
3816 * F O R K - m a i n e n t r y p o i n t
3817 */
3818 /*
3819 * uvmspace_fork: fork a process' main map
3820 *
3821 * => create a new vmspace for child process from parent.
3822 * => parent's map must not be locked.
3823 */
3824
3825 struct vmspace *
3826 uvmspace_fork(struct vmspace *vm1)
3827 {
3828 struct vmspace *vm2;
3829 struct vm_map *old_map = &vm1->vm_map;
3830 struct vm_map *new_map;
3831 struct vm_map_entry *old_entry;
3832 struct vm_map_entry *new_entry;
3833 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
3834
3835 vm_map_lock(old_map);
3836
3837 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
3838 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
3839 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
3840 new_map = &vm2->vm_map; /* XXX */
3841
3842 old_entry = old_map->header.next;
3843 new_map->size = old_map->size;
3844
3845 /*
3846 * go entry-by-entry
3847 */
3848
3849 while (old_entry != &old_map->header) {
3850
3851 /*
3852 * first, some sanity checks on the old entry
3853 */
3854
3855 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
3856 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
3857 !UVM_ET_ISNEEDSCOPY(old_entry));
3858
3859 switch (old_entry->inheritance) {
3860 case MAP_INHERIT_NONE:
3861
3862 /*
3863 * drop the mapping, modify size
3864 */
3865 new_map->size -= old_entry->end - old_entry->start;
3866 break;
3867
3868 case MAP_INHERIT_SHARE:
3869
3870 /*
3871 * share the mapping: this means we want the old and
3872 * new entries to share amaps and backing objects.
3873 */
3874 /*
3875 * if the old_entry needs a new amap (due to prev fork)
3876 * then we need to allocate it now so that we have
3877 * something we own to share with the new_entry. [in
3878 * other words, we need to clear needs_copy]
3879 */
3880
3881 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
3882 /* get our own amap, clears needs_copy */
3883 amap_copy(old_map, old_entry, M_WAITOK, FALSE,
3884 0, 0);
3885 /* XXXCDC: WAITOK??? */
3886 }
3887
3888 new_entry = uvm_mapent_alloc(new_map, 0);
3889 /* old_entry -> new_entry */
3890 uvm_mapent_copy(old_entry, new_entry);
3891
3892 /* new pmap has nothing wired in it */
3893 new_entry->wired_count = 0;
3894
3895 /*
3896 * gain reference to object backing the map (can't
3897 * be a submap, already checked this case).
3898 */
3899
3900 if (new_entry->aref.ar_amap)
3901 uvm_map_reference_amap(new_entry, AMAP_SHARED);
3902
3903 if (new_entry->object.uvm_obj &&
3904 new_entry->object.uvm_obj->pgops->pgo_reference)
3905 new_entry->object.uvm_obj->
3906 pgops->pgo_reference(
3907 new_entry->object.uvm_obj);
3908
3909 /* insert entry at end of new_map's entry list */
3910 uvm_map_entry_link(new_map, new_map->header.prev,
3911 new_entry);
3912
3913 break;
3914
3915 case MAP_INHERIT_COPY:
3916
3917 /*
3918 * copy-on-write the mapping (using mmap's
3919 * MAP_PRIVATE semantics)
3920 *
3921 * allocate new_entry, adjust reference counts.
3922 * (note that new references are read-only).
3923 */
3924
3925 new_entry = uvm_mapent_alloc(new_map, 0);
3926 /* old_entry -> new_entry */
3927 uvm_mapent_copy(old_entry, new_entry);
3928
3929 if (new_entry->aref.ar_amap)
3930 uvm_map_reference_amap(new_entry, 0);
3931
3932 if (new_entry->object.uvm_obj &&
3933 new_entry->object.uvm_obj->pgops->pgo_reference)
3934 new_entry->object.uvm_obj->pgops->pgo_reference
3935 (new_entry->object.uvm_obj);
3936
3937 /* new pmap has nothing wired in it */
3938 new_entry->wired_count = 0;
3939
3940 new_entry->etype |=
3941 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
3942 uvm_map_entry_link(new_map, new_map->header.prev,
3943 new_entry);
3944
3945 /*
3946 * the new entry will need an amap. it will either
3947 * need to be copied from the old entry or created
3948 * from scratch (if the old entry does not have an
3949 * amap). can we defer this process until later
3950 * (by setting "needs_copy") or do we need to copy
3951 * the amap now?
3952 *
3953 * we must copy the amap now if any of the following
3954 * conditions hold:
3955 * 1. the old entry has an amap and that amap is
3956 * being shared. this means that the old (parent)
3957 * process is sharing the amap with another
3958 * process. if we do not clear needs_copy here
3959 * we will end up in a situation where both the
3960 			 * parent and child process are referring to the
3961 * same amap with "needs_copy" set. if the
3962 * parent write-faults, the fault routine will
3963 * clear "needs_copy" in the parent by allocating
3964 * a new amap. this is wrong because the
3965 * parent is supposed to be sharing the old amap
3966 * and the new amap will break that.
3967 *
3968 * 2. if the old entry has an amap and a non-zero
3969 * wire count then we are going to have to call
3970 * amap_cow_now to avoid page faults in the
3971 * parent process. since amap_cow_now requires
3972 * "needs_copy" to be clear we might as well
3973 * clear it here as well.
3974 *
3975 */
3976
3977 if (old_entry->aref.ar_amap != NULL) {
3978 if ((amap_flags(old_entry->aref.ar_amap) &
3979 AMAP_SHARED) != 0 ||
3980 VM_MAPENT_ISWIRED(old_entry)) {
3981
3982 amap_copy(new_map, new_entry, M_WAITOK,
3983 FALSE, 0, 0);
3984 /* XXXCDC: M_WAITOK ... ok? */
3985 }
3986 }
3987
3988 /*
3989 * if the parent's entry is wired down, then the
3990 * parent process does not want page faults on
3991 * access to that memory. this means that we
3992 * cannot do copy-on-write because we can't write
3993 * protect the old entry. in this case we
3994 * resolve all copy-on-write faults now, using
3995 * amap_cow_now. note that we have already
3996 * allocated any needed amap (above).
3997 */
3998
3999 if (VM_MAPENT_ISWIRED(old_entry)) {
4000
4001 /*
4002 * resolve all copy-on-write faults now
4003 * (note that there is nothing to do if
4004 * the old mapping does not have an amap).
4005 */
4006 if (old_entry->aref.ar_amap)
4007 amap_cow_now(new_map, new_entry);
4008
4009 } else {
4010
4011 /*
4012 				 * set up mappings to trigger copy-on-write faults.
4013 * we must write-protect the parent if it has
4014 * an amap and it is not already "needs_copy"...
4015 * if it is already "needs_copy" then the parent
4016 * has already been write-protected by a previous
4017 * fork operation.
4018 */
4019
4020 if (old_entry->aref.ar_amap &&
4021 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4022 if (old_entry->max_protection & VM_PROT_WRITE) {
4023 pmap_protect(old_map->pmap,
4024 old_entry->start,
4025 old_entry->end,
4026 old_entry->protection &
4027 ~VM_PROT_WRITE);
4028 pmap_update(old_map->pmap);
4029 }
4030 old_entry->etype |= UVM_ET_NEEDSCOPY;
4031 }
4032 }
4033 break;
4034 } /* end of switch statement */
4035 old_entry = old_entry->next;
4036 }
4037
4038 vm_map_unlock(old_map);
4039
4040 #ifdef SYSVSHM
4041 if (vm1->vm_shm)
4042 shmfork(vm1, vm2);
4043 #endif
4044
4045 #ifdef PMAP_FORK
4046 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4047 #endif
4048
4049 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4050 return (vm2);
4051 }
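
/*
 * Illustration (a sketch added for exposition, not part of the original
 * source): the per-entry inheritance values handled in the switch above
 * are selected from userland with minherit(2), e.g.
 *
 *	minherit(addr, len, MAP_INHERIT_SHARE);
 *
 * after which a subsequent fork() gives the child an entry that shares
 * the amap and backing object with the parent, rather than receiving a
 * copy-on-write copy.
 */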
4052
4053
4054 /*
4055 * in-kernel map entry allocation.
4056 */
4057
4058 int ukh_alloc, ukh_free;
4059 int uke_alloc, uke_free;
4060
4061 struct uvm_kmapent_hdr {
4062 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4063 int ukh_nused;
4064 struct vm_map_entry *ukh_freelist;
4065 struct vm_map *ukh_map;
4066 struct vm_map_entry ukh_entries[0];
4067 };
4068
4069 #define UVM_KMAPENT_CHUNK \
4070 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4071 / sizeof(struct vm_map_entry))
4072
4073 #define UVM_KHDR_FIND(entry) \
4074 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
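
/*
 * Worked example (the sizes here are hypothetical and vary by platform):
 * with 4096-byte pages, a 32-byte uvm_kmapent_hdr and 96-byte
 * vm_map_entry, UVM_KMAPENT_CHUNK = (4096 - 32) / 96 = 42 entries per
 * page.  because each chunk occupies exactly one page, UVM_KHDR_FIND()
 * can recover the header from any entry in the chunk simply by masking
 * off the page offset:
 *
 *	struct uvm_kmapent_hdr *ukh = UVM_KHDR_FIND(entry);
 */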
4075
4076
4077 #ifdef DIAGNOSTIC
4078 static struct vm_map *
4079 uvm_kmapent_map(struct vm_map_entry *entry)
4080 {
4081 const struct uvm_kmapent_hdr *ukh;
4082
4083 ukh = UVM_KHDR_FIND(entry);
4084 return ukh->ukh_map;
4085 }
4086 #endif
4087
4088 static __inline struct vm_map_entry *
4089 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4090 {
4091 struct vm_map_entry *entry;
4092
4093 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4094 KASSERT(ukh->ukh_nused >= 0);
4095
4096 entry = ukh->ukh_freelist;
4097 if (entry) {
4098 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4099 == UVM_MAP_KERNEL);
4100 ukh->ukh_freelist = entry->next;
4101 ukh->ukh_nused++;
4102 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4103 } else {
4104 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4105 }
4106
4107 return entry;
4108 }
4109
4110 static __inline void
4111 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4112 {
4113
4114 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4115 == UVM_MAP_KERNEL);
4116 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4117 KASSERT(ukh->ukh_nused > 0);
4118 KASSERT(ukh->ukh_freelist != NULL ||
4119 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4120 KASSERT(ukh->ukh_freelist == NULL ||
4121 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4122
4123 ukh->ukh_nused--;
4124 entry->next = ukh->ukh_freelist;
4125 ukh->ukh_freelist = entry;
4126 }
4127
4128 /*
4129 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4130 */
4131
4132 static struct vm_map_entry *
4133 uvm_kmapent_alloc(struct vm_map *map, int flags)
4134 {
4135 struct vm_page *pg;
4136 struct uvm_map_args args;
4137 struct uvm_kmapent_hdr *ukh;
4138 struct vm_map_entry *entry;
4139 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4140 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4141 vaddr_t va;
4142 int error;
4143 int i;
4144 int s;
4145
4146 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4147 KDASSERT(kernel_map != NULL);
4148 KASSERT(vm_map_pmap(map) == pmap_kernel());
4149
4150 uke_alloc++;
4151 entry = NULL;
4152 again:
4153 /*
4154 	 * try to grab an entry from the freelist.
4155 */
4156 s = splvm();
4157 simple_lock(&uvm.kentry_lock);
4158 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4159 if (ukh) {
4160 entry = uvm_kmapent_get(ukh);
4161 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4162 LIST_REMOVE(ukh, ukh_listq);
4163 }
4164 simple_unlock(&uvm.kentry_lock);
4165 splx(s);
4166
4167 if (entry)
4168 return entry;
4169
4170 /*
4171 * there's no free entry for this vm_map.
4172 	 * now we need to allocate more vm_map_entries.
4173 	 * for simplicity, always allocate a one-page chunk of them at once.
4174 */
4175
4176 pg = uvm_pagealloc(NULL, 0, NULL, 0);
4177 if (__predict_false(pg == NULL)) {
4178 if (flags & UVM_FLAG_NOWAIT)
4179 return NULL;
4180 uvm_wait("kme_alloc");
4181 goto again;
4182 }
4183
4184 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, 0, 0, mapflags, &args);
4185 if (error) {
4186 uvm_pagefree(pg);
4187 return NULL;
4188 }
4189
4190 va = args.uma_start;
4191
4192 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
4193 pmap_update(vm_map_pmap(map));
4194
4195 ukh = (void *)va;
4196
4197 /*
4198 	 * use the first entry for the ukh itself.
4199 */
4200
4201 entry = &ukh->ukh_entries[0];
4202 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4203 error = uvm_map_enter(map, &args, entry);
4204 KASSERT(error == 0);
4205
4206 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4207 ukh->ukh_map = map;
4208 ukh->ukh_freelist = NULL;
4209 for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
4210 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4211
4212 xentry->flags = UVM_MAP_KERNEL;
4213 uvm_kmapent_put(ukh, xentry);
4214 }
4215 KASSERT(ukh->ukh_nused == 2);
4216
4217 s = splvm();
4218 simple_lock(&uvm.kentry_lock);
4219 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4220 ukh, ukh_listq);
4221 simple_unlock(&uvm.kentry_lock);
4222 splx(s);
4223
4224 /*
4225 	 * return the second entry.
4226 */
4227
4228 entry = &ukh->ukh_entries[1];
4229 entry->flags = UVM_MAP_KERNEL;
4230 ukh_alloc++;
4231 return entry;
4232 }
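
/*
 * Sketch of a freshly initialized kmapent page (added illustration):
 *
 *	+-------------------------+ <- page-aligned va == ukh
 *	| struct uvm_kmapent_hdr  |
 *	| ukh_entries[0]          |   maps this page itself (UVM_MAP_KMAPENT)
 *	| ukh_entries[1]          |   returned to the caller
 *	| ukh_entries[2..N-1]     |   placed on ukh_freelist
 *	+-------------------------+
 *
 * hence the KASSERT above that ukh_nused == 2 right after setup.
 */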
4233
4234 /*
4235  * uvm_kmapent_free: free a map entry for an in-kernel map
4236 */
4237
4238 static void
4239 uvm_kmapent_free(struct vm_map_entry *entry)
4240 {
4241 struct uvm_kmapent_hdr *ukh;
4242 struct vm_page *pg;
4243 struct vm_map *map;
4244 struct pmap *pmap;
4245 vaddr_t va;
4246 paddr_t pa;
4247 struct vm_map_entry *deadentry;
4248 int s;
4249
4250 uke_free++;
4251 ukh = UVM_KHDR_FIND(entry);
4252 map = ukh->ukh_map;
4253
4254 s = splvm();
4255 simple_lock(&uvm.kentry_lock);
4256 uvm_kmapent_put(ukh, entry);
4257 if (ukh->ukh_nused > 1) {
4258 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4259 LIST_INSERT_HEAD(
4260 &vm_map_to_kernel(map)->vmk_kentry_free,
4261 ukh, ukh_listq);
4262 simple_unlock(&uvm.kentry_lock);
4263 splx(s);
4264 return;
4265 }
4266
4267 /*
4268 * now we can free this ukh.
4269 *
4270 	 * however, keep one empty ukh around to avoid alloc/free ping-pong.
4271 */
4272
4273 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4274 LIST_NEXT(ukh, ukh_listq) == NULL) {
4275 simple_unlock(&uvm.kentry_lock);
4276 splx(s);
4277 return;
4278 }
4279 LIST_REMOVE(ukh, ukh_listq);
4280 simple_unlock(&uvm.kentry_lock);
4281 splx(s);
4282
4283 KASSERT(ukh->ukh_nused == 1);
4284
4285 /*
4286 	 * remove the map entry for the ukh itself.
4287 */
4288
4289 va = (vaddr_t)ukh;
4290 KASSERT((va & PAGE_MASK) == 0);
4291 vm_map_lock(map);
4292 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4293 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4294 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4295 KASSERT(deadentry->next == NULL);
4296 KASSERT(deadentry == &ukh->ukh_entries[0]);
4297
4298 /*
4299 * unmap the page from pmap and free it.
4300 */
4301
4302 pmap = vm_map_pmap(map);
4303 KASSERT(pmap == pmap_kernel());
4304 if (!pmap_extract(pmap, va, &pa))
4305 panic("%s: no mapping", __func__);
4306 pmap_kremove(va, PAGE_SIZE);
4307 vm_map_unlock(map);
4308 pg = PHYS_TO_VM_PAGE(pa);
4309 uvm_pagefree(pg);
4310 ukh_free++;
4311 }
4312
4313 /*
4314 * map entry reservation
4315 */
4316
4317 /*
4318 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4319 *
4320 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4321 * => caller shouldn't hold map locked.
4322 */
4323 int
4324 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4325 int nentries, int flags)
4326 {
4327
4328 umr->umr_nentries = 0;
4329
4330 if ((flags & UVM_FLAG_QUANTUM) != 0)
4331 return 0;
4332
4333 if (!VM_MAP_USE_KMAPENT(map))
4334 return 0;
4335
4336 while (nentries--) {
4337 struct vm_map_entry *ent;
4338 ent = uvm_kmapent_alloc(map, flags);
4339 if (!ent) {
4340 uvm_mapent_unreserve(map, umr);
4341 return ENOMEM;
4342 }
4343 UMR_PUTENTRY(umr, ent);
4344 }
4345
4346 return 0;
4347 }
4348
4349 /*
4350 * uvm_mapent_unreserve:
4351 *
4352 * => caller shouldn't hold map locked.
4353  * => never fails or sleeps.
4354 */
4355 void
4356 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4357 {
4358
4359 while (!UMR_EMPTY(umr))
4360 uvm_kmapent_free(UMR_GETENTRY(umr));
4361 }
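
/*
 * Typical reservation usage (a sketch; the actual unmap logic is elided
 * and the locals start, end and dead_entries are hypothetical):
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	if (uvm_mapent_reserve(map, &umr, 2, 0))
 *		return ENOMEM;
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, &umr, 0);
 *	vm_map_unlock(map);
 *	uvm_mapent_unreserve(map, &umr);
 *
 * two entries are reserved because clipping at the start and at the end
 * of the range may each consume one.
 */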
4362
4363 /*
4364 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4365 *
4366 * => called with map locked.
4367  * => returns non-zero if the entry was successfully merged.
4368 */
4369
4370 int
4371 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4372 {
4373 struct uvm_object *uobj;
4374 struct vm_map_entry *next;
4375 struct vm_map_entry *prev;
4376 vsize_t size;
4377 int merged = 0;
4378 boolean_t copying;
4379 int newetype;
4380
4381 if (VM_MAP_USE_KMAPENT(map)) {
4382 return 0;
4383 }
4384 if (entry->aref.ar_amap != NULL) {
4385 return 0;
4386 }
4387 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4388 return 0;
4389 }
4390
4391 uobj = entry->object.uvm_obj;
4392 size = entry->end - entry->start;
4393 copying = (flags & UVM_MERGE_COPYING) != 0;
4394 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4395
4396 next = entry->next;
4397 if (next != &map->header &&
4398 next->start == entry->end &&
4399 ((copying && next->aref.ar_amap != NULL &&
4400 amap_refs(next->aref.ar_amap) == 1) ||
4401 (!copying && next->aref.ar_amap == NULL)) &&
4402 UVM_ET_ISCOMPATIBLE(next, newetype,
4403 uobj, entry->flags, entry->protection,
4404 entry->max_protection, entry->inheritance, entry->advice,
4405 entry->wired_count) &&
4406 (uobj == NULL || entry->offset + size == next->offset)) {
4407 int error;
4408
4409 if (copying) {
4410 error = amap_extend(next, size,
4411 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4412 } else {
4413 error = 0;
4414 }
4415 if (error == 0) {
4416 if (uobj) {
4417 if (uobj->pgops->pgo_detach) {
4418 uobj->pgops->pgo_detach(uobj);
4419 }
4420 }
4421
4422 entry->end = next->end;
4423 uvm_map_entry_unlink(map, next);
4424 if (copying) {
4425 entry->aref = next->aref;
4426 entry->etype &= ~UVM_ET_NEEDSCOPY;
4427 }
4428 uvm_tree_sanity(map, "trymerge forwardmerge");
4429 uvm_mapent_free_merged(map, next);
4430 merged++;
4431 }
4432 }
4433
4434 prev = entry->prev;
4435 if (prev != &map->header &&
4436 prev->end == entry->start &&
4437 ((copying && !merged && prev->aref.ar_amap != NULL &&
4438 amap_refs(prev->aref.ar_amap) == 1) ||
4439 (!copying && prev->aref.ar_amap == NULL)) &&
4440 UVM_ET_ISCOMPATIBLE(prev, newetype,
4441 uobj, entry->flags, entry->protection,
4442 entry->max_protection, entry->inheritance, entry->advice,
4443 entry->wired_count) &&
4444 (uobj == NULL ||
4445 prev->offset + prev->end - prev->start == entry->offset)) {
4446 int error;
4447
4448 if (copying) {
4449 error = amap_extend(prev, size,
4450 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4451 } else {
4452 error = 0;
4453 }
4454 if (error == 0) {
4455 if (uobj) {
4456 if (uobj->pgops->pgo_detach) {
4457 uobj->pgops->pgo_detach(uobj);
4458 }
4459 entry->offset = prev->offset;
4460 }
4461
4462 entry->start = prev->start;
4463 uvm_map_entry_unlink(map, prev);
4464 if (copying) {
4465 entry->aref = prev->aref;
4466 entry->etype &= ~UVM_ET_NEEDSCOPY;
4467 }
4468 uvm_tree_sanity(map, "trymerge backmerge");
4469 uvm_mapent_free_merged(map, prev);
4470 merged++;
4471 }
4472 }
4473
4474 return merged;
4475 }
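
/*
 * Forward-merge illustration (addresses and offsets hypothetical):
 *
 *	before:	[entry: 0x1000-0x2000, off 0x0][next: 0x2000-0x3000, off 0x1000]
 *	after:	[entry: 0x1000-0x3000, off 0x0]
 *
 * the merge requires adjacent va ranges, compatible attributes (etype,
 * protection, inheritance, advice, wired count) and, for object-backed
 * entries, contiguous offsets (entry->offset + size == next->offset).
 */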
4476
4477 #if defined(DDB)
4478
4479 /*
4480 * DDB hooks
4481 */
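
/*
 * these printers back ddb(4) commands such as "show map", "show object"
 * and "show page" (command names as documented for NetBSD's ddb; the
 * "full" argument corresponds to the /f modifier).
 */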
4482
4483 /*
4484 * uvm_map_printit: actually prints the map
4485 */
4486
4487 void
4488 uvm_map_printit(struct vm_map *map, boolean_t full,
4489 void (*pr)(const char *, ...))
4490 {
4491 struct vm_map_entry *entry;
4492
4493 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
4494 vm_map_max(map));
4495 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
4496 map->nentries, map->size, map->ref_count, map->timestamp,
4497 map->flags);
4498 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4499 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4500 if (!full)
4501 return;
4502 for (entry = map->header.next; entry != &map->header;
4503 entry = entry->next) {
4504 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
4505 entry, entry->start, entry->end, entry->object.uvm_obj,
4506 (long long)entry->offset, entry->aref.ar_amap,
4507 entry->aref.ar_pageoff);
4508 (*pr)(
4509 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4510 "wc=%d, adv=%d\n",
4511 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4512 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4513 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4514 entry->protection, entry->max_protection,
4515 entry->inheritance, entry->wired_count, entry->advice);
4516 }
4517 }
4518
4519 /*
4520 * uvm_object_printit: actually prints the object
4521 */
4522
4523 void
4524 uvm_object_printit(struct uvm_object *uobj, boolean_t full,
4525 void (*pr)(const char *, ...))
4526 {
4527 struct vm_page *pg;
4528 int cnt = 0;
4529
4530 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
4531 uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
4532 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
4533 (*pr)("refs=<SYSTEM>\n");
4534 else
4535 (*pr)("refs=%d\n", uobj->uo_refs);
4536
4537 if (!full) {
4538 return;
4539 }
4540 (*pr)(" PAGES <pg,offset>:\n ");
4541 TAILQ_FOREACH(pg, &uobj->memq, listq) {
4542 cnt++;
4543 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
4544 if ((cnt % 3) == 0) {
4545 (*pr)("\n ");
4546 }
4547 }
4548 if ((cnt % 3) != 0) {
4549 (*pr)("\n");
4550 }
4551 }
4552
4553 /*
4554 * uvm_page_printit: actually print the page
4555 */
4556
4557 static const char page_flagbits[] =
4558 "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY"
4559 "\11ZERO\15PAGER1";
4560 static const char page_pqflagbits[] =
4561 "\20\1FREE\2INACTIVE\3ACTIVE\5ANON\6AOBJ";
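
/*
 * the strings above use the kernel's "%b"-style bitmask format (see
 * bitmask_snprintf(9)): the leading \20 (decimal 16) selects hexadecimal
 * output, and each following \N NAME pair names bit N, counted from 1 at
 * the LSB.  for example, a pg->flags value of 0x101 (BUSY, bit 1, plus
 * ZERO, bit 9) would be rendered roughly as "0x101<BUSY,ZERO>".
 */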
4562
4563 void
4564 uvm_page_printit(struct vm_page *pg, boolean_t full,
4565 void (*pr)(const char *, ...))
4566 {
4567 struct vm_page *tpg;
4568 struct uvm_object *uobj;
4569 struct pglist *pgl;
4570 char pgbuf[128];
4571 char pqbuf[128];
4572
4573 (*pr)("PAGE %p:\n", pg);
4574 bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf));
4575 bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf));
4576 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
4577 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
4578 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
4579 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
4580 #if defined(UVM_PAGE_TRKOWN)
4581 if (pg->flags & PG_BUSY)
4582 (*pr)(" owning process = %d, tag=%s\n",
4583 pg->owner, pg->owner_tag);
4584 else
4585 (*pr)(" page not busy, no owner\n");
4586 #else
4587 (*pr)(" [page ownership tracking disabled]\n");
4588 #endif
4589
4590 if (!full)
4591 return;
4592
4593 /* cross-verify object/anon */
4594 if ((pg->pqflags & PQ_FREE) == 0) {
4595 if (pg->pqflags & PQ_ANON) {
4596 if (pg->uanon == NULL || pg->uanon->an_page != pg)
4597 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
4598 (pg->uanon) ? pg->uanon->an_page : NULL);
4599 else
4600 (*pr)(" anon backpointer is OK\n");
4601 } else {
4602 uobj = pg->uobject;
4603 if (uobj) {
4604 (*pr)(" checking object list\n");
4605 TAILQ_FOREACH(tpg, &uobj->memq, listq) {
4606 if (tpg == pg) {
4607 break;
4608 }
4609 }
4610 if (tpg)
4611 (*pr)(" page found on object list\n");
4612 else
4613 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
4614 }
4615 }
4616 }
4617
4618 /* cross-verify page queue */
4619 if (pg->pqflags & PQ_FREE) {
4620 int fl = uvm_page_lookup_freelist(pg);
4621 int color = VM_PGCOLOR_BUCKET(pg);
4622 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
4623 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
4624 } else if (pg->pqflags & PQ_INACTIVE) {
4625 pgl = &uvm.page_inactive;
4626 } else if (pg->pqflags & PQ_ACTIVE) {
4627 pgl = &uvm.page_active;
4628 } else {
4629 pgl = NULL;
4630 }
4631
4632 if (pgl) {
4633 (*pr)(" checking pageq list\n");
4634 TAILQ_FOREACH(tpg, pgl, pageq) {
4635 if (tpg == pg) {
4636 break;
4637 }
4638 }
4639 if (tpg)
4640 (*pr)(" page found on pageq list\n");
4641 else
4642 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
4643 }
4644 }
4645 #endif
4646