/*	$NetBSD: uvm_map.c,v 1.275 2009/08/01 16:35:51 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.275 2009/08/01 16:35:51 yamt Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif
#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_list)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)
UVMMAP_EVCNT_DEFINE(mlk_listloop)

UVMMAP_EVCNT_DEFINE(uke_alloc)
UVMMAP_EVCNT_DEFINE(uke_free)
UVMMAP_EVCNT_DEFINE(ukh_alloc)
UVMMAP_EVCNT_DEFINE(ukh_free)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
 * for the vm_map.
 */
extern struct vm_map *pager_map; /* XXX */
#define	VM_MAP_USE_KMAPENT_FLAGS(flags) \
	(((flags) & VM_MAP_INTRSAFE) != 0)
#define	VM_MAP_USE_KMAPENT(map) \
	(VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
	== 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define	SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define	VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static struct vm_map_entry *
		uvm_mapent_alloc_split(struct vm_map *,
		    const struct vm_map_entry *, int,
		    struct uvm_mapent_reservation *);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, const char *,
		    int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __FILE__, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */
static struct vm_map_entry *
		uvm_kmapent_alloc(struct vm_map *, int);
static void	uvm_kmapent_free(struct vm_map_entry *);
static vsize_t	uvm_kmapent_overhead(vsize_t);

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

CTASSERT(offsetof(struct vm_map_entry, rb_node) == 0);
#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL \
	    : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

static int
uvm_map_compare_nodes(const struct rb_node *nparent,
    const struct rb_node *nkey)
{
	const struct vm_map_entry *eparent = (const void *) nparent;
	const struct vm_map_entry *ekey = (const void *) nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (ekey->start < eparent->start)
		return -1;
	if (ekey->start >= eparent->end)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(const struct rb_node *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = (const void *) nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (va < eparent->start)
		return -1;
	if (va >= eparent->end)
		return 1;
	return 0;
}

static const struct rb_tree_ops uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
};

static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{
	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendants.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendants, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
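
/*
 * Illustration (editorial sketch, derived from the two functions above):
 * "gap" is the free space between an entry and its address-order
 * successor, while "maxgap" summarizes a whole subtree so the free-space
 * search can prune branches without walking them.
 *
 *	entry A: [0x1000, 0x3000), next starts at 0x8000 -> gap = 0x5000
 *	entry B: [0x8000, 0x9000), next starts at 0x9000 -> gap = 0
 *
 * If A and B sit in the subtree rooted at some entry R, then
 * R->maxgap >= 0x5000, and a search for any gap <= 0x5000 never needs
 * to look outside R's subtree.
 */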

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work our way towards the root, we know entry's
		 * maxgap value is OK, but its brother's may now be
		 * out-of-date due to rebalancing.  So refresh it.
		 */
		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[
		    RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	if (!rb_tree_insert_node(&map->rb_tree, &entry->rb_node))
		panic("uvm_rb_insert: map %p: duplicate entry?", map);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, &entry->rb_node);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define	uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define	uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %lx != %lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, tmp->gap will be unsigned
		 * and will likely exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %lx != %lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("corrupt: 0x%lx >= 0x%lx\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
		    RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
		    RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = (void *)rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

#ifdef DIAGNOSTIC
static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
#endif

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => Note that "intrsafe" maps use only exclusive, spin locks.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		mutex_spin_enter(&map->mutex);
		return;
	}

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL)
			break;
		if (map->busy == curlwp)
			break;
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL)
			cv_wait(&map->cv, &map->misc_lock);
		mutex_exit(&map->misc_lock);
	}

	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		return mutex_tryenter(&map->mutex);
	if (!rw_tryenter(&map->lock, RW_WRITER))
		return false;
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}

	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		mutex_spin_exit(&map->mutex);
	else {
		KASSERT(rw_write_held(&map->lock));
		KASSERT(map->busy == NULL || map->busy == curlwp);
		rw_exit(&map->lock);
	}
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 *     want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}
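
/*
 * Usage sketch (editorial; follows from vm_map_lock/vm_map_busy/
 * vm_map_unbusy above): the thread that marked the map busy may drop
 * down to a read lock and later re-upgrade, because vm_map_lock() lets
 * the busy owner through while every other writer waits on map->cv.
 *
 *	vm_map_lock(map);		// exclusive
 *	vm_map_busy(map);		// we own the "busy" token now
 *	vm_map_unlock(map);
 *	vm_map_lock_read(map);
 *	// ... sleep-prone work with the map merely read-locked ...
 *	vm_map_unlock_read(map);
 *	vm_map_lock(map);		// guaranteed upgrade for the busy owner
 *	vm_map_unbusy(map);		// wake threads waiting in vm_map_lock()
 *	vm_map_unlock(map);
 */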

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		return mutex_owned(&map->mutex);
	} else {
		return rw_write_held(&map->lock);
	}
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (VM_MAP_USE_KMAPENT(map)) {
		me = uvm_kmapent_alloc(map, flags);
	} else {
		me = pool_cache_get(&uvm_map_entry_cache, pflags);
		if (__predict_false(me == NULL))
			return NULL;
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return (me);
}

/*
 * uvm_mapent_alloc_split: allocate a map entry for clipping.
 *
 * => map must be locked by caller if UVM_MAP_QUANTUM is set.
 */

static struct vm_map_entry *
uvm_mapent_alloc_split(struct vm_map *map,
    const struct vm_map_entry *old_entry, int flags,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *me;

	KASSERT(!VM_MAP_USE_KMAPENT(map) ||
	    (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));

	if (old_entry->flags & UVM_MAP_QUANTUM) {
		struct vm_map_kernel *vmk = vm_map_to_kernel(map);

		KASSERT(vm_map_locked_p(map));
		me = vmk->vmk_merged_entries;
		KASSERT(me);
		vmk->vmk_merged_entries = me->next;
		KASSERT(me->flags & UVM_MAP_QUANTUM);
	} else {
		me = uvm_mapent_alloc(map, flags);
	}

	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_KERNEL) {
		uvm_kmapent_free(me);
	} else {
		pool_cache_put(&uvm_map_entry_cache, me);
	}
}

/*
 * uvm_mapent_free_merged: free merged map entry
 *
 * => keep the entry if needed.
 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
 * => map should be locked if UVM_MAP_QUANTUM is set.
 */

static void
uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
{

	KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);

	if (me->flags & UVM_MAP_QUANTUM) {
		/*
		 * keep this entry for later splitting.
		 */
		struct vm_map_kernel *vmk;

		KASSERT(vm_map_locked_p(map));
		KASSERT(VM_MAP_IS_KERNEL(map));
		KASSERT(!VM_MAP_USE_KMAPENT(map) ||
		    (me->flags & UVM_MAP_KERNEL));

		vmk = vm_map_to_kernel(map);
		me->next = vmk->vmk_merged_entries;
		vmk->vmk_merged_entries = me;
	} else {
		uvm_mapent_free(me);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

/*
 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
 * map entries.
 *
 * => size and flags are the same as uvm_km_suballoc's ones.
 */

vsize_t
uvm_mapent_overhead(vsize_t size, int flags)
{

	if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
		return uvm_kmapent_overhead(size);
	}
	return 0;
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);

	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start, struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}
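
/*
 * Usage sketch (editorial): a caller that wants to operate on exactly
 * [start, end) within an entry found by uvm_map_lookup_entry() typically
 * clips both edges first, with the map write-locked, via the
 * UVM_MAP_CLIP_START/UVM_MAP_CLIP_END macros mentioned above:
 *
 *	UVM_MAP_CLIP_START(map, entry, start, umr);
 *	UVM_MAP_CLIP_END(map, entry, end, umr);
 *	// entry now covers [start, end) exactly; the split-off pieces
 *	// become its neighbours in the map
 */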

static void
vm_map_drain(struct vm_map *map, uvm_flag_t flags)
{

	if (!VM_MAP_IS_KERNEL(map)) {
		return;
	}

	uvm_km_va_drain(map, flags);
}

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.  note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */
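
/*
 * Call sketch (editorial; the flag/protection macros are the usual ones
 * from uvm_extern.h): an anonymous, copy-on-write mapping at a
 * kernel-chosen address would look roughly like this.
 *
 *	vaddr_t va = 0;		// no useful hint; uvm_map picks the address
 *	int error = uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *		UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
 *	// on success, [va, va + size) is mapped (case [2] above)
 */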

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
	KASSERT((size & PAGE_MASK) == 0);

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 *
	 * Also, because we allocate entries for in-kernel maps
	 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
	 * allocate them before locking the map.
	 */

	new_entry = NULL;
	if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
	    map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
		if (flags & UVM_FLAG_QUANTUM)
			new_entry->flags |= UVM_MAP_QUANTUM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map)) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC("uvm_map_prepare");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL ||
	    (map->flags & VM_MAP_INTRSAFE));

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */

retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
		    (map->flags & VM_MAP_INTRSAFE) == 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags);
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		vm_map_drain(map, flags);

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.  in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).  this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).  the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	return 0;
}

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);
	const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
	    UVM_MAP_QUANTUM : 0;

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC("uvm_map_enter");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */

	if (flags & UVM_FLAG_QUANTUM) {
		KASSERT(new_entry);
		KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
	}

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.  might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk.  Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 * (if merged, we've already detached)
		 */
		if (uobj && uobj->pgops->pgo_detach && !merged)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;
done:
	if ((flags & UVM_FLAG_QUANTUM) == 0) {
		/*
		 * vmk_merged_entries is locked by the map's lock.
		 */
		vm_map_unlock(map);
	}
	if (new_entry && error == 0) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, new_entry);
		new_entry = NULL;
	}
	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, dead);
	}
	if ((flags & UVM_FLAG_QUANTUM) != 0) {
		vm_map_unlock(map);
	}
	if (new_entry != NULL) {
		uvm_mapent_free(new_entry);
	}
	return error;
}

/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	while (cur) {
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			cur = RIGHT_ENTRY(cur);
		} else
			cur = LEFT_ENTRY(cur);
	}
	*entry = prev;
	return false;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	bool use_tree = false;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	UVMMAP_EVCNT_INCR(mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		if (cur != &map->header && cur->end > address) {
			UVMMAP_EVCNT_INCR(mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			uvm_mapent_check(*entry);
			return (true);
		}

		if (map->nentries > 15)
			use_tree = true;
	} else {

		/*
		 * invalid hint.  use tree.
		 */
		use_tree = true;
	}

	uvm_map_check(map, __func__);

	if (use_tree) {
		/*
		 * Simple lookup in the tree.  Happens when the hint is
		 * invalid, or nentries reach a threshold.
		 */
		UVMMAP_EVCNT_INCR(mlk_tree);
		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
			goto got;
		} else {
			goto failed;
		}
	}

	/*
	 * search linearly
	 */

	UVMMAP_EVCNT_INCR(mlk_list);
	while (cur != &map->header) {
		UVMMAP_EVCNT_INCR(mlk_listloop);
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
got:
				SAVE_HINT(map, map->hint, *entry);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				KDASSERT((*entry)->start <= address);
				KDASSERT(address < (*entry)->end);
				uvm_mapent_check(*entry);
				return (true);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
failed:
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}
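
/*
 * Usage sketch (editorial): the common caller pattern, with at least a
 * read lock held as required above.
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		// va lies within [entry->start, entry->end)
 *	} else {
 *		// entry is the nearest predecessor (or &map->header)
 *	}
 *	vm_map_unlock_read(map);
 */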

/*
 * See if the range between start and start + length fits in the gap
 * entry->next->start and entry->end.  Returns 1 if it fits, 0 if it
 * doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if (align != 0) {
		if ((*start & (align - 1)) != 0) {
			if (topdown)
				*start &= ~(align - 1);
			else
				*start = roundup(*start, align);
		}
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}
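
/*
 * Alignment example (editorial; align must be a power of two, as asserted
 * in uvm_map_findspace() below): with align = 0x1000 and *start = 0x12345,
 * a top-down search rounds down to 0x12000 while a bottom-up search rounds
 * up to 0x13000, i.e. each direction moves the candidate address further
 * along its own search order.
 */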

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set in "flags" (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if "align" is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

struct vm_map_entry *
uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
    vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
    vsize_t align, int flags)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *child, *prev, *tmp;
	vaddr_t orig_hint;
	const int topdown = map->flags & VM_MAP_TOPDOWN;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
	    map, hint, length, flags);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	uvm_map_check(map, "map_findspace entry");

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < vm_map_min(map)) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return (NULL);
		}
		hint = vm_map_min(map);
	}
	if (hint > vm_map_max(map)) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, vm_map_min(map), vm_map_max(map), 0);
		return (NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	/*
	 * @@@: there are four, no, eight cases to consider.
	 *
	 * 0: found,     fixed,     bottom up -> fail
	 * 1: found,     fixed,     top down  -> fail
	 * 2: found,     not fixed, bottom up -> start after entry->end,
	 *	loop up
	 * 3: found,     not fixed, top down  -> start before entry->start,
	 *	loop down
	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
	 * 6: not found, not fixed, bottom up -> check entry->next->start,
	 *	loop up
	 * 7: not found, not fixed, top down  -> check entry->next->start,
	 *	loop down
	 *
	 * as you can see, it reduces to roughly five cases, and that
	 * adding top down mapping only adds one unique case (without
	 * it, there would be four cases).
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
		entry = map->first_free;
	} else {
		if (uvm_map_lookup_entry(map, hint, &entry)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist, "<- fixed & VA in use",
				    0, 0, 0, 0);
				return (NULL);
			}
			if (topdown)
				/* Start from lower gap. */
				entry = entry->prev;
		} else if (flags & UVM_FLAG_FIXED) {
			if (entry->next->start >= hint + length &&
			    hint + length > hint)
				goto found;

			/* "hint" address is gap but too small */
			UVMHIST_LOG(maphist, "<- fixed mapping failed",
			    0, 0, 0, 0);
			return (NULL); /* only one shot at it ... */
		} else {
			/*
			 * See if given hint fits in this gap.
			 */
			switch (uvm_map_space_avail(&hint, length,
			    uoffset, align, topdown, entry)) {
			case 1:
				goto found;
			case -1:
				goto wraparound;
			}

			if (topdown) {
				/*
				 * Still there is a chance to fit
				 * if hint > entry->end.
				 */
			} else {
				/* Start from higher gap. */
				entry = entry->next;
				if (entry == &map->header)
					goto notfound;
				goto nextgap;
			}
		}
	}
2021
2022 /*
2023 	 * Note that the UVM_FLAG_FIXED case has already been handled.
2024 */
2025 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2026
2027 /* Try to find the space in the red-black tree */
2028
2029 /* Check slot before any entry */
2030 hint = topdown ? entry->next->start - length : entry->end;
2031 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2032 topdown, entry)) {
2033 case 1:
2034 goto found;
2035 case -1:
2036 goto wraparound;
2037 }
2038
2039 nextgap:
2040 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2041 /* If there is not enough space in the whole tree, we fail */
2042 tmp = ROOT_ENTRY(map);
2043 if (tmp == NULL || tmp->maxgap < length)
2044 goto notfound;
2045
2046 prev = NULL; /* previous candidate */
2047
2048 /* Find an entry close to hint that has enough space */
2049 for (; tmp;) {
2050 KASSERT(tmp->next->start == tmp->end + tmp->gap);
2051 if (topdown) {
2052 if (tmp->next->start < hint + length &&
2053 (prev == NULL || tmp->end > prev->end)) {
2054 if (tmp->gap >= length)
2055 prev = tmp;
2056 else if ((child = LEFT_ENTRY(tmp)) != NULL
2057 && child->maxgap >= length)
2058 prev = tmp;
2059 }
2060 } else {
2061 if (tmp->end >= hint &&
2062 (prev == NULL || tmp->end < prev->end)) {
2063 if (tmp->gap >= length)
2064 prev = tmp;
2065 else if ((child = RIGHT_ENTRY(tmp)) != NULL
2066 && child->maxgap >= length)
2067 prev = tmp;
2068 }
2069 }
2070 if (tmp->next->start < hint + length)
2071 child = RIGHT_ENTRY(tmp);
2072 else if (tmp->end > hint)
2073 child = LEFT_ENTRY(tmp);
2074 else {
2075 if (tmp->gap >= length)
2076 break;
2077 if (topdown)
2078 child = LEFT_ENTRY(tmp);
2079 else
2080 child = RIGHT_ENTRY(tmp);
2081 }
2082 if (child == NULL || child->maxgap < length)
2083 break;
2084 tmp = child;
2085 }
2086
2087 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2088 /*
2089 		 * Check if the entry that we found satisfies the
2090 		 * space requirement.
2091 */
2092 if (topdown) {
2093 if (hint > tmp->next->start - length)
2094 hint = tmp->next->start - length;
2095 } else {
2096 if (hint < tmp->end)
2097 hint = tmp->end;
2098 }
2099 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2100 topdown, tmp)) {
2101 case 1:
2102 entry = tmp;
2103 goto found;
2104 case -1:
2105 goto wraparound;
2106 }
2107 if (tmp->gap >= length)
2108 goto listsearch;
2109 }
2110 if (prev == NULL)
2111 goto notfound;
2112
2113 if (topdown) {
2114 KASSERT(orig_hint >= prev->next->start - length ||
2115 prev->next->start - length > prev->next->start);
2116 hint = prev->next->start - length;
2117 } else {
2118 KASSERT(orig_hint <= prev->end);
2119 hint = prev->end;
2120 }
2121 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2122 topdown, prev)) {
2123 case 1:
2124 entry = prev;
2125 goto found;
2126 case -1:
2127 goto wraparound;
2128 }
2129 if (prev->gap >= length)
2130 goto listsearch;
2131
2132 if (topdown)
2133 tmp = LEFT_ENTRY(prev);
2134 else
2135 tmp = RIGHT_ENTRY(prev);
2136 for (;;) {
2137 KASSERT(tmp && tmp->maxgap >= length);
2138 if (topdown)
2139 child = RIGHT_ENTRY(tmp);
2140 else
2141 child = LEFT_ENTRY(tmp);
2142 if (child && child->maxgap >= length) {
2143 tmp = child;
2144 continue;
2145 }
2146 if (tmp->gap >= length)
2147 break;
2148 if (topdown)
2149 tmp = LEFT_ENTRY(tmp);
2150 else
2151 tmp = RIGHT_ENTRY(tmp);
2152 }
2153
2154 if (topdown) {
2155 KASSERT(orig_hint >= tmp->next->start - length ||
2156 tmp->next->start - length > tmp->next->start);
2157 hint = tmp->next->start - length;
2158 } else {
2159 KASSERT(orig_hint <= tmp->end);
2160 hint = tmp->end;
2161 }
2162 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2163 topdown, tmp)) {
2164 case 1:
2165 entry = tmp;
2166 goto found;
2167 case -1:
2168 goto wraparound;
2169 }
2170
2171 /*
2172 * The tree fails to find an entry because of offset or alignment
2173 * restrictions. Search the list instead.
2174 */
2175 listsearch:
2176 /*
2177 * Look through the rest of the map, trying to fit a new region in
2178 * the gap between existing regions, or after the very last region.
2179 * note: entry->end = base VA of current gap,
2180 * entry->next->start = VA of end of current gap
2181 */
2182
2183 for (;;) {
2184 /* Update hint for current gap. */
2185 hint = topdown ? entry->next->start - length : entry->end;
2186
2187 /* See if it fits. */
2188 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2189 topdown, entry)) {
2190 case 1:
2191 goto found;
2192 case -1:
2193 goto wraparound;
2194 }
2195
2196 /* Advance to next/previous gap */
2197 if (topdown) {
2198 if (entry == &map->header) {
2199 UVMHIST_LOG(maphist, "<- failed (off start)",
2200 0,0,0,0);
2201 goto notfound;
2202 }
2203 entry = entry->prev;
2204 } else {
2205 entry = entry->next;
2206 if (entry == &map->header) {
2207 UVMHIST_LOG(maphist, "<- failed (off end)",
2208 0,0,0,0);
2209 goto notfound;
2210 }
2211 }
2212 }
2213
2214 found:
2215 SAVE_HINT(map, map->hint, entry);
2216 *result = hint;
2217 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2218 KASSERT( topdown || hint >= orig_hint);
2219 KASSERT(!topdown || hint <= orig_hint);
2220 KASSERT(entry->end <= hint);
2221 KASSERT(hint + length <= entry->next->start);
2222 return (entry);
2223
2224 wraparound:
2225 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2226
2227 return (NULL);
2228
2229 notfound:
2230 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2231
2232 return (NULL);
2233 }
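/*
 * Example (sketch of a hypothetical caller; uvm_map() itself does
 * something similar internally): find space with the map locked, then
 * use the returned predecessor entry as the insertion point:
 *
 *	struct vm_map_entry *prev_entry;
 *	vaddr_t va;
 *
 *	vm_map_lock(map);
 *	prev_entry = uvm_map_findspace(map, hint, size, &va,
 *	    NULL, 0, 0, 0);
 *	if (prev_entry == NULL) {
 *		vm_map_unlock(map);
 *		return ENOMEM;
 *	}
 *	... link the new entry after "prev_entry" at "va" ...
 *	vm_map_unlock(map);
 */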
2234
2235 /*
2236 * U N M A P - m a i n h e l p e r f u n c t i o n s
2237 */
2238
2239 /*
2240 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2241 *
2242 * => caller must check alignment and size
2243 * => map must be locked by caller
2244  * => we return a list of map entries that we've removed from the map
2245 * in "entry_list"
2246 */
2247
2248 void
2249 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2250 struct vm_map_entry **entry_list /* OUT */,
2251 struct uvm_mapent_reservation *umr, int flags)
2252 {
2253 struct vm_map_entry *entry, *first_entry, *next;
2254 vaddr_t len;
2255 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2256
2257 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2258 map, start, end, 0);
2259 VM_MAP_RANGE_CHECK(map, start, end);
2260
2261 uvm_map_check(map, "unmap_remove entry");
2262
2263 /*
2264 * find first entry
2265 */
2266
2267 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2268 /* clip and go... */
2269 entry = first_entry;
2270 UVM_MAP_CLIP_START(map, entry, start, umr);
2271 /* critical! prevents stale hint */
2272 SAVE_HINT(map, entry, entry->prev);
2273 } else {
2274 entry = first_entry->next;
2275 }
2276
2277 /*
2278 * Save the free space hint
2279 */
2280
2281 if (map->first_free != &map->header && map->first_free->start >= start)
2282 map->first_free = entry->prev;
2283
2284 /*
2285 * note: we now re-use first_entry for a different task. we remove
2286 * a number of map entries from the map and save them in a linked
2287 * list headed by "first_entry". once we remove them from the map
2288 * the caller should unlock the map and drop the references to the
2289  *    backing objects [cf. uvm_unmap_detach].  the goal is to
2290  *    separate unmapping from reference dropping.  why?
2291 * [1] the map has to be locked for unmapping
2292 * [2] the map need not be locked for reference dropping
2293 * [3] dropping references may trigger pager I/O, and if we hit
2294 * a pager that does synchronous I/O we may have to wait for it.
2295 * [4] we would like all waiting for I/O to occur with maps unlocked
2296 * so that we don't block other threads.
2297 */
2298
2299 first_entry = NULL;
2300 *entry_list = NULL;
2301
2302 /*
2303 * break up the area into map entry sized regions and unmap. note
2304 * that all mappings have to be removed before we can even consider
2305 * dropping references to amaps or VM objects (otherwise we could end
2306 * up with a mapping to a page on the free list which would be very bad)
2307 */
2308
2309 while ((entry != &map->header) && (entry->start < end)) {
2310 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2311
2312 UVM_MAP_CLIP_END(map, entry, end, umr);
2313 next = entry->next;
2314 len = entry->end - entry->start;
2315
2316 /*
2317 * unwire before removing addresses from the pmap; otherwise
2318 * unwiring will put the entries back into the pmap (XXX).
2319 */
2320
2321 if (VM_MAPENT_ISWIRED(entry)) {
2322 uvm_map_entry_unwire(map, entry);
2323 }
2324 if (flags & UVM_FLAG_VAONLY) {
2325
2326 /* nothing */
2327
2328 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2329
2330 /*
2331 * if the map is non-pageable, any pages mapped there
2332 * must be wired and entered with pmap_kenter_pa(),
2333 * and we should free any such pages immediately.
2334 * this is mostly used for kmem_map and mb_map.
2335 */
2336
2337 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2338 uvm_km_pgremove_intrsafe(map, entry->start,
2339 entry->end);
2340 pmap_kremove(entry->start, len);
2341 }
2342 } else if (UVM_ET_ISOBJ(entry) &&
2343 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2344 KASSERT(vm_map_pmap(map) == pmap_kernel());
2345
2346 /*
2347 * note: kernel object mappings are currently used in
2348 * two ways:
2349 * [1] "normal" mappings of pages in the kernel object
2350 * [2] uvm_km_valloc'd allocations in which we
2351 * pmap_enter in some non-kernel-object page
2352 * (e.g. vmapbuf).
2353 *
2354 * for case [1], we need to remove the mapping from
2355 * the pmap and then remove the page from the kernel
2356 * object (because, once pages in a kernel object are
2357 * unmapped they are no longer needed, unlike, say,
2358 * a vnode where you might want the data to persist
2359 * until flushed out of a queue).
2360 *
2361 * for case [2], we need to remove the mapping from
2362 * the pmap. there shouldn't be any pages at the
2363 * specified offset in the kernel object [but it
2364 * doesn't hurt to call uvm_km_pgremove just to be
2365 * safe?]
2366 *
2367 * uvm_km_pgremove currently does the following:
2368 * for pages in the kernel object in range:
2369 * - drops the swap slot
2370 * - uvm_pagefree the page
2371 */
2372
2373 /*
2374 * remove mappings from pmap and drop the pages
2375 * from the object. offsets are always relative
2376 * to vm_map_min(kernel_map).
2377 */
2378
2379 pmap_remove(pmap_kernel(), entry->start,
2380 entry->start + len);
2381 uvm_km_pgremove(entry->start, entry->end);
2382
2383 /*
2384 * null out kernel_object reference, we've just
2385 * dropped it
2386 */
2387
2388 entry->etype &= ~UVM_ET_OBJ;
2389 entry->object.uvm_obj = NULL;
2390 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2391
2392 /*
2393 * remove mappings the standard way.
2394 */
2395
2396 pmap_remove(map->pmap, entry->start, entry->end);
2397 }
2398
2399 #if defined(DEBUG)
2400 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2401
2402 /*
2403 			 * check if there are any remaining mappings,
2404 			 * which would indicate a bug in the caller.
2405 */
2406
2407 vaddr_t va;
2408 for (va = entry->start; va < entry->end;
2409 va += PAGE_SIZE) {
2410 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2411 panic("uvm_unmap_remove: has mapping");
2412 }
2413 }
2414
2415 if (VM_MAP_IS_KERNEL(map)) {
2416 uvm_km_check_empty(map, entry->start,
2417 entry->end);
2418 }
2419 }
2420 #endif /* defined(DEBUG) */
2421
2422 /*
2423 * remove entry from map and put it on our list of entries
2424 * that we've nuked. then go to next entry.
2425 */
2426
2427 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2428
2429 /* critical! prevents stale hint */
2430 SAVE_HINT(map, entry, entry->prev);
2431
2432 uvm_map_entry_unlink(map, entry);
2433 KASSERT(map->size >= len);
2434 map->size -= len;
2435 entry->prev = NULL;
2436 entry->next = first_entry;
2437 first_entry = entry;
2438 entry = next;
2439 }
2440 if ((map->flags & VM_MAP_DYING) == 0) {
2441 pmap_update(vm_map_pmap(map));
2442 }
2443
2444 uvm_map_check(map, "unmap_remove leave");
2445
2446 /*
2447 * now we've cleaned up the map and are ready for the caller to drop
2448 * references to the mapped objects.
2449 */
2450
2451 *entry_list = first_entry;
2452 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2453
2454 if (map->flags & VM_MAP_WANTVA) {
2455 mutex_enter(&map->misc_lock);
2456 map->flags &= ~VM_MAP_WANTVA;
2457 cv_broadcast(&map->cv);
2458 mutex_exit(&map->misc_lock);
2459 }
2460 }
2461
2462 /*
2463 * uvm_unmap_detach: drop references in a chain of map entries
2464 *
2465 * => we will free the map entries as we traverse the list.
2466 */
2467
2468 void
2469 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2470 {
2471 struct vm_map_entry *next_entry;
2472 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2473
2474 while (first_entry) {
2475 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2476 UVMHIST_LOG(maphist,
2477 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2478 first_entry, first_entry->aref.ar_amap,
2479 first_entry->object.uvm_obj,
2480 UVM_ET_ISSUBMAP(first_entry));
2481
2482 /*
2483 * drop reference to amap, if we've got one
2484 */
2485
2486 if (first_entry->aref.ar_amap)
2487 uvm_map_unreference_amap(first_entry, flags);
2488
2489 /*
2490 * drop reference to our backing object, if we've got one
2491 */
2492
2493 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2494 if (UVM_ET_ISOBJ(first_entry) &&
2495 first_entry->object.uvm_obj->pgops->pgo_detach) {
2496 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2497 (first_entry->object.uvm_obj);
2498 }
2499 next_entry = first_entry->next;
2500 uvm_mapent_free(first_entry);
2501 first_entry = next_entry;
2502 }
2503 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2504 }
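/*
 * Example (sketch; uvm_unmap() is essentially this): the two-phase
 * pattern the comments above describe -- unlink the entries with the
 * map locked, then drop object/amap references with the map unlocked:
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */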
2505
2506 /*
2507 * E X T R A C T I O N F U N C T I O N S
2508 */
2509
2510 /*
2511 * uvm_map_reserve: reserve space in a vm_map for future use.
2512 *
2513 * => we reserve space in a map by putting a dummy map entry in the
2514 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2515 * => map should be unlocked (we will write lock it)
2516 * => we return true if we were able to reserve space
2517 * => XXXCDC: should be inline?
2518 */
2519
2520 int
2521 uvm_map_reserve(struct vm_map *map, vsize_t size,
2522 vaddr_t offset /* hint for pmap_prefer */,
2523 vsize_t align /* alignment */,
2524 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2525 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2526 {
2527 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2528
2529 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2530 map,size,offset,raddr);
2531
2532 size = round_page(size);
2533
2534 /*
2535 * reserve some virtual space.
2536 */
2537
2538 if (uvm_map(map, raddr, size, NULL, offset, align,
2539 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2540 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2541 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2542 return (false);
2543 }
2544
2545 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2546 return (true);
2547 }
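/*
 * Example (hypothetical caller): reserve page-rounded space anywhere
 * in the kernel map; *raddr is both the hint in and the result out:
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *
 *	if (!uvm_map_reserve(kernel_map, len, 0, 0, &va, 0))
 *		return ENOMEM;
 *	... "va" now names a blank VM_PROT_NONE entry of "len" bytes ...
 */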
2548
2549 /*
2550 * uvm_map_replace: replace a reserved (blank) area of memory with
2551 * real mappings.
2552 *
2553 * => caller must WRITE-LOCK the map
2554 * => we return true if replacement was a success
2555  * => we expect the newents chain to have nnewents entries on it and
2556 * we expect newents->prev to point to the last entry on the list
2557 * => note newents is allowed to be NULL
2558 */
2559
2560 static int
2561 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2562 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2563 struct vm_map_entry **oldentryp)
2564 {
2565 struct vm_map_entry *oldent, *last;
2566
2567 uvm_map_check(map, "map_replace entry");
2568
2569 /*
2570 * first find the blank map entry at the specified address
2571 */
2572
2573 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2574 return (false);
2575 }
2576
2577 /*
2578 * check to make sure we have a proper blank entry
2579 */
2580
2581 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2582 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2583 }
2584 if (oldent->start != start || oldent->end != end ||
2585 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2586 return (false);
2587 }
2588
2589 #ifdef DIAGNOSTIC
2590
2591 /*
2592 * sanity check the newents chain
2593 */
2594
2595 {
2596 struct vm_map_entry *tmpent = newents;
2597 int nent = 0;
2598 vsize_t sz = 0;
2599 vaddr_t cur = start;
2600
2601 while (tmpent) {
2602 nent++;
2603 sz += tmpent->end - tmpent->start;
2604 if (tmpent->start < cur)
2605 panic("uvm_map_replace1");
2606 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2607 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2608 tmpent->start, tmpent->end, end);
2609 panic("uvm_map_replace2");
2610 }
2611 cur = tmpent->end;
2612 if (tmpent->next) {
2613 if (tmpent->next->prev != tmpent)
2614 panic("uvm_map_replace3");
2615 } else {
2616 if (newents->prev != tmpent)
2617 panic("uvm_map_replace4");
2618 }
2619 tmpent = tmpent->next;
2620 }
2621 if (nent != nnewents)
2622 panic("uvm_map_replace5");
2623 if (sz != nsize)
2624 panic("uvm_map_replace6");
2625 }
2626 #endif
2627
2628 /*
2629 * map entry is a valid blank! replace it. (this does all the
2630 * work of map entry link/unlink...).
2631 */
2632
2633 if (newents) {
2634 last = newents->prev;
2635
2636 /* critical: flush stale hints out of map */
2637 SAVE_HINT(map, map->hint, newents);
2638 if (map->first_free == oldent)
2639 map->first_free = last;
2640
2641 last->next = oldent->next;
2642 last->next->prev = last;
2643
2644 /* Fix RB tree */
2645 uvm_rb_remove(map, oldent);
2646
2647 newents->prev = oldent->prev;
2648 newents->prev->next = newents;
2649 map->nentries = map->nentries + (nnewents - 1);
2650
2651 /* Fixup the RB tree */
2652 {
2653 int i;
2654 struct vm_map_entry *tmp;
2655
2656 tmp = newents;
2657 for (i = 0; i < nnewents && tmp; i++) {
2658 uvm_rb_insert(map, tmp);
2659 tmp = tmp->next;
2660 }
2661 }
2662 } else {
2663 /* NULL list of new entries: just remove the old one */
2664 clear_hints(map, oldent);
2665 uvm_map_entry_unlink(map, oldent);
2666 }
2667 map->size -= end - start - nsize;
2668
2669 uvm_map_check(map, "map_replace leave");
2670
2671 /*
2672 * now we can free the old blank entry and return.
2673 */
2674
2675 *oldentryp = oldent;
2676 return (true);
2677 }
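/*
 * Illustration (assumed shape, per the header comment above): for
 * nnewents == 3 the caller passes a chain of the form
 *
 *	newents -> e1 <-> e2 <-> e3	(e3->next == NULL)
 *	newents->prev == e3		(the head's prev names the tail)
 *
 * which is exactly the shape uvm_map_extract() builds in its step 4.
 */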
2678
2679 /*
2680 * uvm_map_extract: extract a mapping from a map and put it somewhere
2681 * (maybe removing the old mapping)
2682 *
2683 * => maps should be unlocked (we will write lock them)
2684 * => returns 0 on success, error code otherwise
2685 * => start must be page aligned
2686 * => len must be page sized
2687 * => flags:
2688 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2689 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2690 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2691 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2692 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2693 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2694 * be used from within the kernel in a kernel level map <<<
2695 */
2696
2697 int
2698 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2699 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2700 {
2701 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2702 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2703 *deadentry, *oldentry;
2704 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2705 vsize_t elen;
2706 int nchain, error, copy_ok;
2707 vsize_t nsize;
2708 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2709
2710 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2711 len,0);
2712 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2713
2714 /*
2715 * step 0: sanity check: start must be on a page boundary, length
2716 * must be page sized. can't ask for CONTIG/QREF if you asked for
2717 * REMOVE.
2718 */
2719
2720 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2721 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2722 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2723
2724 /*
2725 * step 1: reserve space in the target map for the extracted area
2726 */
2727
2728 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2729 dstaddr = vm_map_min(dstmap);
2730 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2731 return (ENOMEM);
2732 *dstaddrp = dstaddr; /* pass address back to caller */
2733 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2734 } else {
2735 dstaddr = *dstaddrp;
2736 }
2737
2738 /*
2739 * step 2: setup for the extraction process loop by init'ing the
2740 * map entry chain, locking src map, and looking up the first useful
2741 * entry in the map.
2742 */
2743
2744 end = start + len;
2745 newend = dstaddr + len;
2746 chain = endchain = NULL;
2747 nchain = 0;
2748 nsize = 0;
2749 vm_map_lock(srcmap);
2750
2751 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2752
2753 /* "start" is within an entry */
2754 if (flags & UVM_EXTRACT_QREF) {
2755
2756 /*
2757 * for quick references we don't clip the entry, so
2758 * the entry may map space "before" the starting
2759 * virtual address... this is the "fudge" factor
2760 * (which can be non-zero only the first time
2761 * through the "while" loop in step 3).
2762 */
2763
2764 fudge = start - entry->start;
2765 } else {
2766
2767 /*
2768 			 * normal reference: we clip the map entry to fit
2769 			 * (thus fudge is zero)
2770 */
2771
2772 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2773 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2774 fudge = 0;
2775 }
2776 } else {
2777
2778 /* "start" is not within an entry ... skip to next entry */
2779 if (flags & UVM_EXTRACT_CONTIG) {
2780 error = EINVAL;
2781 goto bad; /* definite hole here ... */
2782 }
2783
2784 entry = entry->next;
2785 fudge = 0;
2786 }
2787
2788 /* save values from srcmap for step 6 */
2789 orig_entry = entry;
2790 orig_fudge = fudge;
2791
2792 /*
2793 * step 3: now start looping through the map entries, extracting
2794 * as we go.
2795 */
2796
2797 while (entry->start < end && entry != &srcmap->header) {
2798
2799 /* if we are not doing a quick reference, clip it */
2800 if ((flags & UVM_EXTRACT_QREF) == 0)
2801 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2802
2803 /* clear needs_copy (allow chunking) */
2804 if (UVM_ET_ISNEEDSCOPY(entry)) {
2805 amap_copy(srcmap, entry,
2806 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2807 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2808 error = ENOMEM;
2809 goto bad;
2810 }
2811
2812 /* amap_copy could clip (during chunk)! update fudge */
2813 if (fudge) {
2814 fudge = start - entry->start;
2815 orig_fudge = fudge;
2816 }
2817 }
2818
2819 /* calculate the offset of this from "start" */
2820 oldoffset = (entry->start + fudge) - start;
2821
2822 /* allocate a new map entry */
2823 newentry = uvm_mapent_alloc(dstmap, 0);
2824 if (newentry == NULL) {
2825 error = ENOMEM;
2826 goto bad;
2827 }
2828
2829 /* set up new map entry */
2830 newentry->next = NULL;
2831 newentry->prev = endchain;
2832 newentry->start = dstaddr + oldoffset;
2833 newentry->end =
2834 newentry->start + (entry->end - (entry->start + fudge));
2835 if (newentry->end > newend || newentry->end < newentry->start)
2836 newentry->end = newend;
2837 newentry->object.uvm_obj = entry->object.uvm_obj;
2838 if (newentry->object.uvm_obj) {
2839 if (newentry->object.uvm_obj->pgops->pgo_reference)
2840 newentry->object.uvm_obj->pgops->
2841 pgo_reference(newentry->object.uvm_obj);
2842 newentry->offset = entry->offset + fudge;
2843 } else {
2844 newentry->offset = 0;
2845 }
2846 newentry->etype = entry->etype;
2847 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2848 entry->max_protection : entry->protection;
2849 newentry->max_protection = entry->max_protection;
2850 newentry->inheritance = entry->inheritance;
2851 newentry->wired_count = 0;
2852 newentry->aref.ar_amap = entry->aref.ar_amap;
2853 if (newentry->aref.ar_amap) {
2854 newentry->aref.ar_pageoff =
2855 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2856 uvm_map_reference_amap(newentry, AMAP_SHARED |
2857 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2858 } else {
2859 newentry->aref.ar_pageoff = 0;
2860 }
2861 newentry->advice = entry->advice;
2862 if ((flags & UVM_EXTRACT_QREF) != 0) {
2863 newentry->flags |= UVM_MAP_NOMERGE;
2864 }
2865
2866 /* now link it on the chain */
2867 nchain++;
2868 nsize += newentry->end - newentry->start;
2869 if (endchain == NULL) {
2870 chain = endchain = newentry;
2871 } else {
2872 endchain->next = newentry;
2873 endchain = newentry;
2874 }
2875
2876 /* end of 'while' loop! */
2877 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2878 (entry->next == &srcmap->header ||
2879 entry->next->start != entry->end)) {
2880 error = EINVAL;
2881 goto bad;
2882 }
2883 entry = entry->next;
2884 fudge = 0;
2885 }
2886
2887 /*
2888 * step 4: close off chain (in format expected by uvm_map_replace)
2889 */
2890
2891 if (chain)
2892 chain->prev = endchain;
2893
2894 /*
2895 * step 5: attempt to lock the dest map so we can pmap_copy.
2896 * note usage of copy_ok:
2897 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2898 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2899 */
2900
2901 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2902 copy_ok = 1;
2903 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2904 nchain, nsize, &resentry)) {
2905 if (srcmap != dstmap)
2906 vm_map_unlock(dstmap);
2907 error = EIO;
2908 goto bad;
2909 }
2910 } else {
2911 copy_ok = 0;
2912 		/* replacement deferred until step 7 */
2913 }
2914
2915 /*
2916 * step 6: traverse the srcmap a second time to do the following:
2917 * - if we got a lock on the dstmap do pmap_copy
2918 * - if UVM_EXTRACT_REMOVE remove the entries
2919 * we make use of orig_entry and orig_fudge (saved in step 2)
2920 */
2921
2922 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2923
2924 /* purge possible stale hints from srcmap */
2925 if (flags & UVM_EXTRACT_REMOVE) {
2926 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2927 if (srcmap->first_free != &srcmap->header &&
2928 srcmap->first_free->start >= start)
2929 srcmap->first_free = orig_entry->prev;
2930 }
2931
2932 entry = orig_entry;
2933 fudge = orig_fudge;
2934 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2935
2936 while (entry->start < end && entry != &srcmap->header) {
2937 if (copy_ok) {
2938 oldoffset = (entry->start + fudge) - start;
2939 elen = MIN(end, entry->end) -
2940 (entry->start + fudge);
2941 pmap_copy(dstmap->pmap, srcmap->pmap,
2942 dstaddr + oldoffset, elen,
2943 entry->start + fudge);
2944 }
2945
2946 /* we advance "entry" in the following if statement */
2947 if (flags & UVM_EXTRACT_REMOVE) {
2948 pmap_remove(srcmap->pmap, entry->start,
2949 entry->end);
2950 oldentry = entry; /* save entry */
2951 entry = entry->next; /* advance */
2952 uvm_map_entry_unlink(srcmap, oldentry);
2953 /* add to dead list */
2954 oldentry->next = deadentry;
2955 deadentry = oldentry;
2956 } else {
2957 entry = entry->next; /* advance */
2958 }
2959
2960 /* end of 'while' loop */
2961 fudge = 0;
2962 }
2963 pmap_update(srcmap->pmap);
2964
2965 /*
2966 * unlock dstmap. we will dispose of deadentry in
2967 * step 7 if needed
2968 */
2969
2970 if (copy_ok && srcmap != dstmap)
2971 vm_map_unlock(dstmap);
2972
2973 } else {
2974 deadentry = NULL;
2975 }
2976
2977 /*
2978 * step 7: we are done with the source map, unlock. if copy_ok
2979 * is 0 then we have not replaced the dummy mapping in dstmap yet
2980 * and we need to do so now.
2981 */
2982
2983 vm_map_unlock(srcmap);
2984 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2985 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2986
2987 /* now do the replacement if we didn't do it in step 5 */
2988 if (copy_ok == 0) {
2989 vm_map_lock(dstmap);
2990 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2991 nchain, nsize, &resentry);
2992 vm_map_unlock(dstmap);
2993
2994 if (error == false) {
2995 error = EIO;
2996 goto bad2;
2997 }
2998 }
2999
3000 if (resentry != NULL)
3001 uvm_mapent_free(resentry);
3002
3003 return (0);
3004
3005 /*
3006 * bad: failure recovery
3007 */
3008 bad:
3009 vm_map_unlock(srcmap);
3010 bad2: /* src already unlocked */
3011 if (chain)
3012 uvm_unmap_detach(chain,
3013 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3014
3015 if (resentry != NULL)
3016 uvm_mapent_free(resentry);
3017
3018 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3019 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
3020 }
3021 return (error);
3022 }
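/*
 * Example (sketch, modelled on uvm_io()-style use; uva and len are
 * assumed page-aligned): temporarily extract part of a user map into
 * the kernel map with quick references, then tear it down again:
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(&vm->vm_map, uva, len, kernel_map, &kva,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_FIXPROT);
 *	if (error == 0) {
 *		... access [kva, kva + len) ...
 *		uvm_unmap(kernel_map, kva, kva + len);
 *	}
 */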
3023
3024 /* end of extraction functions */
3025
3026 /*
3027 * uvm_map_submap: punch down part of a map into a submap
3028 *
3029 * => only the kernel_map is allowed to be submapped
3030 * => the purpose of submapping is to break up the locking granularity
3031 * of a larger map
3032 * => the range specified must have been mapped previously with a uvm_map()
3033 * call [with uobj==NULL] to create a blank map entry in the main map.
3034 * [And it had better still be blank!]
3035 * => maps which contain submaps should never be copied or forked.
3036 * => to remove a submap, use uvm_unmap() on the main map
3037 * and then uvm_map_deallocate() the submap.
3038 * => main map must be unlocked.
3039 * => submap must have been init'd and have a zero reference count.
3040 * [need not be locked as we don't actually reference it]
3041 */
3042
3043 int
3044 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3045 struct vm_map *submap)
3046 {
3047 struct vm_map_entry *entry;
3048 struct uvm_mapent_reservation umr;
3049 int error;
3050
3051 uvm_mapent_reserve(map, &umr, 2, 0);
3052
3053 vm_map_lock(map);
3054 VM_MAP_RANGE_CHECK(map, start, end);
3055
3056 if (uvm_map_lookup_entry(map, start, &entry)) {
3057 UVM_MAP_CLIP_START(map, entry, start, &umr);
3058 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
3059 } else {
3060 entry = NULL;
3061 }
3062
3063 if (entry != NULL &&
3064 entry->start == start && entry->end == end &&
3065 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3066 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3067 entry->etype |= UVM_ET_SUBMAP;
3068 entry->object.sub_map = submap;
3069 entry->offset = 0;
3070 uvm_map_reference(submap);
3071 error = 0;
3072 } else {
3073 error = EINVAL;
3074 }
3075 vm_map_unlock(map);
3076
3077 uvm_mapent_unreserve(map, &umr);
3078
3079 return error;
3080 }
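/*
 * Example (sketch; uvm_km_suballoc() drives a sequence like this):
 * first create a blank entry with uvm_map(..., uobj == NULL), then
 * punch the submap into it:
 *
 *	if (uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) == 0 &&
 *	    uvm_map_submap(kernel_map, va, va + size, submap) == 0)
 *		... lookups in [va, va + size) now descend into submap ...
 */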
3081
3082 /*
3083 * uvm_map_setup_kernel: init in-kernel map
3084 *
3085 * => map must not be in service yet.
3086 */
3087
3088 void
3089 uvm_map_setup_kernel(struct vm_map_kernel *map,
3090 vaddr_t vmin, vaddr_t vmax, int flags)
3091 {
3092
3093 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
3094 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
3095 LIST_INIT(&map->vmk_kentry_free);
3096 map->vmk_merged_entries = NULL;
3097 }
3098
3099
3100 /*
3101 * uvm_map_protect: change map protection
3102 *
3103 * => set_max means set max_protection.
3104 * => map must be unlocked.
3105 */
3106
3107 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3108 ~VM_PROT_WRITE : VM_PROT_ALL)
3109
3110 int
3111 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3112 vm_prot_t new_prot, bool set_max)
3113 {
3114 struct vm_map_entry *current, *entry;
3115 int error = 0;
3116 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
3117 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
3118 map, start, end, new_prot);
3119
3120 vm_map_lock(map);
3121 VM_MAP_RANGE_CHECK(map, start, end);
3122 if (uvm_map_lookup_entry(map, start, &entry)) {
3123 UVM_MAP_CLIP_START(map, entry, start, NULL);
3124 } else {
3125 entry = entry->next;
3126 }
3127
3128 /*
3129 * make a first pass to check for protection violations.
3130 */
3131
3132 current = entry;
3133 while ((current != &map->header) && (current->start < end)) {
3134 if (UVM_ET_ISSUBMAP(current)) {
3135 error = EINVAL;
3136 goto out;
3137 }
3138 if ((new_prot & current->max_protection) != new_prot) {
3139 error = EACCES;
3140 goto out;
3141 }
3142 /*
3143 * Don't allow VM_PROT_EXECUTE to be set on entries that
3144 * point to vnodes that are associated with a NOEXEC file
3145 * system.
3146 */
3147 if (UVM_ET_ISOBJ(current) &&
3148 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3149 struct vnode *vp =
3150 (struct vnode *) current->object.uvm_obj;
3151
3152 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3153 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3154 error = EACCES;
3155 goto out;
3156 }
3157 }
3158
3159 current = current->next;
3160 }
3161
3162 /* go back and fix up protections (no need to clip this time). */
3163
3164 current = entry;
3165 while ((current != &map->header) && (current->start < end)) {
3166 vm_prot_t old_prot;
3167
3168 UVM_MAP_CLIP_END(map, current, end, NULL);
3169 old_prot = current->protection;
3170 if (set_max)
3171 current->protection =
3172 (current->max_protection = new_prot) & old_prot;
3173 else
3174 current->protection = new_prot;
3175
3176 /*
3177 * update physical map if necessary. worry about copy-on-write
3178 * here -- CHECK THIS XXX
3179 */
3180
3181 if (current->protection != old_prot) {
3182 /* update pmap! */
3183 pmap_protect(map->pmap, current->start, current->end,
3184 			    current->protection & MASK(current));
3185
3186 /*
3187 * If this entry points at a vnode, and the
3188 * protection includes VM_PROT_EXECUTE, mark
3189 * the vnode as VEXECMAP.
3190 */
3191 if (UVM_ET_ISOBJ(current)) {
3192 struct uvm_object *uobj =
3193 current->object.uvm_obj;
3194
3195 if (UVM_OBJ_IS_VNODE(uobj) &&
3196 (current->protection & VM_PROT_EXECUTE)) {
3197 vn_markexec((struct vnode *) uobj);
3198 }
3199 }
3200 }
3201
3202 /*
3203 * If the map is configured to lock any future mappings,
3204 * wire this entry now if the old protection was VM_PROT_NONE
3205 * and the new protection is not VM_PROT_NONE.
3206 */
3207
3208 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3209 		    VM_MAPENT_ISWIRED(current) == 0 &&
3210 old_prot == VM_PROT_NONE &&
3211 new_prot != VM_PROT_NONE) {
3212 			if (uvm_map_pageable(map, current->start,
3213 			    current->end, false,
3214 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3215
3216 /*
3217 * If locking the entry fails, remember the
3218 * error if it's the first one. Note we
3219 * still continue setting the protection in
3220 * the map, but will return the error
3221 * condition regardless.
3222 *
3223 * XXX Ignore what the actual error is,
3224 * XXX just call it a resource shortage
3225 * XXX so that it doesn't get confused
3226 * XXX what uvm_map_protect() itself would
3227 * XXX normally return.
3228 */
3229
3230 error = ENOMEM;
3231 }
3232 }
3233 current = current->next;
3234 }
3235 pmap_update(map->pmap);
3236
3237 out:
3238 vm_map_unlock(map);
3239
3240 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3241 return error;
3242 }
3243
3244 #undef MASK
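/*
 * Example (hypothetical; the sys_mprotect() path boils down to this):
 *
 *	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + len,
 *	    prot & VM_PROT_ALL, false);
 */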
3245
3246 /*
3247 * uvm_map_inherit: set inheritance code for range of addrs in map.
3248 *
3249 * => map must be unlocked
3250 * => note that the inherit code is used during a "fork". see fork
3251 * code for details.
3252 */
3253
3254 int
3255 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3256 vm_inherit_t new_inheritance)
3257 {
3258 struct vm_map_entry *entry, *temp_entry;
3259 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3260 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3261 map, start, end, new_inheritance);
3262
3263 switch (new_inheritance) {
3264 case MAP_INHERIT_NONE:
3265 case MAP_INHERIT_COPY:
3266 case MAP_INHERIT_SHARE:
3267 break;
3268 default:
3269 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3270 return EINVAL;
3271 }
3272
3273 vm_map_lock(map);
3274 VM_MAP_RANGE_CHECK(map, start, end);
3275 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3276 entry = temp_entry;
3277 UVM_MAP_CLIP_START(map, entry, start, NULL);
3278 } else {
3279 entry = temp_entry->next;
3280 }
3281 while ((entry != &map->header) && (entry->start < end)) {
3282 UVM_MAP_CLIP_END(map, entry, end, NULL);
3283 entry->inheritance = new_inheritance;
3284 entry = entry->next;
3285 }
3286 vm_map_unlock(map);
3287 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3288 return 0;
3289 }
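/*
 * Example (hypothetical; the sys_minherit() shape):
 *
 *	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + len,
 *	    MAP_INHERIT_SHARE);
 */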
3290
3291 /*
3292 * uvm_map_advice: set advice code for range of addrs in map.
3293 *
3294 * => map must be unlocked
3295 */
3296
3297 int
3298 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3299 {
3300 struct vm_map_entry *entry, *temp_entry;
3301 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3302 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3303 map, start, end, new_advice);
3304
3305 vm_map_lock(map);
3306 VM_MAP_RANGE_CHECK(map, start, end);
3307 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3308 entry = temp_entry;
3309 UVM_MAP_CLIP_START(map, entry, start, NULL);
3310 } else {
3311 entry = temp_entry->next;
3312 }
3313
3314 /*
3315 * XXXJRT: disallow holes?
3316 */
3317
3318 while ((entry != &map->header) && (entry->start < end)) {
3319 UVM_MAP_CLIP_END(map, entry, end, NULL);
3320
3321 switch (new_advice) {
3322 case MADV_NORMAL:
3323 case MADV_RANDOM:
3324 case MADV_SEQUENTIAL:
3325 /* nothing special here */
3326 break;
3327
3328 default:
3329 vm_map_unlock(map);
3330 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3331 return EINVAL;
3332 }
3333 entry->advice = new_advice;
3334 entry = entry->next;
3335 }
3336
3337 vm_map_unlock(map);
3338 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3339 return 0;
3340 }
3341
3342 /*
3343 * uvm_map_willneed: apply MADV_WILLNEED
3344 */
3345
3346 int
3347 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3348 {
3349 struct vm_map_entry *entry;
3350 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3351 UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
3352 map, start, end, 0);
3353
3354 vm_map_lock_read(map);
3355 VM_MAP_RANGE_CHECK(map, start, end);
3356 if (!uvm_map_lookup_entry(map, start, &entry)) {
3357 entry = entry->next;
3358 }
3359 while (entry->start < end) {
3360 struct vm_amap * const amap = entry->aref.ar_amap;
3361 struct uvm_object * const uobj = entry->object.uvm_obj;
3362
3363 KASSERT(entry != &map->header);
3364 KASSERT(start < entry->end);
3365 /*
3366 * XXX IMPLEMENT ME.
3367 * Should invent a "weak" mode for uvm_fault()
3368 * which would only do the PGO_LOCKED pgo_get().
3369 *
3370 * for now, we handle only the easy but common case.
3371 */
3372 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3373 off_t offset;
3374 off_t size;
3375
3376 			offset = entry->offset;
3377 			if (start > entry->start) {
3378 				offset += start - entry->start;
3379 			}
3380 			size = entry->end - entry->start -
3381 			    (offset - entry->offset);
3382 			if (entry->end > end) {
3383 				size -= entry->end - end;
3384 			}
3384 uvm_readahead(uobj, offset, size);
3385 }
3386 entry = entry->next;
3387 }
3388 vm_map_unlock_read(map);
3389 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3390 return 0;
3391 }
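/*
 * Example (hypothetical): madvise(MADV_WILLNEED) reduces to
 *
 *	error = uvm_map_willneed(&p->p_vmspace->vm_map, start, end);
 *
 * which, for plain object-backed entries, becomes uvm_readahead()
 * on the covered object range.
 */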
3392
3393 /*
3394 * uvm_map_pageable: sets the pageability of a range in a map.
3395 *
3396 * => wires map entries. should not be used for transient page locking.
3397 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3398 * => regions specified as not pageable require lock-down (wired) memory
3399 * and page tables.
3400 * => map must never be read-locked
3401 * => if islocked is true, map is already write-locked
3402 * => we always unlock the map, since we must downgrade to a read-lock
3403 * to call uvm_fault_wire()
3404 * => XXXCDC: check this and try and clean it up.
3405 */
3406
3407 int
3408 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3409 bool new_pageable, int lockflags)
3410 {
3411 struct vm_map_entry *entry, *start_entry, *failed_entry;
3412 int rv;
3413 #ifdef DIAGNOSTIC
3414 u_int timestamp_save;
3415 #endif
3416 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3417 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3418 map, start, end, new_pageable);
3419 KASSERT(map->flags & VM_MAP_PAGEABLE);
3420
3421 if ((lockflags & UVM_LK_ENTER) == 0)
3422 vm_map_lock(map);
3423 VM_MAP_RANGE_CHECK(map, start, end);
3424
3425 /*
3426 * only one pageability change may take place at one time, since
3427 * uvm_fault_wire assumes it will be called only once for each
3428 * wiring/unwiring. therefore, we have to make sure we're actually
3429 * changing the pageability for the entire region. we do so before
3430 * making any changes.
3431 */
3432
3433 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3434 if ((lockflags & UVM_LK_EXIT) == 0)
3435 vm_map_unlock(map);
3436
3437 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3438 return EFAULT;
3439 }
3440 entry = start_entry;
3441
3442 /*
3443 * handle wiring and unwiring separately.
3444 */
3445
3446 if (new_pageable) { /* unwire */
3447 UVM_MAP_CLIP_START(map, entry, start, NULL);
3448
3449 /*
3450 * unwiring. first ensure that the range to be unwired is
3451 * really wired down and that there are no holes.
3452 */
3453
3454 while ((entry != &map->header) && (entry->start < end)) {
3455 if (entry->wired_count == 0 ||
3456 (entry->end < end &&
3457 (entry->next == &map->header ||
3458 entry->next->start > entry->end))) {
3459 if ((lockflags & UVM_LK_EXIT) == 0)
3460 vm_map_unlock(map);
3461 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3462 return EINVAL;
3463 }
3464 entry = entry->next;
3465 }
3466
3467 /*
3468 * POSIX 1003.1b - a single munlock call unlocks a region,
3469 * regardless of the number of mlock calls made on that
3470 * region.
3471 */
3472
3473 entry = start_entry;
3474 while ((entry != &map->header) && (entry->start < end)) {
3475 UVM_MAP_CLIP_END(map, entry, end, NULL);
3476 if (VM_MAPENT_ISWIRED(entry))
3477 uvm_map_entry_unwire(map, entry);
3478 entry = entry->next;
3479 }
3480 if ((lockflags & UVM_LK_EXIT) == 0)
3481 vm_map_unlock(map);
3482 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3483 return 0;
3484 }
3485
3486 /*
3487 * wire case: in two passes [XXXCDC: ugly block of code here]
3488 *
3489 * 1: holding the write lock, we create any anonymous maps that need
3490 * to be created. then we clip each map entry to the region to
3491 * be wired and increment its wiring count.
3492 *
3493 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3494 * in the pages for any newly wired area (wired_count == 1).
3495 *
3496 * downgrading to a read lock for uvm_fault_wire avoids a possible
3497 * deadlock with another thread that may have faulted on one of
3498 * the pages to be wired (it would mark the page busy, blocking
3499 * us, then in turn block on the map lock that we hold). because
3500 * of problems in the recursive lock package, we cannot upgrade
3501 * to a write lock in vm_map_lookup. thus, any actions that
3502 * require the write lock must be done beforehand. because we
3503 * keep the read lock on the map, the copy-on-write status of the
3504 * entries we modify here cannot change.
3505 */
3506
3507 while ((entry != &map->header) && (entry->start < end)) {
3508 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3509
3510 /*
3511 * perform actions of vm_map_lookup that need the
3512 * write lock on the map: create an anonymous map
3513 * for a copy-on-write region, or an anonymous map
3514 * for a zero-fill region. (XXXCDC: submap case
3515 * ok?)
3516 */
3517
3518 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3519 if (UVM_ET_ISNEEDSCOPY(entry) &&
3520 ((entry->max_protection & VM_PROT_WRITE) ||
3521 (entry->object.uvm_obj == NULL))) {
3522 amap_copy(map, entry, 0, start, end);
3523 /* XXXCDC: wait OK? */
3524 }
3525 }
3526 }
3527 UVM_MAP_CLIP_START(map, entry, start, NULL);
3528 UVM_MAP_CLIP_END(map, entry, end, NULL);
3529 entry->wired_count++;
3530
3531 /*
3532 * Check for holes
3533 */
3534
3535 if (entry->protection == VM_PROT_NONE ||
3536 (entry->end < end &&
3537 (entry->next == &map->header ||
3538 entry->next->start > entry->end))) {
3539
3540 /*
3541 * found one. amap creation actions do not need to
3542 * be undone, but the wired counts need to be restored.
3543 */
3544
3545 while (entry != &map->header && entry->end > start) {
3546 entry->wired_count--;
3547 entry = entry->prev;
3548 }
3549 if ((lockflags & UVM_LK_EXIT) == 0)
3550 vm_map_unlock(map);
3551 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3552 return EINVAL;
3553 }
3554 entry = entry->next;
3555 }
3556
3557 /*
3558 * Pass 2.
3559 */
3560
3561 #ifdef DIAGNOSTIC
3562 timestamp_save = map->timestamp;
3563 #endif
3564 vm_map_busy(map);
3565 vm_map_unlock(map);
3566
3567 rv = 0;
3568 entry = start_entry;
3569 while (entry != &map->header && entry->start < end) {
3570 if (entry->wired_count == 1) {
3571 rv = uvm_fault_wire(map, entry->start, entry->end,
3572 entry->max_protection, 1);
3573 if (rv) {
3574
3575 /*
3576 * wiring failed. break out of the loop.
3577 * we'll clean up the map below, once we
3578 * have a write lock again.
3579 */
3580
3581 break;
3582 }
3583 }
3584 entry = entry->next;
3585 }
3586
3587 if (rv) { /* failed? */
3588
3589 /*
3590 * Get back to an exclusive (write) lock.
3591 */
3592
3593 vm_map_lock(map);
3594 vm_map_unbusy(map);
3595
3596 #ifdef DIAGNOSTIC
3597 if (timestamp_save + 1 != map->timestamp)
3598 panic("uvm_map_pageable: stale map");
3599 #endif
3600
3601 /*
3602 * first drop the wiring count on all the entries
3603 * which haven't actually been wired yet.
3604 */
3605
3606 failed_entry = entry;
3607 while (entry != &map->header && entry->start < end) {
3608 entry->wired_count--;
3609 entry = entry->next;
3610 }
3611
3612 /*
3613 * now, unwire all the entries that were successfully
3614 * wired above.
3615 */
3616
3617 entry = start_entry;
3618 while (entry != failed_entry) {
3619 entry->wired_count--;
3620 if (VM_MAPENT_ISWIRED(entry) == 0)
3621 uvm_map_entry_unwire(map, entry);
3622 entry = entry->next;
3623 }
3624 if ((lockflags & UVM_LK_EXIT) == 0)
3625 vm_map_unlock(map);
3626 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3627 return (rv);
3628 }
3629
3630 if ((lockflags & UVM_LK_EXIT) == 0) {
3631 vm_map_unbusy(map);
3632 } else {
3633
3634 /*
3635 * Get back to an exclusive (write) lock.
3636 */
3637
3638 vm_map_lock(map);
3639 vm_map_unbusy(map);
3640 }
3641
3642 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3643 return 0;
3644 }
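/*
 * Example (hypothetical; the mlock(2)/munlock(2) shapes):
 *
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end,
 *	    false, 0);		wire the range (mlock)
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end,
 *	    true, 0);		unwire the range (munlock)
 */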
3645
3646 /*
3647 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3648 * all mapped regions.
3649 *
3650 * => map must not be locked.
3651 * => if no flags are specified, all regions are unwired.
3652 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3653 */
3654
3655 int
3656 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3657 {
3658 struct vm_map_entry *entry, *failed_entry;
3659 vsize_t size;
3660 int rv;
3661 #ifdef DIAGNOSTIC
3662 u_int timestamp_save;
3663 #endif
3664 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3665 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3666
3667 KASSERT(map->flags & VM_MAP_PAGEABLE);
3668
3669 vm_map_lock(map);
3670
3671 /*
3672 * handle wiring and unwiring separately.
3673 */
3674
3675 if (flags == 0) { /* unwire */
3676
3677 /*
3678 * POSIX 1003.1b -- munlockall unlocks all regions,
3679 * regardless of how many times mlockall has been called.
3680 */
3681
3682 for (entry = map->header.next; entry != &map->header;
3683 entry = entry->next) {
3684 if (VM_MAPENT_ISWIRED(entry))
3685 uvm_map_entry_unwire(map, entry);
3686 }
3687 map->flags &= ~VM_MAP_WIREFUTURE;
3688 vm_map_unlock(map);
3689 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3690 return 0;
3691 }
3692
3693 if (flags & MCL_FUTURE) {
3694
3695 /*
3696 * must wire all future mappings; remember this.
3697 */
3698
3699 map->flags |= VM_MAP_WIREFUTURE;
3700 }
3701
3702 if ((flags & MCL_CURRENT) == 0) {
3703
3704 /*
3705 * no more work to do!
3706 */
3707
3708 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3709 vm_map_unlock(map);
3710 return 0;
3711 }
3712
3713 /*
3714 * wire case: in three passes [XXXCDC: ugly block of code here]
3715 *
3716 * 1: holding the write lock, count all pages mapped by non-wired
3717 * entries. if this would cause us to go over our limit, we fail.
3718 *
3719 * 2: still holding the write lock, we create any anonymous maps that
3720 * need to be created. then we increment its wiring count.
3721 *
3722 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3723 * in the pages for any newly wired area (wired_count == 1).
3724 *
3725 * downgrading to a read lock for uvm_fault_wire avoids a possible
3726 * deadlock with another thread that may have faulted on one of
3727 * the pages to be wired (it would mark the page busy, blocking
3728 * us, then in turn block on the map lock that we hold). because
3729 * of problems in the recursive lock package, we cannot upgrade
3730 * to a write lock in vm_map_lookup. thus, any actions that
3731 * require the write lock must be done beforehand. because we
3732 * keep the read lock on the map, the copy-on-write status of the
3733 * entries we modify here cannot change.
3734 */
3735
3736 for (size = 0, entry = map->header.next; entry != &map->header;
3737 entry = entry->next) {
3738 if (entry->protection != VM_PROT_NONE &&
3739 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3740 size += entry->end - entry->start;
3741 }
3742 }
3743
3744 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3745 vm_map_unlock(map);
3746 return ENOMEM;
3747 }
3748
3749 if (limit != 0 &&
3750 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3751 vm_map_unlock(map);
3752 return ENOMEM;
3753 }
3754
3755 /*
3756 * Pass 2.
3757 */
3758
3759 for (entry = map->header.next; entry != &map->header;
3760 entry = entry->next) {
3761 if (entry->protection == VM_PROT_NONE)
3762 continue;
3763 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3764
3765 /*
3766 * perform actions of vm_map_lookup that need the
3767 * write lock on the map: create an anonymous map
3768 * for a copy-on-write region, or an anonymous map
3769 * for a zero-fill region. (XXXCDC: submap case
3770 * ok?)
3771 */
3772
3773 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3774 if (UVM_ET_ISNEEDSCOPY(entry) &&
3775 ((entry->max_protection & VM_PROT_WRITE) ||
3776 (entry->object.uvm_obj == NULL))) {
3777 amap_copy(map, entry, 0, entry->start,
3778 entry->end);
3779 /* XXXCDC: wait OK? */
3780 }
3781 }
3782 }
3783 entry->wired_count++;
3784 }
3785
3786 /*
3787 * Pass 3.
3788 */
3789
3790 #ifdef DIAGNOSTIC
3791 timestamp_save = map->timestamp;
3792 #endif
3793 vm_map_busy(map);
3794 vm_map_unlock(map);
3795
3796 rv = 0;
3797 for (entry = map->header.next; entry != &map->header;
3798 entry = entry->next) {
3799 if (entry->wired_count == 1) {
3800 rv = uvm_fault_wire(map, entry->start, entry->end,
3801 entry->max_protection, 1);
3802 if (rv) {
3803
3804 /*
3805 * wiring failed. break out of the loop.
3806 * we'll clean up the map below, once we
3807 * have a write lock again.
3808 */
3809
3810 break;
3811 }
3812 }
3813 }
3814
3815 if (rv) {
3816
3817 /*
3818 * Get back an exclusive (write) lock.
3819 */
3820
3821 vm_map_lock(map);
3822 vm_map_unbusy(map);
3823
3824 #ifdef DIAGNOSTIC
3825 if (timestamp_save + 1 != map->timestamp)
3826 panic("uvm_map_pageable_all: stale map");
3827 #endif
3828
3829 /*
3830 * first drop the wiring count on all the entries
3831 * which haven't actually been wired yet.
3832 *
3833 * Skip VM_PROT_NONE entries like we did above.
3834 */
3835
3836 failed_entry = entry;
3837 for (/* nothing */; entry != &map->header;
3838 entry = entry->next) {
3839 if (entry->protection == VM_PROT_NONE)
3840 continue;
3841 entry->wired_count--;
3842 }
3843
3844 /*
3845 * now, unwire all the entries that were successfully
3846 * wired above.
3847 *
3848 * Skip VM_PROT_NONE entries like we did above.
3849 */
3850
3851 for (entry = map->header.next; entry != failed_entry;
3852 entry = entry->next) {
3853 if (entry->protection == VM_PROT_NONE)
3854 continue;
3855 entry->wired_count--;
3856 if (VM_MAPENT_ISWIRED(entry))
3857 uvm_map_entry_unwire(map, entry);
3858 }
3859 vm_map_unlock(map);
3860 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3861 return (rv);
3862 }
3863
3864 vm_map_unbusy(map);
3865
3866 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3867 return 0;
3868 }
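/*
 * Example (hypothetical; the mlockall(2) shape):
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 */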
3869
3870 /*
3871 * uvm_map_clean: clean out a map range
3872 *
3873 * => valid flags:
3874 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3875 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3876 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3877 * if (flags & PGO_FREE): any cached pages are freed after clean
3878 * => returns an error if any part of the specified range isn't mapped
3879 * => never a need to flush amap layer since the anonymous memory has
3880 * no permanent home, but may deactivate pages there
3881 * => called from sys_msync() and sys_madvise()
3882 * => caller must not write-lock map (read OK).
3883 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3884 */
3885
3886 int
3887 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3888 {
3889 struct vm_map_entry *current, *entry;
3890 struct uvm_object *uobj;
3891 struct vm_amap *amap;
3892 struct vm_anon *anon;
3893 struct vm_page *pg;
3894 vaddr_t offset;
3895 vsize_t size;
3896 voff_t uoff;
3897 int error, refs;
3898 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3899
3900 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3901 map, start, end, flags);
3902 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3903 (PGO_FREE|PGO_DEACTIVATE));
3904
3905 vm_map_lock_read(map);
3906 VM_MAP_RANGE_CHECK(map, start, end);
3907 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3908 vm_map_unlock_read(map);
3909 return EFAULT;
3910 }
3911
3912 /*
3913 * Make a first pass to check for holes and wiring problems.
3914 */
3915
3916 for (current = entry; current->start < end; current = current->next) {
3917 if (UVM_ET_ISSUBMAP(current)) {
3918 vm_map_unlock_read(map);
3919 return EINVAL;
3920 }
3921 		if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(current)) {
3922 vm_map_unlock_read(map);
3923 return EBUSY;
3924 }
3925 if (end <= current->end) {
3926 break;
3927 }
3928 if (current->end != current->next->start) {
3929 vm_map_unlock_read(map);
3930 return EFAULT;
3931 }
3932 }
3933
3934 error = 0;
3935 for (current = entry; start < end; current = current->next) {
3936 amap = current->aref.ar_amap; /* top layer */
3937 uobj = current->object.uvm_obj; /* bottom layer */
3938 KASSERT(start >= current->start);
3939
3940 /*
3941 * No amap cleaning necessary if:
3942 *
3943 * (1) There's no amap.
3944 *
3945 * (2) We're not deactivating or freeing pages.
3946 */
3947
3948 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3949 goto flush_object;
3950
3951 amap_lock(amap);
3952 offset = start - current->start;
3953 size = MIN(end, current->end) - start;
3954 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3955 			anon = amap_lookup(&current->aref, offset);
3956 if (anon == NULL)
3957 continue;
3958
3959 mutex_enter(&anon->an_lock);
3960 pg = anon->an_page;
3961 if (pg == NULL) {
3962 mutex_exit(&anon->an_lock);
3963 continue;
3964 }
3965
3966 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3967
3968 /*
3969 * In these first 3 cases, we just deactivate the page.
3970 */
3971
3972 case PGO_CLEANIT|PGO_FREE:
3973 case PGO_CLEANIT|PGO_DEACTIVATE:
3974 case PGO_DEACTIVATE:
3975 deactivate_it:
3976 /*
3977 * skip the page if it's loaned or wired,
3978 * since it shouldn't be on a paging queue
3979 * at all in these cases.
3980 */
3981
3982 mutex_enter(&uvm_pageqlock);
3983 if (pg->loan_count != 0 ||
3984 pg->wire_count != 0) {
3985 mutex_exit(&uvm_pageqlock);
3986 mutex_exit(&anon->an_lock);
3987 continue;
3988 }
3989 KASSERT(pg->uanon == anon);
3990 uvm_pagedeactivate(pg);
3991 mutex_exit(&uvm_pageqlock);
3992 mutex_exit(&anon->an_lock);
3993 continue;
3994
3995 case PGO_FREE:
3996
3997 /*
3998 * If there are multiple references to
3999 * the amap, just deactivate the page.
4000 */
4001
4002 if (amap_refs(amap) > 1)
4003 goto deactivate_it;
4004
4005 /* skip the page if it's wired */
4006 if (pg->wire_count != 0) {
4007 mutex_exit(&anon->an_lock);
4008 continue;
4009 }
4010 				amap_unadd(&current->aref, offset);
4011 refs = --anon->an_ref;
4012 mutex_exit(&anon->an_lock);
4013 if (refs == 0)
4014 uvm_anfree(anon);
4015 continue;
4016 }
4017 }
4018 amap_unlock(amap);
4019
4020 flush_object:
4021 /*
4022 * flush pages if we've got a valid backing object.
4023 * note that we must always clean object pages before
4024 * freeing them since otherwise we could reveal stale
4025 * data from files.
4026 */
4027
4028 uoff = current->offset + (start - current->start);
4029 size = MIN(end, current->end) - start;
4030 if (uobj != NULL) {
4031 mutex_enter(&uobj->vmobjlock);
4032 if (uobj->pgops->pgo_put != NULL)
4033 error = (uobj->pgops->pgo_put)(uobj, uoff,
4034 uoff + size, flags | PGO_CLEANIT);
4035 			else {
4036 				error = 0;
4036 				/* pgo_put drops vmobjlock; drop it here too */
4036 				mutex_exit(&uobj->vmobjlock);
4036 			}
4037 		}
4038 start += size;
4039 }
4040 vm_map_unlock_read(map);
4041 return (error);
4042 }
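
/*
 * Usage sketch (editorial, not part of the original source): a caller
 * implementing msync(2)-like behaviour could flush and deactivate a
 * page-aligned range as below; "map", "va" and "len" are hypothetical.
 *
 *	error = uvm_map_clean(map, va, va + len,
 *	    PGO_CLEANIT | PGO_SYNCIO | PGO_DEACTIVATE);
 *	if (error == EFAULT)
 *		... part of [va, va + len) was not mapped ...
 */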
4043
4044
4045 /*
4046 * uvm_map_checkprot: check protection in map
4047 *
4048 * => must allow specified protection in a fully allocated region.
4049 * => map must be read or write locked by caller.
4050 */
4051
4052 bool
4053 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4054 vm_prot_t protection)
4055 {
4056 struct vm_map_entry *entry;
4057 struct vm_map_entry *tmp_entry;
4058
4059 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4060 return (false);
4061 }
4062 entry = tmp_entry;
4063 while (start < end) {
4064 if (entry == &map->header) {
4065 return (false);
4066 }
4067
4068 /*
4069 * no holes allowed
4070 */
4071
4072 if (start < entry->start) {
4073 return (false);
4074 }
4075
4076 /*
4077 * check protection associated with entry
4078 */
4079
4080 if ((entry->protection & protection) != protection) {
4081 return (false);
4082 }
4083 start = entry->end;
4084 entry = entry->next;
4085 }
4086 return (true);
4087 }
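
/*
 * Usage sketch (editorial): a routine about to write through a
 * user-supplied range could verify access first; "map", "uaddr" and
 * "size" are hypothetical, and the map is assumed locked by the
 * caller as required above.
 *
 *	if (!uvm_map_checkprot(map, uaddr, uaddr + size,
 *	    VM_PROT_READ | VM_PROT_WRITE))
 *		return EFAULT;
 */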
4088
4089 /*
4090 * uvmspace_alloc: allocate a vmspace structure.
4091 *
4092 * - structure includes vm_map and pmap
4093 * - XXX: no locking on this structure
4094 * - refcnt set to 1, rest must be init'd by caller
4095 */
4096 struct vmspace *
4097 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
4098 {
4099 struct vmspace *vm;
4100 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
4101
4102 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
4103 uvmspace_init(vm, NULL, vmin, vmax);
4104 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
4105 return (vm);
4106 }
4107
4108 /*
4109 * uvmspace_init: initialize a vmspace structure.
4110 *
4111 * - XXX: no locking on this structure
4112 * - refcnt set to 1, rest must be init'd by caller
4113 */
4114 void
4115 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
4116 {
4117 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
4118
4119 memset(vm, 0, sizeof(*vm));
4120 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4121 #ifdef __USING_TOPDOWN_VM
4122 | VM_MAP_TOPDOWN
4123 #endif
4124 );
4125 if (pmap)
4126 pmap_reference(pmap);
4127 else
4128 pmap = pmap_create();
4129 vm->vm_map.pmap = pmap;
4130 vm->vm_refcnt = 1;
4131 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4132 }
4133
4134 /*
4135 * uvmspace_share: share a vmspace between two processes
4136 *
4137 * - used for vfork, threads(?)
4138 */
4139
4140 void
4141 uvmspace_share(struct proc *p1, struct proc *p2)
4142 {
4143
4144 uvmspace_addref(p1->p_vmspace);
4145 p2->p_vmspace = p1->p_vmspace;
4146 }
4147
4148 /*
4149 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4150 *
4151 * - XXX: no locking on vmspace
4152 */
4153
4154 void
4155 uvmspace_unshare(struct lwp *l)
4156 {
4157 struct proc *p = l->l_proc;
4158 struct vmspace *nvm, *ovm = p->p_vmspace;
4159
4160 if (ovm->vm_refcnt == 1)
4161 /* nothing to do: vmspace isn't shared in the first place */
4162 return;
4163
4164 /* make a new vmspace, still holding old one */
4165 nvm = uvmspace_fork(ovm);
4166
4167 kpreempt_disable();
4168 pmap_deactivate(l); /* unbind old vmspace */
4169 p->p_vmspace = nvm;
4170 pmap_activate(l); /* switch to new vmspace */
4171 kpreempt_enable();
4172
4173 uvmspace_free(ovm); /* drop reference to old vmspace */
4174 }
4175
4176 /*
4177 * uvmspace_exec: the process wants to exec a new program
4178 */
4179
4180 void
4181 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4182 {
4183 struct proc *p = l->l_proc;
4184 struct vmspace *nvm, *ovm = p->p_vmspace;
4185 struct vm_map *map = &ovm->vm_map;
4186
4187 #ifdef __sparc__
4188 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
4189 kill_user_windows(l); /* before stack addresses go away */
4190 #endif
4191
4192 /*
4193 * see if more than one process is using this vmspace...
4194 */
4195
4196 if (ovm->vm_refcnt == 1) {
4197
4198 /*
4199 * if p is the only process using its vmspace then we can safely
4200 * recycle that vmspace for the program that is being exec'd.
4201 */
4202
4203 #ifdef SYSVSHM
4204 /*
4205 * SYSV SHM semantics require us to kill all segments on an exec
4206 */
4207
4208 if (ovm->vm_shm)
4209 shmexit(ovm);
4210 #endif
4211
4212 /*
4213 * POSIX 1003.1b -- "lock future mappings" is revoked
4214 * when a process execs another program image.
4215 */
4216
4217 map->flags &= ~VM_MAP_WIREFUTURE;
4218
4219 /*
4220 * now unmap the old program
4221 */
4222
4223 pmap_remove_all(map->pmap);
4224 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4225 KASSERT(map->header.prev == &map->header);
4226 KASSERT(map->nentries == 0);
4227
4228 /*
4229 * resize the map
4230 */
4231
4232 vm_map_setmin(map, start);
4233 vm_map_setmax(map, end);
4234 } else {
4235
4236 /*
4237 * p's vmspace is being shared, so we can't reuse it for p since
4238 * it is still being used for others. allocate a new vmspace
4239 * for p
4240 */
4241
4242 nvm = uvmspace_alloc(start, end);
4243
4244 /*
4245 * install new vmspace and drop our ref to the old one.
4246 */
4247
4248 kpreempt_disable();
4249 pmap_deactivate(l);
4250 p->p_vmspace = nvm;
4251 pmap_activate(l);
4252 kpreempt_enable();
4253
4254 uvmspace_free(ovm);
4255 }
4256 }
4257
4258 /*
4259  * uvmspace_addref: add a reference to a vmspace.
4260 */
4261
4262 void
4263 uvmspace_addref(struct vmspace *vm)
4264 {
4265 struct vm_map *map = &vm->vm_map;
4266
4267 KASSERT((map->flags & VM_MAP_DYING) == 0);
4268
4269 mutex_enter(&map->misc_lock);
4270 KASSERT(vm->vm_refcnt > 0);
4271 vm->vm_refcnt++;
4272 mutex_exit(&map->misc_lock);
4273 }
4274
4275 /*
4276 * uvmspace_free: free a vmspace data structure
4277 */
4278
4279 void
4280 uvmspace_free(struct vmspace *vm)
4281 {
4282 struct vm_map_entry *dead_entries;
4283 struct vm_map *map = &vm->vm_map;
4284 int n;
4285
4286 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4287
4288 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4289 mutex_enter(&map->misc_lock);
4290 n = --vm->vm_refcnt;
4291 mutex_exit(&map->misc_lock);
4292 if (n > 0)
4293 return;
4294
4295 /*
4296 * at this point, there should be no other references to the map.
4297 * delete all of the mappings, then destroy the pmap.
4298 */
4299
4300 map->flags |= VM_MAP_DYING;
4301 pmap_remove_all(map->pmap);
4302 #ifdef SYSVSHM
4303 /* Get rid of any SYSV shared memory segments. */
4304 if (vm->vm_shm != NULL)
4305 shmexit(vm);
4306 #endif
4307 if (map->nentries) {
4308 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4309 &dead_entries, NULL, 0);
4310 if (dead_entries != NULL)
4311 uvm_unmap_detach(dead_entries, 0);
4312 }
4313 KASSERT(map->nentries == 0);
4314 KASSERT(map->size == 0);
4315 mutex_destroy(&map->misc_lock);
4316 mutex_destroy(&map->mutex);
4317 rw_destroy(&map->lock);
4318 cv_destroy(&map->cv);
4319 pmap_destroy(map->pmap);
4320 pool_cache_put(&uvm_vmspace_cache, vm);
4321 }
4322
4323 /*
4324 * F O R K - m a i n e n t r y p o i n t
4325 */
4326 /*
4327 * uvmspace_fork: fork a process' main map
4328 *
4329 * => create a new vmspace for child process from parent.
4330 * => parent's map must not be locked.
4331 */
4332
4333 struct vmspace *
4334 uvmspace_fork(struct vmspace *vm1)
4335 {
4336 struct vmspace *vm2;
4337 struct vm_map *old_map = &vm1->vm_map;
4338 struct vm_map *new_map;
4339 struct vm_map_entry *old_entry;
4340 struct vm_map_entry *new_entry;
4341 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4342
4343 vm_map_lock(old_map);
4344
4345 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4346 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4347 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4348 new_map = &vm2->vm_map; /* XXX */
4349
4350 old_entry = old_map->header.next;
4351 new_map->size = old_map->size;
4352
4353 /*
4354 * go entry-by-entry
4355 */
4356
4357 while (old_entry != &old_map->header) {
4358
4359 /*
4360 * first, some sanity checks on the old entry
4361 */
4362
4363 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4364 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4365 !UVM_ET_ISNEEDSCOPY(old_entry));
4366
4367 switch (old_entry->inheritance) {
4368 case MAP_INHERIT_NONE:
4369
4370 /*
4371 * drop the mapping, modify size
4372 */
4373 new_map->size -= old_entry->end - old_entry->start;
4374 break;
4375
4376 case MAP_INHERIT_SHARE:
4377
4378 /*
4379 * share the mapping: this means we want the old and
4380 * new entries to share amaps and backing objects.
4381 */
4382 /*
4383 * if the old_entry needs a new amap (due to prev fork)
4384 * then we need to allocate it now so that we have
4385 * something we own to share with the new_entry. [in
4386 * other words, we need to clear needs_copy]
4387 */
4388
4389 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4390 /* get our own amap, clears needs_copy */
4391 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4392 0, 0);
4393 /* XXXCDC: WAITOK??? */
4394 }
4395
4396 new_entry = uvm_mapent_alloc(new_map, 0);
4397 /* old_entry -> new_entry */
4398 uvm_mapent_copy(old_entry, new_entry);
4399
4400 /* new pmap has nothing wired in it */
4401 new_entry->wired_count = 0;
4402
4403 /*
4404 * gain reference to object backing the map (can't
4405 * be a submap, already checked this case).
4406 */
4407
4408 if (new_entry->aref.ar_amap)
4409 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4410
4411 if (new_entry->object.uvm_obj &&
4412 new_entry->object.uvm_obj->pgops->pgo_reference)
4413 new_entry->object.uvm_obj->
4414 pgops->pgo_reference(
4415 new_entry->object.uvm_obj);
4416
4417 /* insert entry at end of new_map's entry list */
4418 uvm_map_entry_link(new_map, new_map->header.prev,
4419 new_entry);
4420
4421 break;
4422
4423 case MAP_INHERIT_COPY:
4424
4425 /*
4426 * copy-on-write the mapping (using mmap's
4427 * MAP_PRIVATE semantics)
4428 *
4429 * allocate new_entry, adjust reference counts.
4430 * (note that new references are read-only).
4431 */
4432
4433 new_entry = uvm_mapent_alloc(new_map, 0);
4434 /* old_entry -> new_entry */
4435 uvm_mapent_copy(old_entry, new_entry);
4436
4437 if (new_entry->aref.ar_amap)
4438 uvm_map_reference_amap(new_entry, 0);
4439
4440 if (new_entry->object.uvm_obj &&
4441 new_entry->object.uvm_obj->pgops->pgo_reference)
4442 new_entry->object.uvm_obj->pgops->pgo_reference
4443 (new_entry->object.uvm_obj);
4444
4445 /* new pmap has nothing wired in it */
4446 new_entry->wired_count = 0;
4447
4448 new_entry->etype |=
4449 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4450 uvm_map_entry_link(new_map, new_map->header.prev,
4451 new_entry);
4452
4453 /*
4454 * the new entry will need an amap. it will either
4455 * need to be copied from the old entry or created
4456 * from scratch (if the old entry does not have an
4457 * amap). can we defer this process until later
4458 * (by setting "needs_copy") or do we need to copy
4459 * the amap now?
4460 *
4461 * we must copy the amap now if any of the following
4462 * conditions hold:
4463 * 1. the old entry has an amap and that amap is
4464 * being shared. this means that the old (parent)
4465 * process is sharing the amap with another
4466 * process. if we do not clear needs_copy here
4467 * we will end up in a situation where both the
4468 			 * parent and child process are referring to the
4469 * same amap with "needs_copy" set. if the
4470 * parent write-faults, the fault routine will
4471 * clear "needs_copy" in the parent by allocating
4472 * a new amap. this is wrong because the
4473 * parent is supposed to be sharing the old amap
4474 * and the new amap will break that.
4475 *
4476 * 2. if the old entry has an amap and a non-zero
4477 * wire count then we are going to have to call
4478 * amap_cow_now to avoid page faults in the
4479 * parent process. since amap_cow_now requires
4480 * "needs_copy" to be clear we might as well
4481 * clear it here as well.
4482 *
4483 */
4484
4485 if (old_entry->aref.ar_amap != NULL) {
4486 if ((amap_flags(old_entry->aref.ar_amap) &
4487 AMAP_SHARED) != 0 ||
4488 VM_MAPENT_ISWIRED(old_entry)) {
4489
4490 amap_copy(new_map, new_entry,
4491 AMAP_COPY_NOCHUNK, 0, 0);
4492 /* XXXCDC: M_WAITOK ... ok? */
4493 }
4494 }
4495
4496 /*
4497 * if the parent's entry is wired down, then the
4498 * parent process does not want page faults on
4499 * access to that memory. this means that we
4500 * cannot do copy-on-write because we can't write
4501 * protect the old entry. in this case we
4502 * resolve all copy-on-write faults now, using
4503 * amap_cow_now. note that we have already
4504 * allocated any needed amap (above).
4505 */
4506
4507 if (VM_MAPENT_ISWIRED(old_entry)) {
4508
4509 /*
4510 * resolve all copy-on-write faults now
4511 * (note that there is nothing to do if
4512 * the old mapping does not have an amap).
4513 */
4514 if (old_entry->aref.ar_amap)
4515 amap_cow_now(new_map, new_entry);
4516
4517 } else {
4518
4519 /*
4520 				 * set up mappings to trigger copy-on-write faults.
4521 * we must write-protect the parent if it has
4522 * an amap and it is not already "needs_copy"...
4523 * if it is already "needs_copy" then the parent
4524 * has already been write-protected by a previous
4525 * fork operation.
4526 */
4527
4528 if (old_entry->aref.ar_amap &&
4529 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4530 if (old_entry->max_protection & VM_PROT_WRITE) {
4531 pmap_protect(old_map->pmap,
4532 old_entry->start,
4533 old_entry->end,
4534 old_entry->protection &
4535 ~VM_PROT_WRITE);
4536 }
4537 old_entry->etype |= UVM_ET_NEEDSCOPY;
4538 }
4539 }
4540 break;
4541 } /* end of switch statement */
4542 old_entry = old_entry->next;
4543 }
4544
4545 pmap_update(old_map->pmap);
4546 vm_map_unlock(old_map);
4547
4548 #ifdef SYSVSHM
4549 if (vm1->vm_shm)
4550 shmfork(vm1, vm2);
4551 #endif
4552
4553 #ifdef PMAP_FORK
4554 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4555 #endif
4556
4557 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4558 return (vm2);
4559 }
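
/*
 * Editorial summary of the inheritance cases handled above:
 *
 *	MAP_INHERIT_NONE	mapping is dropped; the child map's
 *				size shrinks accordingly.
 *	MAP_INHERIT_SHARE	entry is copied and amap/object
 *				references are gained, so parent and
 *				child share the same pages.
 *	MAP_INHERIT_COPY	entry is copied copy-on-write; wired
 *				entries are resolved immediately with
 *				amap_cow_now, all others write-protect
 *				the parent and set needs_copy.
 */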
4560
4561
4562 /*
4563 * in-kernel map entry allocation.
4564 */
4565
4566 struct uvm_kmapent_hdr {
4567 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4568 int ukh_nused;
4569 struct vm_map_entry *ukh_freelist;
4570 struct vm_map *ukh_map;
4571 struct vm_map_entry ukh_entries[0];
4572 };
4573
4574 #define UVM_KMAPENT_CHUNK \
4575 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4576 / sizeof(struct vm_map_entry))
4577
4578 #define UVM_KHDR_FIND(entry) \
4579 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
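
/*
 * Editorial example of the arithmetic above, with assumed sizes
 * (PAGE_SIZE = 4096, a 48-byte header, 80-byte map entries; the real
 * values are machine-dependent):
 *
 *	UVM_KMAPENT_CHUNK = (4096 - 48) / 80 = 50
 *
 * so about 50 entries are carved out of each page, and UVM_KHDR_FIND
 * recovers the page-aligned header from any entry pointer by masking
 * off the page offset bits.
 */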
4580
4581
4582 #ifdef DIAGNOSTIC
4583 static struct vm_map *
4584 uvm_kmapent_map(struct vm_map_entry *entry)
4585 {
4586 const struct uvm_kmapent_hdr *ukh;
4587
4588 ukh = UVM_KHDR_FIND(entry);
4589 return ukh->ukh_map;
4590 }
4591 #endif
4592
4593 static inline struct vm_map_entry *
4594 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4595 {
4596 struct vm_map_entry *entry;
4597
4598 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4599 KASSERT(ukh->ukh_nused >= 0);
4600
4601 entry = ukh->ukh_freelist;
4602 if (entry) {
4603 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4604 == UVM_MAP_KERNEL);
4605 ukh->ukh_freelist = entry->next;
4606 ukh->ukh_nused++;
4607 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4608 } else {
4609 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4610 }
4611
4612 return entry;
4613 }
4614
4615 static inline void
4616 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4617 {
4618
4619 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4620 == UVM_MAP_KERNEL);
4621 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4622 KASSERT(ukh->ukh_nused > 0);
4623 KASSERT(ukh->ukh_freelist != NULL ||
4624 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4625 KASSERT(ukh->ukh_freelist == NULL ||
4626 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4627
4628 ukh->ukh_nused--;
4629 entry->next = ukh->ukh_freelist;
4630 ukh->ukh_freelist = entry;
4631 }
4632
4633 /*
4634 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4635 */
4636
4637 static struct vm_map_entry *
4638 uvm_kmapent_alloc(struct vm_map *map, int flags)
4639 {
4640 struct vm_page *pg;
4641 struct uvm_map_args args;
4642 struct uvm_kmapent_hdr *ukh;
4643 struct vm_map_entry *entry;
4644 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4645 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4646 vaddr_t va;
4647 int error;
4648 int i;
4649
4650 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4651 KDASSERT(kernel_map != NULL);
4652 KASSERT(vm_map_pmap(map) == pmap_kernel());
4653
4654 UVMMAP_EVCNT_INCR(uke_alloc);
4655 entry = NULL;
4656 again:
4657 /*
4658 * try to grab an entry from freelist.
4659 */
4660 mutex_spin_enter(&uvm_kentry_lock);
4661 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4662 if (ukh) {
4663 entry = uvm_kmapent_get(ukh);
4664 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4665 LIST_REMOVE(ukh, ukh_listq);
4666 }
4667 mutex_spin_exit(&uvm_kentry_lock);
4668
4669 if (entry)
4670 return entry;
4671
4672 /*
4673 * there's no free entry for this vm_map.
4674 	 * now we need to allocate some vm_map_entries.
4675 	 * for simplicity, always allocate a one-page chunk of them at once.
4676 */
4677
4678 pg = uvm_pagealloc(NULL, 0, NULL,
4679 (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
4680 if (__predict_false(pg == NULL)) {
4681 if (flags & UVM_FLAG_NOWAIT)
4682 return NULL;
4683 uvm_wait("kme_alloc");
4684 goto again;
4685 }
4686
4687 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4688 0, mapflags, &args);
4689 if (error) {
4690 uvm_pagefree(pg);
4691 return NULL;
4692 }
4693
4694 va = args.uma_start;
4695
4696 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
4697 VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
4698 pmap_update(vm_map_pmap(map));
4699
4700 ukh = (void *)va;
4701
4702 /*
4703 	 * use the first entry for ukh itself.
4704 */
4705
4706 entry = &ukh->ukh_entries[0];
4707 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4708 error = uvm_map_enter(map, &args, entry);
4709 KASSERT(error == 0);
4710
4711 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4712 ukh->ukh_map = map;
4713 ukh->ukh_freelist = NULL;
4714 for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
4715 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4716
4717 xentry->flags = UVM_MAP_KERNEL;
4718 uvm_kmapent_put(ukh, xentry);
4719 }
4720 KASSERT(ukh->ukh_nused == 2);
4721
4722 mutex_spin_enter(&uvm_kentry_lock);
4723 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4724 ukh, ukh_listq);
4725 mutex_spin_exit(&uvm_kentry_lock);
4726
4727 /*
4728 * return second entry.
4729 */
4730
4731 entry = &ukh->ukh_entries[1];
4732 entry->flags = UVM_MAP_KERNEL;
4733 UVMMAP_EVCNT_INCR(ukh_alloc);
4734 return entry;
4735 }
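
/*
 * Editorial note on the chunk layout built above: entry [0] maps the
 * chunk's own page, entry [1] is returned to the caller, and entries
 * [2 .. UVM_KMAPENT_CHUNK-1] are placed on the header's freelist,
 * which is why ukh_nused is asserted to be 2 before returning.
 */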
4736
4737 /*
4738  * uvm_kmapent_free: free map entry for in-kernel map
4739 */
4740
4741 static void
4742 uvm_kmapent_free(struct vm_map_entry *entry)
4743 {
4744 struct uvm_kmapent_hdr *ukh;
4745 struct vm_page *pg;
4746 struct vm_map *map;
4747 struct pmap *pmap;
4748 vaddr_t va;
4749 paddr_t pa;
4750 struct vm_map_entry *deadentry;
4751
4752 UVMMAP_EVCNT_INCR(uke_free);
4753 ukh = UVM_KHDR_FIND(entry);
4754 map = ukh->ukh_map;
4755
4756 mutex_spin_enter(&uvm_kentry_lock);
4757 uvm_kmapent_put(ukh, entry);
4758 if (ukh->ukh_nused > 1) {
4759 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4760 LIST_INSERT_HEAD(
4761 &vm_map_to_kernel(map)->vmk_kentry_free,
4762 ukh, ukh_listq);
4763 mutex_spin_exit(&uvm_kentry_lock);
4764 return;
4765 }
4766
4767 /*
4768 * now we can free this ukh.
4769 *
4770 * however, keep an empty ukh to avoid ping-pong.
4771 */
4772
4773 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4774 LIST_NEXT(ukh, ukh_listq) == NULL) {
4775 mutex_spin_exit(&uvm_kentry_lock);
4776 return;
4777 }
4778 LIST_REMOVE(ukh, ukh_listq);
4779 mutex_spin_exit(&uvm_kentry_lock);
4780
4781 KASSERT(ukh->ukh_nused == 1);
4782
4783 /*
4784 	 * remove map entry for ukh itself.
4785 */
4786
4787 va = (vaddr_t)ukh;
4788 KASSERT((va & PAGE_MASK) == 0);
4789 vm_map_lock(map);
4790 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4791 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4792 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4793 KASSERT(deadentry->next == NULL);
4794 KASSERT(deadentry == &ukh->ukh_entries[0]);
4795
4796 /*
4797 * unmap the page from pmap and free it.
4798 */
4799
4800 pmap = vm_map_pmap(map);
4801 KASSERT(pmap == pmap_kernel());
4802 if (!pmap_extract(pmap, va, &pa))
4803 panic("%s: no mapping", __func__);
4804 pmap_kremove(va, PAGE_SIZE);
4805 pmap_update(vm_map_pmap(map));
4806 vm_map_unlock(map);
4807 pg = PHYS_TO_VM_PAGE(pa);
4808 uvm_pagefree(pg);
4809 UVMMAP_EVCNT_INCR(ukh_free);
4810 }
4811
4812 static vsize_t
4813 uvm_kmapent_overhead(vsize_t size)
4814 {
4815
4816 /*
4817 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4818 * as the min allocation unit is PAGE_SIZE.
4819 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4820 	 *   one of them is used to map the page itself.
4821 */
4822
4823 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4824 PAGE_SIZE;
4825 }
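
/*
 * Editorial example, using the same assumed sizes as above
 * (PAGE_SIZE = 4096, UVM_KMAPENT_CHUNK = 50): a 1MB mapping can
 * fragment into at most howmany(1MB, 4096) = 256 entries, which need
 * howmany(256, 49) = 6 pages, i.e. 24KB of worst-case overhead.
 */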
4826
4827 /*
4828 * map entry reservation
4829 */
4830
4831 /*
4832 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4833 *
4834 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4835 * => caller shouldn't hold map locked.
4836 */
4837 int
4838 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4839 int nentries, int flags)
4840 {
4841
4842 umr->umr_nentries = 0;
4843
4844 if ((flags & UVM_FLAG_QUANTUM) != 0)
4845 return 0;
4846
4847 if (!VM_MAP_USE_KMAPENT(map))
4848 return 0;
4849
4850 while (nentries--) {
4851 struct vm_map_entry *ent;
4852 ent = uvm_kmapent_alloc(map, flags);
4853 if (!ent) {
4854 uvm_mapent_unreserve(map, umr);
4855 return ENOMEM;
4856 }
4857 UMR_PUTENTRY(umr, ent);
4858 }
4859
4860 return 0;
4861 }
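
/*
 * Editorial sketch of the intended calling pattern (uvm_unmap1 below
 * follows exactly this shape):
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	uvm_mapent_reserve(map, &umr, 2, flags);
 *	vm_map_lock(map);
 *	... clip and unmap entries, passing &umr ...
 *	vm_map_unlock(map);
 *	uvm_mapent_unreserve(map, &umr);
 */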
4862
4863 /*
4864 * uvm_mapent_unreserve:
4865 *
4866 * => caller shouldn't hold map locked.
4867  * => never fails or sleeps.
4868 */
4869 void
4870 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4871 {
4872
4873 while (!UMR_EMPTY(umr))
4874 uvm_kmapent_free(UMR_GETENTRY(umr));
4875 }
4876
4877 /*
4878 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4879 *
4880 * => called with map locked.
4881  * => returns non-zero if successfully merged.
4882 */
4883
4884 int
4885 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4886 {
4887 struct uvm_object *uobj;
4888 struct vm_map_entry *next;
4889 struct vm_map_entry *prev;
4890 vsize_t size;
4891 int merged = 0;
4892 bool copying;
4893 int newetype;
4894
4895 if (VM_MAP_USE_KMAPENT(map)) {
4896 return 0;
4897 }
4898 if (entry->aref.ar_amap != NULL) {
4899 return 0;
4900 }
4901 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4902 return 0;
4903 }
4904
4905 uobj = entry->object.uvm_obj;
4906 size = entry->end - entry->start;
4907 copying = (flags & UVM_MERGE_COPYING) != 0;
4908 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4909
4910 next = entry->next;
4911 if (next != &map->header &&
4912 next->start == entry->end &&
4913 ((copying && next->aref.ar_amap != NULL &&
4914 amap_refs(next->aref.ar_amap) == 1) ||
4915 (!copying && next->aref.ar_amap == NULL)) &&
4916 UVM_ET_ISCOMPATIBLE(next, newetype,
4917 uobj, entry->flags, entry->protection,
4918 entry->max_protection, entry->inheritance, entry->advice,
4919 entry->wired_count) &&
4920 (uobj == NULL || entry->offset + size == next->offset)) {
4921 int error;
4922
4923 if (copying) {
4924 error = amap_extend(next, size,
4925 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4926 } else {
4927 error = 0;
4928 }
4929 if (error == 0) {
4930 if (uobj) {
4931 if (uobj->pgops->pgo_detach) {
4932 uobj->pgops->pgo_detach(uobj);
4933 }
4934 }
4935
4936 entry->end = next->end;
4937 clear_hints(map, next);
4938 uvm_map_entry_unlink(map, next);
4939 if (copying) {
4940 entry->aref = next->aref;
4941 entry->etype &= ~UVM_ET_NEEDSCOPY;
4942 }
4943 uvm_map_check(map, "trymerge forwardmerge");
4944 uvm_mapent_free_merged(map, next);
4945 merged++;
4946 }
4947 }
4948
4949 prev = entry->prev;
4950 if (prev != &map->header &&
4951 prev->end == entry->start &&
4952 ((copying && !merged && prev->aref.ar_amap != NULL &&
4953 amap_refs(prev->aref.ar_amap) == 1) ||
4954 (!copying && prev->aref.ar_amap == NULL)) &&
4955 UVM_ET_ISCOMPATIBLE(prev, newetype,
4956 uobj, entry->flags, entry->protection,
4957 entry->max_protection, entry->inheritance, entry->advice,
4958 entry->wired_count) &&
4959 (uobj == NULL ||
4960 prev->offset + prev->end - prev->start == entry->offset)) {
4961 int error;
4962
4963 if (copying) {
4964 error = amap_extend(prev, size,
4965 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4966 } else {
4967 error = 0;
4968 }
4969 if (error == 0) {
4970 if (uobj) {
4971 if (uobj->pgops->pgo_detach) {
4972 uobj->pgops->pgo_detach(uobj);
4973 }
4974 entry->offset = prev->offset;
4975 }
4976
4977 entry->start = prev->start;
4978 clear_hints(map, prev);
4979 uvm_map_entry_unlink(map, prev);
4980 if (copying) {
4981 entry->aref = prev->aref;
4982 entry->etype &= ~UVM_ET_NEEDSCOPY;
4983 }
4984 uvm_map_check(map, "trymerge backmerge");
4985 uvm_mapent_free_merged(map, prev);
4986 merged++;
4987 }
4988 }
4989
4990 return merged;
4991 }
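
/*
 * Editorial summary of the merge conditions checked above: a
 * neighbour is merged only if it is virtually contiguous, passes
 * UVM_ET_ISCOMPATIBLE (etype, flags, protections, inheritance,
 * advice, wired count), has object offsets that line up, and has an
 * amap situation that fits the mode: no amaps at all when not
 * copying, or a single-reference neighbour amap that amap_extend can
 * grow when copying.
 */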
4992
4993 #if defined(DDB) || defined(DEBUGPRINT)
4994
4995 /*
4996 * DDB hooks
4997 */
4998
4999 /*
5000 * uvm_map_printit: actually prints the map
5001 */
5002
5003 void
5004 uvm_map_printit(struct vm_map *map, bool full,
5005 void (*pr)(const char *, ...))
5006 {
5007 struct vm_map_entry *entry;
5008
5009 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
5010 vm_map_max(map));
5011 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
5012 map->nentries, map->size, map->ref_count, map->timestamp,
5013 map->flags);
5014 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5015 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5016 if (!full)
5017 return;
5018 for (entry = map->header.next; entry != &map->header;
5019 entry = entry->next) {
5020 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
5021 entry, entry->start, entry->end, entry->object.uvm_obj,
5022 (long long)entry->offset, entry->aref.ar_amap,
5023 entry->aref.ar_pageoff);
5024 (*pr)(
5025 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5026 "wc=%d, adv=%d\n",
5027 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5028 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5029 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5030 entry->protection, entry->max_protection,
5031 entry->inheritance, entry->wired_count, entry->advice);
5032 }
5033 }
5034
5035 /*
5036 * uvm_object_printit: actually prints the object
5037 */
5038
5039 void
5040 uvm_object_printit(struct uvm_object *uobj, bool full,
5041 void (*pr)(const char *, ...))
5042 {
5043 struct vm_page *pg;
5044 int cnt = 0;
5045
5046 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
5047 uobj, mutex_owned(&uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
5048 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
5049 (*pr)("refs=<SYSTEM>\n");
5050 else
5051 (*pr)("refs=%d\n", uobj->uo_refs);
5052
5053 if (!full) {
5054 return;
5055 }
5056 (*pr)(" PAGES <pg,offset>:\n ");
5057 TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
5058 cnt++;
5059 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
5060 if ((cnt % 3) == 0) {
5061 (*pr)("\n ");
5062 }
5063 }
5064 if ((cnt % 3) != 0) {
5065 (*pr)("\n");
5066 }
5067 }
5068
5069 /*
5070 * uvm_page_printit: actually print the page
5071 */
5072
5073 static const char page_flagbits[] = UVM_PGFLAGBITS;
5074 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
5075
5076 void
5077 uvm_page_printit(struct vm_page *pg, bool full,
5078 void (*pr)(const char *, ...))
5079 {
5080 struct vm_page *tpg;
5081 struct uvm_object *uobj;
5082 struct pgflist *pgl;
5083 char pgbuf[128];
5084 char pqbuf[128];
5085
5086 (*pr)("PAGE %p:\n", pg);
5087 snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
5088 snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags);
5089 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
5090 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
5091 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
5092 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
5093 #if defined(UVM_PAGE_TRKOWN)
5094 if (pg->flags & PG_BUSY)
5095 (*pr)(" owning process = %d, tag=%s\n",
5096 pg->owner, pg->owner_tag);
5097 else
5098 (*pr)(" page not busy, no owner\n");
5099 #else
5100 (*pr)(" [page ownership tracking disabled]\n");
5101 #endif
5102
5103 if (!full)
5104 return;
5105
5106 /* cross-verify object/anon */
5107 if ((pg->pqflags & PQ_FREE) == 0) {
5108 if (pg->pqflags & PQ_ANON) {
5109 if (pg->uanon == NULL || pg->uanon->an_page != pg)
5110 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
5111 (pg->uanon) ? pg->uanon->an_page : NULL);
5112 else
5113 (*pr)(" anon backpointer is OK\n");
5114 } else {
5115 uobj = pg->uobject;
5116 if (uobj) {
5117 (*pr)(" checking object list\n");
5118 TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
5119 if (tpg == pg) {
5120 break;
5121 }
5122 }
5123 if (tpg)
5124 (*pr)(" page found on object list\n");
5125 else
5126 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
5127 }
5128 }
5129 }
5130
5131 /* cross-verify page queue */
5132 if (pg->pqflags & PQ_FREE) {
5133 int fl = uvm_page_lookup_freelist(pg);
5134 int color = VM_PGCOLOR_BUCKET(pg);
5135 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
5136 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
5137 } else {
5138 pgl = NULL;
5139 }
5140
5141 if (pgl) {
5142 (*pr)(" checking pageq list\n");
5143 LIST_FOREACH(tpg, pgl, pageq.list) {
5144 if (tpg == pg) {
5145 break;
5146 }
5147 }
5148 if (tpg)
5149 (*pr)(" page found on pageq list\n");
5150 else
5151 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
5152 }
5153 }
5154
5155 /*
5156  * uvm_page_printall - print a summary of all managed pages
5157 */
5158
5159 void
5160 uvm_page_printall(void (*pr)(const char *, ...))
5161 {
5162 unsigned i;
5163 struct vm_page *pg;
5164
5165 (*pr)("%18s %4s %4s %18s %18s"
5166 #ifdef UVM_PAGE_TRKOWN
5167 " OWNER"
5168 #endif
5169 "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
5170 for (i = 0; i < vm_nphysseg; i++) {
5171 for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) {
5172 (*pr)("%18p %04x %04x %18p %18p",
5173 pg, pg->flags, pg->pqflags, pg->uobject,
5174 pg->uanon);
5175 #ifdef UVM_PAGE_TRKOWN
5176 if (pg->flags & PG_BUSY)
5177 (*pr)(" %d [%s]", pg->owner, pg->owner_tag);
5178 #endif
5179 (*pr)("\n");
5180 }
5181 }
5182 }
5183
5184 #endif
5185
5186 /*
5187 * uvm_map_create: create map
5188 */
5189
5190 struct vm_map *
5191 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5192 {
5193 struct vm_map *result;
5194
5195 result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
5196 uvm_map_setup(result, vmin, vmax, flags);
5197 result->pmap = pmap;
5198 return(result);
5199 }
5200
5201 /*
5202 * uvm_map_setup: init map
5203 *
5204 * => map must not be in service yet.
5205 */
5206
5207 void
5208 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5209 {
5210 int ipl;
5211
5212 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
5213 map->header.next = map->header.prev = &map->header;
5214 map->nentries = 0;
5215 map->size = 0;
5216 map->ref_count = 1;
5217 vm_map_setmin(map, vmin);
5218 vm_map_setmax(map, vmax);
5219 map->flags = flags;
5220 map->first_free = &map->header;
5221 map->hint = &map->header;
5222 map->timestamp = 0;
5223 map->busy = NULL;
5224
5225 if ((flags & VM_MAP_INTRSAFE) != 0) {
5226 ipl = IPL_VM;
5227 } else {
5228 ipl = IPL_NONE;
5229 }
5230
5231 rw_init(&map->lock);
5232 cv_init(&map->cv, "vm_map");
5233 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5234 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
5235 }
5236
5237
5238 /*
5239 * U N M A P - m a i n e n t r y p o i n t
5240 */
5241
5242 /*
5243  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
5244 *
5245 * => caller must check alignment and size
5246 * => map must be unlocked (we will lock it)
5247 * => flags is UVM_FLAG_QUANTUM or 0.
5248 */
5249
5250 void
5251 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5252 {
5253 struct vm_map_entry *dead_entries;
5254 struct uvm_mapent_reservation umr;
5255 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5256
5257 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5258 map, start, end, 0);
5259 if (map == kernel_map) {
5260 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5261 }
5262 /*
5263 	 * the work is now done by helper functions. wipe the pmap
5264 	 * mappings and then detach the dead entries...
5265 */
5266 uvm_mapent_reserve(map, &umr, 2, flags);
5267 vm_map_lock(map);
5268 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5269 vm_map_unlock(map);
5270 uvm_mapent_unreserve(map, &umr);
5271
5272 if (dead_entries != NULL)
5273 uvm_unmap_detach(dead_entries, 0);
5274
5275 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5276 }
5277
5278
5279 /*
5280 * uvm_map_reference: add reference to a map
5281 *
5282 * => map need not be locked (we use misc_lock).
5283 */
5284
5285 void
5286 uvm_map_reference(struct vm_map *map)
5287 {
5288 mutex_enter(&map->misc_lock);
5289 map->ref_count++;
5290 mutex_exit(&map->misc_lock);
5291 }
5292
5293 struct vm_map_kernel *
5294 vm_map_to_kernel(struct vm_map *map)
5295 {
5296
5297 KASSERT(VM_MAP_IS_KERNEL(map));
5298
5299 return (struct vm_map_kernel *)map;
5300 }
5301
5302 bool
5303 vm_map_starved_p(struct vm_map *map)
5304 {
5305
5306 if ((map->flags & VM_MAP_WANTVA) != 0) {
5307 return true;
5308 }
5309 /* XXX */
5310 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5311 return true;
5312 }
5313 return false;
5314 }
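
/*
 * Editorial example of the heuristic above: for a hypothetical 256MB
 * kernel submap, the map counts as starved once map->size exceeds
 * 15/16 of the range (240MB), or immediately if VM_MAP_WANTVA is set.
 */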
5315
5316 #if defined(DDB) || defined(DEBUGPRINT)
5317 void
5318 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5319 {
5320 struct vm_map *map;
5321
5322 for (map = kernel_map;;) {
5323 struct vm_map_entry *entry;
5324
5325 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5326 break;
5327 }
5328 (*pr)("%p is %p+%zu from VMMAP %p\n",
5329 (void *)addr, (void *)entry->start,
5330 (size_t)(addr - (uintptr_t)entry->start), map);
5331 if (!UVM_ET_ISSUBMAP(entry)) {
5332 break;
5333 }
5334 map = entry->object.sub_map;
5335 }
5336 }
5337 #endif /* defined(DDB) || defined(DEBUGPRINT) */
5338