/*	$NetBSD: uvm_map.c,v 1.282 2009/09/06 23:14:19 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.282 2009/09/06 23:14:19 rmind Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */
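
/*
 * When UVMMAP_COUNTERS is enabled, each of the counters defined below is
 * attached as a "uvmmap" MISC event counter and shows up in the system's
 * event counter listing (e.g. via vmstat -e).
 */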

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_list)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)
UVMMAP_EVCNT_DEFINE(mlk_listloop)

UVMMAP_EVCNT_DEFINE(uke_alloc)
UVMMAP_EVCNT_DEFINE(uke_free)
UVMMAP_EVCNT_DEFINE(ukh_alloc)
UVMMAP_EVCNT_DEFINE(ukh_free)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
 * for the vm_map.
 */
extern struct vm_map *pager_map; /* XXX */
#define	VM_MAP_USE_KMAPENT_FLAGS(flags) \
	(((flags) & VM_MAP_INTRSAFE) != 0)
#define	VM_MAP_USE_KMAPENT(map) \
	(VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
	== 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define	SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)
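
/*
 * Note: the conditional store above lets a caller such as clear_hints()
 * replace the hint only while it still points at the entry being removed,
 * rather than unconditionally overwriting a possibly fresher hint.
 */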

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define	VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static struct vm_map_entry *
		uvm_mapent_alloc_split(struct vm_map *,
		    const struct vm_map_entry *, int,
		    struct uvm_mapent_reservation *);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, const char *,
		    int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __FILE__, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */
static struct vm_map_entry *
		uvm_kmapent_alloc(struct vm_map *, int);
static void	uvm_kmapent_free(struct vm_map_entry *);
static vsize_t	uvm_kmapent_overhead(vsize_t);

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

CTASSERT(offsetof(struct vm_map_entry, rb_node) == 0);
#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL \
	    : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
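
/*
 * These casts rely on rb_node being the first member of struct
 * vm_map_entry (enforced by the CTASSERT above), so a pointer to the
 * embedded rb_node is also a pointer to the enclosing entry.
 */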

static int
uvm_map_compare_nodes(const struct rb_node *nparent,
    const struct rb_node *nkey)
{
	const struct vm_map_entry *eparent = (const void *) nparent;
	const struct vm_map_entry *ekey = (const void *) nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (ekey->start < eparent->start)
		return -1;
	if (ekey->start >= eparent->end)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(const struct rb_node *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = (const void *) nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (va < eparent->start)
		return -1;
	if (va >= eparent->end)
		return 1;
	return 0;
}

static const struct rb_tree_ops uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
};

static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{
	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendants.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendants, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
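	/*
	 * For example (illustrative numbers): if our own gap is 2 pages,
	 * the left child caches a maxgap of 5 pages and the right child
	 * 1 page, the answer is 5 pages, computed without visiting any
	 * grandchildren.
	 */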
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work our way towards the root, we know entry's
		 * maxgap value is OK, but its brother's may now be
		 * out-of-date due to rebalancing.  So refresh it.
		 */
		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	if (!rb_tree_insert_node(&map->rb_tree, &entry->rb_node))
		panic("uvm_rb_insert: map %p: duplicate entry?", map);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, &entry->rb_node);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %lx != %lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, tmp->gap will be unsigned
		 * and will likely exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %lx != %lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("corrupt: 0x%lx >= 0x%lx\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
		    RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
		    RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = (void *)rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

#ifdef DIAGNOSTIC
static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
#endif

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => Note that "intrsafe" maps use only exclusive, spin locks.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */
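
/*
 * Illustrative sketch (not taken from a real caller) of the busy
 * protocol described above:
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);	(mark busy while write locked)
 *	vm_map_unlock(map);
 *	... sleep or do I/O; readers may still use the map ...
 *	vm_map_lock(map);	(the busy owner is allowed back in)
 *	vm_map_unbusy(map);	(wake threads waiting in vm_map_lock())
 *	vm_map_unlock(map);
 */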

void
vm_map_lock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		mutex_spin_enter(&map->mutex);
		return;
	}

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL)
			break;
		if (map->busy == curlwp)
			break;
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL)
			cv_wait(&map->cv, &map->misc_lock);
		mutex_exit(&map->misc_lock);
	}

	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		return mutex_tryenter(&map->mutex);
	if (!rw_tryenter(&map->lock, RW_WRITER))
		return false;
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}

	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		mutex_spin_exit(&map->mutex);
	else {
		KASSERT(rw_write_held(&map->lock));
		KASSERT(map->busy == NULL || map->busy == curlwp);
		rw_exit(&map->lock);
	}
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 * want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		return mutex_owned(&map->mutex);
	} else {
		return rw_write_held(&map->lock);
	}
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (VM_MAP_USE_KMAPENT(map)) {
		me = uvm_kmapent_alloc(map, flags);
	} else {
		me = pool_cache_get(&uvm_map_entry_cache, pflags);
		if (__predict_false(me == NULL))
			return NULL;
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return (me);
}

/*
 * uvm_mapent_alloc_split: allocate a map entry for clipping.
 *
 * => map must be locked by caller if UVM_MAP_QUANTUM is set.
 */

static struct vm_map_entry *
uvm_mapent_alloc_split(struct vm_map *map,
    const struct vm_map_entry *old_entry, int flags,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *me;

	KASSERT(!VM_MAP_USE_KMAPENT(map) ||
	    (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));

	if (old_entry->flags & UVM_MAP_QUANTUM) {
		struct vm_map_kernel *vmk = vm_map_to_kernel(map);

		KASSERT(vm_map_locked_p(map));
		me = vmk->vmk_merged_entries;
		KASSERT(me);
		vmk->vmk_merged_entries = me->next;
		KASSERT(me->flags & UVM_MAP_QUANTUM);
	} else {
		me = uvm_mapent_alloc(map, flags);
	}

	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_KERNEL) {
		uvm_kmapent_free(me);
	} else {
		pool_cache_put(&uvm_map_entry_cache, me);
	}
}

/*
 * uvm_mapent_free_merged: free merged map entry
 *
 * => keep the entry if needed.
 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
 * => map should be locked if UVM_MAP_QUANTUM is set.
 */

static void
uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
{

	KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);

	if (me->flags & UVM_MAP_QUANTUM) {
		/*
		 * keep this entry for later splitting.
		 */
		struct vm_map_kernel *vmk;

		KASSERT(vm_map_locked_p(map));
		KASSERT(VM_MAP_IS_KERNEL(map));
		KASSERT(!VM_MAP_USE_KMAPENT(map) ||
		    (me->flags & UVM_MAP_KERNEL));

		vmk = vm_map_to_kernel(map);
		me->next = vmk->vmk_merged_entries;
		vmk->vmk_merged_entries = me;
	} else {
		uvm_mapent_free(me);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

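	/*
	 * Note: only the fields before the uvm_map_entry_stop_copy marker
	 * are copied; dst's own flags (e.g. UVM_MAP_KERNEL) are
	 * deliberately preserved, per the "preserving flags" contract
	 * above.
	 */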
	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

/*
 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
 * map entries.
 *
 * => size and flags are the same as uvm_km_suballoc's ones.
 */

vsize_t
uvm_mapent_overhead(vsize_t size, int flags)
{

	if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
		return uvm_kmapent_overhead(size);
	}
	return 0;
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);

	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start, struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}

static void
vm_map_drain(struct vm_map *map, uvm_flag_t flags)
{

	if (!VM_MAP_IS_KERNEL(map)) {
		return;
	}

	uvm_km_va_drain(map, flags);
}

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.  note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */
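
/*
 * Illustrative example (not taken from a real caller): an anonymous,
 * copy-on-write mapping with no uobj and no PMAP_PREFER hint (case [2]
 * above) could be requested as:
 *
 *	error = uvm_map(&vm->vm_map, &va, size, NULL, UVM_UNKNOWN_OFFSET,
 *	    0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW));
 *
 * See sys_mmap() and uvm_mmap() for the real callers.
 */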

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
	KASSERT((size & PAGE_MASK) == 0);

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 *
	 * Also, because we allocate entries for in-kernel maps
	 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
	 * allocate them before locking the map.
	 */

	new_entry = NULL;
	if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
	    map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
		if (flags & UVM_FLAG_QUANTUM)
			new_entry->flags |= UVM_MAP_QUANTUM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map)) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC("uvm_map_prepare");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL ||
	    (map->flags & VM_MAP_INTRSAFE));

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */

retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
		    (map->flags & VM_MAP_INTRSAFE) == 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags);
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		vm_map_drain(map, flags);

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).   this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).   the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
	return 0;
}

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);
	const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
	    UVM_MAP_QUANTUM : 0;

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC("uvm_map_enter");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */

	if (flags & UVM_FLAG_QUANTUM) {
		KASSERT(new_entry);
		KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
	}

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.   might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk.  Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 * (if merged, we've already detached)
		 */
		if (uobj && uobj->pgops->pgo_detach && !merged)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;
done:
	if ((flags & UVM_FLAG_QUANTUM) == 0) {
		/*
		 * vmk_merged_entries is locked by the map's lock.
		 */
		vm_map_unlock(map);
	}
	if (new_entry && error == 0) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, new_entry);
		new_entry = NULL;
	}
	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, dead);
	}
	if ((flags & UVM_FLAG_QUANTUM) != 0) {
		vm_map_unlock(map);
	}
	if (new_entry != NULL) {
		uvm_mapent_free(new_entry);
	}
	return error;
}

/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	while (cur) {
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			cur = RIGHT_ENTRY(cur);
		} else
			cur = LEFT_ENTRY(cur);
	}
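	/*
	 * Not found: "prev" is the last entry we descended right past,
	 * i.e. the nearest entry below "address" (or the header if the
	 * address lies below every entry in the tree).
	 */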
	*entry = prev;
	return false;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	bool use_tree = false;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	UVMMAP_EVCNT_INCR(mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		if (cur != &map->header && cur->end > address) {
			UVMMAP_EVCNT_INCR(mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			uvm_mapent_check(*entry);
			return (true);
		}

		if (map->nentries > 15)
			use_tree = true;
	} else {

		/*
		 * invalid hint.  use tree.
		 */
		use_tree = true;
	}

	uvm_map_check(map, __func__);

	if (use_tree) {
		/*
		 * Simple lookup in the tree.  Happens when the hint is
		 * invalid, or nentries reach a threshold.
		 */
		UVMMAP_EVCNT_INCR(mlk_tree);
		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
			goto got;
		} else {
			goto failed;
		}
	}

	/*
	 * search linearly
	 */

	UVMMAP_EVCNT_INCR(mlk_list);
	while (cur != &map->header) {
		UVMMAP_EVCNT_INCR(mlk_listloop);
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
got:
				SAVE_HINT(map, map->hint, *entry);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				KDASSERT((*entry)->start <= address);
				KDASSERT(address < (*entry)->end);
				uvm_mapent_check(*entry);
				return (true);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
failed:
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}

/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits,
 * 0 if it doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if (align != 0) {
		if ((*start & (align - 1)) != 0) {
			if (topdown)
				*start &= ~(align - 1);
			else
				*start = roundup(*start, align);
		}
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}
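
	/*
	 * Worked example of the fixup above (illustrative numbers): with
	 * *start == 0x12345 and align == 0x1000, a topdown map rounds the
	 * candidate down to 0x12000 while a bottom-up map rounds it up to
	 * 0x13000, moving the address in the direction the search scans.
	 */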

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set in "flags" (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if "align" is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

struct vm_map_entry *
uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
    vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
    vsize_t align, int flags)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *child, *prev, *tmp;
	vaddr_t orig_hint;
	const int topdown = map->flags & VM_MAP_TOPDOWN;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
	    map, hint, length, flags);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	uvm_map_check(map, "map_findspace entry");

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < vm_map_min(map)) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return (NULL);
		}
		hint = vm_map_min(map);
	}
	if (hint > vm_map_max(map)) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, vm_map_min(map), vm_map_max(map), 0);
		return (NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	/*
	 * @@@: there are four, no, eight cases to consider.
	 *
	 * 0: found,     fixed,     bottom up -> fail
	 * 1: found,     fixed,     top down  -> fail
	 * 2: found,     not fixed, bottom up -> start after entry->end,
	 *                                       loop up
	 * 3: found,     not fixed, top down  -> start before entry->start,
	 *                                       loop down
	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
	 * 6: not found, not fixed, bottom up -> check entry->next->start,
	 *                                       loop up
	 * 7: not found, not fixed, top down  -> check entry->next->start,
	 *                                       loop down
	 *
	 * as you can see, it reduces to roughly five cases, and that
	 * adding top down mapping only adds one unique case (without
	 * it, there would be four cases).
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
		entry = map->first_free;
	} else {
		if (uvm_map_lookup_entry(map, hint, &entry)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist, "<- fixed & VA in use",
				    0, 0, 0, 0);
				return (NULL);
			}
			if (topdown)
				/* Start from lower gap. */
				entry = entry->prev;
		} else if (flags & UVM_FLAG_FIXED) {
			if (entry->next->start >= hint + length &&
			    hint + length > hint)
				goto found;

			/* "hint" address is gap but too small */
			UVMHIST_LOG(maphist, "<- fixed mapping failed",
			    0, 0, 0, 0);
			return (NULL); /* only one shot at it ... */
		} else {
			/*
			 * See if given hint fits in this gap.
			 */
			switch (uvm_map_space_avail(&hint, length,
			    uoffset, align, topdown, entry)) {
			case 1:
				goto found;
			case -1:
				goto wraparound;
			}

			if (topdown) {
				/*
				 * Still there is a chance to fit
				 * if hint > entry->end.
				 */
			} else {
				/* Start from higher gap. */
				entry = entry->next;
				if (entry == &map->header)
					goto notfound;
				goto nextgap;
			}
		}
	}

	/*
2024	 * Note that the UVM_FLAG_FIXED case has already been handled.
2025 */
2026 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2027
2028 /* Try to find the space in the red-black tree */
2029
2030 /* Check slot before any entry */
2031 hint = topdown ? entry->next->start - length : entry->end;
2032 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2033 topdown, entry)) {
2034 case 1:
2035 goto found;
2036 case -1:
2037 goto wraparound;
2038 }
2039
2040 nextgap:
2041 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2042 /* If there is not enough space in the whole tree, we fail */
2043 tmp = ROOT_ENTRY(map);
2044 if (tmp == NULL || tmp->maxgap < length)
2045 goto notfound;
2046
2047 prev = NULL; /* previous candidate */
2048
2049 /* Find an entry close to hint that has enough space */
2050 for (; tmp;) {
2051 KASSERT(tmp->next->start == tmp->end + tmp->gap);
2052 if (topdown) {
2053 if (tmp->next->start < hint + length &&
2054 (prev == NULL || tmp->end > prev->end)) {
2055 if (tmp->gap >= length)
2056 prev = tmp;
2057 else if ((child = LEFT_ENTRY(tmp)) != NULL
2058 && child->maxgap >= length)
2059 prev = tmp;
2060 }
2061 } else {
2062 if (tmp->end >= hint &&
2063 (prev == NULL || tmp->end < prev->end)) {
2064 if (tmp->gap >= length)
2065 prev = tmp;
2066 else if ((child = RIGHT_ENTRY(tmp)) != NULL
2067 && child->maxgap >= length)
2068 prev = tmp;
2069 }
2070 }
2071 if (tmp->next->start < hint + length)
2072 child = RIGHT_ENTRY(tmp);
2073 else if (tmp->end > hint)
2074 child = LEFT_ENTRY(tmp);
2075 else {
2076 if (tmp->gap >= length)
2077 break;
2078 if (topdown)
2079 child = LEFT_ENTRY(tmp);
2080 else
2081 child = RIGHT_ENTRY(tmp);
2082 }
2083 if (child == NULL || child->maxgap < length)
2084 break;
2085 tmp = child;
2086 }
2087
2088 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2089 /*
2090		 * Check if the entry that we found satisfies the
2091		 * space requirement
2092 */
2093 if (topdown) {
2094 if (hint > tmp->next->start - length)
2095 hint = tmp->next->start - length;
2096 } else {
2097 if (hint < tmp->end)
2098 hint = tmp->end;
2099 }
2100 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2101 topdown, tmp)) {
2102 case 1:
2103 entry = tmp;
2104 goto found;
2105 case -1:
2106 goto wraparound;
2107 }
2108 if (tmp->gap >= length)
2109 goto listsearch;
2110 }
2111 if (prev == NULL)
2112 goto notfound;
2113
2114 if (topdown) {
2115 KASSERT(orig_hint >= prev->next->start - length ||
2116 prev->next->start - length > prev->next->start);
2117 hint = prev->next->start - length;
2118 } else {
2119 KASSERT(orig_hint <= prev->end);
2120 hint = prev->end;
2121 }
2122 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2123 topdown, prev)) {
2124 case 1:
2125 entry = prev;
2126 goto found;
2127 case -1:
2128 goto wraparound;
2129 }
2130 if (prev->gap >= length)
2131 goto listsearch;
2132
2133 if (topdown)
2134 tmp = LEFT_ENTRY(prev);
2135 else
2136 tmp = RIGHT_ENTRY(prev);
2137 for (;;) {
2138 KASSERT(tmp && tmp->maxgap >= length);
2139 if (topdown)
2140 child = RIGHT_ENTRY(tmp);
2141 else
2142 child = LEFT_ENTRY(tmp);
2143 if (child && child->maxgap >= length) {
2144 tmp = child;
2145 continue;
2146 }
2147 if (tmp->gap >= length)
2148 break;
2149 if (topdown)
2150 tmp = LEFT_ENTRY(tmp);
2151 else
2152 tmp = RIGHT_ENTRY(tmp);
2153 }
2154
2155 if (topdown) {
2156 KASSERT(orig_hint >= tmp->next->start - length ||
2157 tmp->next->start - length > tmp->next->start);
2158 hint = tmp->next->start - length;
2159 } else {
2160 KASSERT(orig_hint <= tmp->end);
2161 hint = tmp->end;
2162 }
2163 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2164 topdown, tmp)) {
2165 case 1:
2166 entry = tmp;
2167 goto found;
2168 case -1:
2169 goto wraparound;
2170 }
2171
2172 /*
2173 * The tree fails to find an entry because of offset or alignment
2174 * restrictions. Search the list instead.
2175 */
2176 listsearch:
2177 /*
2178 * Look through the rest of the map, trying to fit a new region in
2179 * the gap between existing regions, or after the very last region.
2180 * note: entry->end = base VA of current gap,
2181 * entry->next->start = VA of end of current gap
2182 */
2183
2184 for (;;) {
2185 /* Update hint for current gap. */
2186 hint = topdown ? entry->next->start - length : entry->end;
2187
2188 /* See if it fits. */
2189 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2190 topdown, entry)) {
2191 case 1:
2192 goto found;
2193 case -1:
2194 goto wraparound;
2195 }
2196
2197 /* Advance to next/previous gap */
2198 if (topdown) {
2199 if (entry == &map->header) {
2200 UVMHIST_LOG(maphist, "<- failed (off start)",
2201 0,0,0,0);
2202 goto notfound;
2203 }
2204 entry = entry->prev;
2205 } else {
2206 entry = entry->next;
2207 if (entry == &map->header) {
2208 UVMHIST_LOG(maphist, "<- failed (off end)",
2209 0,0,0,0);
2210 goto notfound;
2211 }
2212 }
2213 }
2214
2215 found:
2216 SAVE_HINT(map, map->hint, entry);
2217 *result = hint;
2218 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2219 KASSERT( topdown || hint >= orig_hint);
2220 KASSERT(!topdown || hint <= orig_hint);
2221 KASSERT(entry->end <= hint);
2222 KASSERT(hint + length <= entry->next->start);
2223 return (entry);
2224
2225 wraparound:
2226 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2227
2228 return (NULL);
2229
2230 notfound:
2231 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2232
2233 return (NULL);
2234 }
2235
2236 /*
2237 * U N M A P - m a i n h e l p e r f u n c t i o n s
2238 */
2239
2240 /*
2241 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2242 *
2243 * => caller must check alignment and size
2244 * => map must be locked by caller
2245	 * => we return a list of map entries that we've removed from the map
2246 * in "entry_list"
2247 */
2248
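/*
 * A sketch of the expected calling sequence (cf. uvm_unmap()): removal
 * happens with the map locked, reference dropping afterwards with the
 * map unlocked, exactly as the comments below explain.
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */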
2249 void
2250 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2251 struct vm_map_entry **entry_list /* OUT */,
2252 struct uvm_mapent_reservation *umr, int flags)
2253 {
2254 struct vm_map_entry *entry, *first_entry, *next;
2255 vaddr_t len;
2256 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2257
2258 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2259 map, start, end, 0);
2260 VM_MAP_RANGE_CHECK(map, start, end);
2261
2262 uvm_map_check(map, "unmap_remove entry");
2263
2264 /*
2265 * find first entry
2266 */
2267
2268 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2269 /* clip and go... */
2270 entry = first_entry;
2271 UVM_MAP_CLIP_START(map, entry, start, umr);
2272 /* critical! prevents stale hint */
2273 SAVE_HINT(map, entry, entry->prev);
2274 } else {
2275 entry = first_entry->next;
2276 }
2277
2278 /*
2279 * Save the free space hint
2280 */
2281
2282 if (map->first_free != &map->header && map->first_free->start >= start)
2283 map->first_free = entry->prev;
2284
2285 /*
2286 * note: we now re-use first_entry for a different task. we remove
2287 * a number of map entries from the map and save them in a linked
2288 * list headed by "first_entry". once we remove them from the map
2289 * the caller should unlock the map and drop the references to the
2290	 * backing objects [cf. uvm_unmap_detach]. the objective is to
2291 * separate unmapping from reference dropping. why?
2292 * [1] the map has to be locked for unmapping
2293 * [2] the map need not be locked for reference dropping
2294 * [3] dropping references may trigger pager I/O, and if we hit
2295 * a pager that does synchronous I/O we may have to wait for it.
2296 * [4] we would like all waiting for I/O to occur with maps unlocked
2297 * so that we don't block other threads.
2298 */
2299
2300 first_entry = NULL;
2301 *entry_list = NULL;
2302
2303 /*
2304 * break up the area into map entry sized regions and unmap. note
2305 * that all mappings have to be removed before we can even consider
2306 * dropping references to amaps or VM objects (otherwise we could end
2307 * up with a mapping to a page on the free list which would be very bad)
2308 */
2309
2310 while ((entry != &map->header) && (entry->start < end)) {
2311 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2312
2313 UVM_MAP_CLIP_END(map, entry, end, umr);
2314 next = entry->next;
2315 len = entry->end - entry->start;
2316
2317 /*
2318 * unwire before removing addresses from the pmap; otherwise
2319 * unwiring will put the entries back into the pmap (XXX).
2320 */
2321
2322 if (VM_MAPENT_ISWIRED(entry)) {
2323 uvm_map_entry_unwire(map, entry);
2324 }
2325 if (flags & UVM_FLAG_VAONLY) {
2326
2327 /* nothing */
2328
2329 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2330
2331 /*
2332 * if the map is non-pageable, any pages mapped there
2333 * must be wired and entered with pmap_kenter_pa(),
2334 * and we should free any such pages immediately.
2335 * this is mostly used for kmem_map and mb_map.
2336 */
2337
2338 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2339 uvm_km_pgremove_intrsafe(map, entry->start,
2340 entry->end);
2341 pmap_kremove(entry->start, len);
2342 }
2343 } else if (UVM_ET_ISOBJ(entry) &&
2344 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2345 KASSERT(vm_map_pmap(map) == pmap_kernel());
2346
2347 /*
2348 * note: kernel object mappings are currently used in
2349 * two ways:
2350 * [1] "normal" mappings of pages in the kernel object
2351 * [2] uvm_km_valloc'd allocations in which we
2352 * pmap_enter in some non-kernel-object page
2353 * (e.g. vmapbuf).
2354 *
2355 * for case [1], we need to remove the mapping from
2356 * the pmap and then remove the page from the kernel
2357 * object (because, once pages in a kernel object are
2358 * unmapped they are no longer needed, unlike, say,
2359 * a vnode where you might want the data to persist
2360 * until flushed out of a queue).
2361 *
2362 * for case [2], we need to remove the mapping from
2363 * the pmap. there shouldn't be any pages at the
2364 * specified offset in the kernel object [but it
2365 * doesn't hurt to call uvm_km_pgremove just to be
2366 * safe?]
2367 *
2368 * uvm_km_pgremove currently does the following:
2369 * for pages in the kernel object in range:
2370 * - drops the swap slot
2371 * - uvm_pagefree the page
2372 */
2373
2374 /*
2375 * remove mappings from pmap and drop the pages
2376 * from the object. offsets are always relative
2377 * to vm_map_min(kernel_map).
2378 */
2379
2380 pmap_remove(pmap_kernel(), entry->start,
2381 entry->start + len);
2382 uvm_km_pgremove(entry->start, entry->end);
2383
2384 /*
2385 * null out kernel_object reference, we've just
2386 * dropped it
2387 */
2388
2389 entry->etype &= ~UVM_ET_OBJ;
2390 entry->object.uvm_obj = NULL;
2391 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2392
2393 /*
2394 * remove mappings the standard way.
2395 */
2396
2397 pmap_remove(map->pmap, entry->start, entry->end);
2398 }
2399
2400 #if defined(DEBUG)
2401 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2402
2403 /*
2404			 * check for any remaining mappings,
2405			 * which would indicate a bug in the caller.
2406 */
2407
2408 vaddr_t va;
2409 for (va = entry->start; va < entry->end;
2410 va += PAGE_SIZE) {
2411 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2412 panic("uvm_unmap_remove: has mapping");
2413 }
2414 }
2415
2416 if (VM_MAP_IS_KERNEL(map)) {
2417 uvm_km_check_empty(map, entry->start,
2418 entry->end);
2419 }
2420 }
2421 #endif /* defined(DEBUG) */
2422
2423 /*
2424 * remove entry from map and put it on our list of entries
2425 * that we've nuked. then go to next entry.
2426 */
2427
2428 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2429
2430 /* critical! prevents stale hint */
2431 SAVE_HINT(map, entry, entry->prev);
2432
2433 uvm_map_entry_unlink(map, entry);
2434 KASSERT(map->size >= len);
2435 map->size -= len;
2436 entry->prev = NULL;
2437 entry->next = first_entry;
2438 first_entry = entry;
2439 entry = next;
2440 }
2441 if ((map->flags & VM_MAP_DYING) == 0) {
2442 pmap_update(vm_map_pmap(map));
2443 }
2444
2445 uvm_map_check(map, "unmap_remove leave");
2446
2447 /*
2448 * now we've cleaned up the map and are ready for the caller to drop
2449 * references to the mapped objects.
2450 */
2451
2452 *entry_list = first_entry;
2453 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2454
2455 if (map->flags & VM_MAP_WANTVA) {
2456 mutex_enter(&map->misc_lock);
2457 map->flags &= ~VM_MAP_WANTVA;
2458 cv_broadcast(&map->cv);
2459 mutex_exit(&map->misc_lock);
2460 }
2461 }
2462
2463 /*
2464 * uvm_unmap_detach: drop references in a chain of map entries
2465 *
2466 * => we will free the map entries as we traverse the list.
2467 */
2468
2469 void
2470 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2471 {
2472 struct vm_map_entry *next_entry;
2473 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2474
2475 while (first_entry) {
2476 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2477 UVMHIST_LOG(maphist,
2478 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2479 first_entry, first_entry->aref.ar_amap,
2480 first_entry->object.uvm_obj,
2481 UVM_ET_ISSUBMAP(first_entry));
2482
2483 /*
2484 * drop reference to amap, if we've got one
2485 */
2486
2487 if (first_entry->aref.ar_amap)
2488 uvm_map_unreference_amap(first_entry, flags);
2489
2490 /*
2491 * drop reference to our backing object, if we've got one
2492 */
2493
2494 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2495 if (UVM_ET_ISOBJ(first_entry) &&
2496 first_entry->object.uvm_obj->pgops->pgo_detach) {
2497 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2498 (first_entry->object.uvm_obj);
2499 }
2500 next_entry = first_entry->next;
2501 uvm_mapent_free(first_entry);
2502 first_entry = next_entry;
2503 }
2504 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2505 }
2506
2507 /*
2508 * E X T R A C T I O N F U N C T I O N S
2509 */
2510
2511 /*
2512 * uvm_map_reserve: reserve space in a vm_map for future use.
2513 *
2514 * => we reserve space in a map by putting a dummy map entry in the
2515 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2516 * => map should be unlocked (we will write lock it)
2517 * => we return true if we were able to reserve space
2518 * => XXXCDC: should be inline?
2519 */
2520
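/*
 * Illustrative use (hypothetical sizes/addresses): reserve a blank,
 * VM_PROT_NONE entry now and install real mappings later, e.g. via
 * uvm_map_replace() as uvm_map_extract() does below.
 *
 *	vaddr_t va = vm_map_min(map);		(initial hint)
 *
 *	if (!uvm_map_reserve(map, size, 0, 0, &va, 0))
 *		return ENOMEM;
 *	... [va, va + round_page(size)) is now reserved ...
 */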
2521 int
2522 uvm_map_reserve(struct vm_map *map, vsize_t size,
2523 vaddr_t offset /* hint for pmap_prefer */,
2524 vsize_t align /* alignment */,
2525 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2526 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2527 {
2528 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2529
2530 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2531 map,size,offset,raddr);
2532
2533 size = round_page(size);
2534
2535 /*
2536 * reserve some virtual space.
2537 */
2538
2539 if (uvm_map(map, raddr, size, NULL, offset, align,
2540 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2541 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2542 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2543 return (false);
2544 }
2545
2546 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2547 return (true);
2548 }
2549
2550 /*
2551 * uvm_map_replace: replace a reserved (blank) area of memory with
2552 * real mappings.
2553 *
2554 * => caller must WRITE-LOCK the map
2555 * => we return true if replacement was a success
2556	 * => we expect the newents chain to have nnewents entries on it and
2557 * we expect newents->prev to point to the last entry on the list
2558 * => note newents is allowed to be NULL
2559 */
2560
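/*
 * Shape of the "newents" chain for nnewents == 3 (sketch):
 *
 *	newents -> e1 -> e2 -> e3 -> NULL	(via ->next)
 *	newents->prev == e3			(shortcut to the tail)
 *
 * i.e. the list is NULL-terminated going forward, but the head's prev
 * pointer names the last entry, which is what step 4 of
 * uvm_map_extract() sets up before calling here.
 */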
2561 static int
2562 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2563 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2564 struct vm_map_entry **oldentryp)
2565 {
2566 struct vm_map_entry *oldent, *last;
2567
2568 uvm_map_check(map, "map_replace entry");
2569
2570 /*
2571 * first find the blank map entry at the specified address
2572 */
2573
2574 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2575 return (false);
2576 }
2577
2578 /*
2579 * check to make sure we have a proper blank entry
2580 */
2581
2582 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2583 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2584 }
2585 if (oldent->start != start || oldent->end != end ||
2586 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2587 return (false);
2588 }
2589
2590 #ifdef DIAGNOSTIC
2591
2592 /*
2593 * sanity check the newents chain
2594 */
2595
2596 {
2597 struct vm_map_entry *tmpent = newents;
2598 int nent = 0;
2599 vsize_t sz = 0;
2600 vaddr_t cur = start;
2601
2602 while (tmpent) {
2603 nent++;
2604 sz += tmpent->end - tmpent->start;
2605 if (tmpent->start < cur)
2606 panic("uvm_map_replace1");
2607 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2608 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2609 tmpent->start, tmpent->end, end);
2610 panic("uvm_map_replace2");
2611 }
2612 cur = tmpent->end;
2613 if (tmpent->next) {
2614 if (tmpent->next->prev != tmpent)
2615 panic("uvm_map_replace3");
2616 } else {
2617 if (newents->prev != tmpent)
2618 panic("uvm_map_replace4");
2619 }
2620 tmpent = tmpent->next;
2621 }
2622 if (nent != nnewents)
2623 panic("uvm_map_replace5");
2624 if (sz != nsize)
2625 panic("uvm_map_replace6");
2626 }
2627 #endif
2628
2629 /*
2630 * map entry is a valid blank! replace it. (this does all the
2631 * work of map entry link/unlink...).
2632 */
2633
2634 if (newents) {
2635 last = newents->prev;
2636
2637 /* critical: flush stale hints out of map */
2638 SAVE_HINT(map, map->hint, newents);
2639 if (map->first_free == oldent)
2640 map->first_free = last;
2641
2642 last->next = oldent->next;
2643 last->next->prev = last;
2644
2645 /* Fix RB tree */
2646 uvm_rb_remove(map, oldent);
2647
2648 newents->prev = oldent->prev;
2649 newents->prev->next = newents;
2650 map->nentries = map->nentries + (nnewents - 1);
2651
2652 /* Fixup the RB tree */
2653 {
2654 int i;
2655 struct vm_map_entry *tmp;
2656
2657 tmp = newents;
2658 for (i = 0; i < nnewents && tmp; i++) {
2659 uvm_rb_insert(map, tmp);
2660 tmp = tmp->next;
2661 }
2662 }
2663 } else {
2664 /* NULL list of new entries: just remove the old one */
2665 clear_hints(map, oldent);
2666 uvm_map_entry_unlink(map, oldent);
2667 }
2668 map->size -= end - start - nsize;
2669
2670 uvm_map_check(map, "map_replace leave");
2671
2672 /*
2673 * now we can free the old blank entry and return.
2674 */
2675
2676 *oldentryp = oldent;
2677 return (true);
2678 }
2679
2680 /*
2681 * uvm_map_extract: extract a mapping from a map and put it somewhere
2682 * (maybe removing the old mapping)
2683 *
2684 * => maps should be unlocked (we will write lock them)
2685 * => returns 0 on success, error code otherwise
2686 * => start must be page aligned
2687 * => len must be page sized
2688 * => flags:
2689 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2690 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2691 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2692 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2693 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2694 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2695 * be used from within the kernel in a kernel level map <<<
2696 */
2697
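/*
 * An illustrative call (hypothetical maps and addresses): move "len"
 * bytes from srcmap to dstmap and drop the source mapping.  As noted
 * above, REMOVE must not be combined with CONTIG or QREF.
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcva, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE);
 *	if (error)
 *		return error;
 *	... the pages are now mapped at [dstaddr, dstaddr + len) ...
 */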
2698 int
2699 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2700 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2701 {
2702 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2703 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2704 *deadentry, *oldentry;
2705 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2706 vsize_t elen;
2707 int nchain, error, copy_ok;
2708 vsize_t nsize;
2709 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2710
2711 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2712 len,0);
2713 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2714
2715 /*
2716 * step 0: sanity check: start must be on a page boundary, length
2717 * must be page sized. can't ask for CONTIG/QREF if you asked for
2718 * REMOVE.
2719 */
2720
2721 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2722 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2723 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2724
2725 /*
2726 * step 1: reserve space in the target map for the extracted area
2727 */
2728
2729 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2730 dstaddr = vm_map_min(dstmap);
2731 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2732 return (ENOMEM);
2733 *dstaddrp = dstaddr; /* pass address back to caller */
2734 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2735 } else {
2736 dstaddr = *dstaddrp;
2737 }
2738
2739 /*
2740 * step 2: setup for the extraction process loop by init'ing the
2741 * map entry chain, locking src map, and looking up the first useful
2742 * entry in the map.
2743 */
2744
2745 end = start + len;
2746 newend = dstaddr + len;
2747 chain = endchain = NULL;
2748 nchain = 0;
2749 nsize = 0;
2750 vm_map_lock(srcmap);
2751
2752 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2753
2754 /* "start" is within an entry */
2755 if (flags & UVM_EXTRACT_QREF) {
2756
2757 /*
2758 * for quick references we don't clip the entry, so
2759 * the entry may map space "before" the starting
2760 * virtual address... this is the "fudge" factor
2761 * (which can be non-zero only the first time
2762 * through the "while" loop in step 3).
2763 */
2764
2765 fudge = start - entry->start;
2766 } else {
2767
2768 /*
2769 * normal reference: we clip the map to fit (thus
2770 * fudge is zero)
2771 */
2772
2773 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2774 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2775 fudge = 0;
2776 }
2777 } else {
2778
2779 /* "start" is not within an entry ... skip to next entry */
2780 if (flags & UVM_EXTRACT_CONTIG) {
2781 error = EINVAL;
2782 goto bad; /* definite hole here ... */
2783 }
2784
2785 entry = entry->next;
2786 fudge = 0;
2787 }
2788
2789 /* save values from srcmap for step 6 */
2790 orig_entry = entry;
2791 orig_fudge = fudge;
2792
2793 /*
2794 * step 3: now start looping through the map entries, extracting
2795 * as we go.
2796 */
2797
2798 while (entry->start < end && entry != &srcmap->header) {
2799
2800 /* if we are not doing a quick reference, clip it */
2801 if ((flags & UVM_EXTRACT_QREF) == 0)
2802 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2803
2804 /* clear needs_copy (allow chunking) */
2805 if (UVM_ET_ISNEEDSCOPY(entry)) {
2806 amap_copy(srcmap, entry,
2807 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2808 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2809 error = ENOMEM;
2810 goto bad;
2811 }
2812
2813 /* amap_copy could clip (during chunk)! update fudge */
2814 if (fudge) {
2815 fudge = start - entry->start;
2816 orig_fudge = fudge;
2817 }
2818 }
2819
2820		/* calculate the offset of this entry from "start" */
2821 oldoffset = (entry->start + fudge) - start;
2822
2823 /* allocate a new map entry */
2824 newentry = uvm_mapent_alloc(dstmap, 0);
2825 if (newentry == NULL) {
2826 error = ENOMEM;
2827 goto bad;
2828 }
2829
2830 /* set up new map entry */
2831 newentry->next = NULL;
2832 newentry->prev = endchain;
2833 newentry->start = dstaddr + oldoffset;
2834 newentry->end =
2835 newentry->start + (entry->end - (entry->start + fudge));
2836 if (newentry->end > newend || newentry->end < newentry->start)
2837 newentry->end = newend;
2838 newentry->object.uvm_obj = entry->object.uvm_obj;
2839 if (newentry->object.uvm_obj) {
2840 if (newentry->object.uvm_obj->pgops->pgo_reference)
2841 newentry->object.uvm_obj->pgops->
2842 pgo_reference(newentry->object.uvm_obj);
2843 newentry->offset = entry->offset + fudge;
2844 } else {
2845 newentry->offset = 0;
2846 }
2847 newentry->etype = entry->etype;
2848 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2849 entry->max_protection : entry->protection;
2850 newentry->max_protection = entry->max_protection;
2851 newentry->inheritance = entry->inheritance;
2852 newentry->wired_count = 0;
2853 newentry->aref.ar_amap = entry->aref.ar_amap;
2854 if (newentry->aref.ar_amap) {
2855 newentry->aref.ar_pageoff =
2856 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2857 uvm_map_reference_amap(newentry, AMAP_SHARED |
2858 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2859 } else {
2860 newentry->aref.ar_pageoff = 0;
2861 }
2862 newentry->advice = entry->advice;
2863 if ((flags & UVM_EXTRACT_QREF) != 0) {
2864 newentry->flags |= UVM_MAP_NOMERGE;
2865 }
2866
2867 /* now link it on the chain */
2868 nchain++;
2869 nsize += newentry->end - newentry->start;
2870 if (endchain == NULL) {
2871 chain = endchain = newentry;
2872 } else {
2873 endchain->next = newentry;
2874 endchain = newentry;
2875 }
2876
2877 /* end of 'while' loop! */
2878 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2879 (entry->next == &srcmap->header ||
2880 entry->next->start != entry->end)) {
2881 error = EINVAL;
2882 goto bad;
2883 }
2884 entry = entry->next;
2885 fudge = 0;
2886 }
2887
2888 /*
2889 * step 4: close off chain (in format expected by uvm_map_replace)
2890 */
2891
2892 if (chain)
2893 chain->prev = endchain;
2894
2895 /*
2896 * step 5: attempt to lock the dest map so we can pmap_copy.
2897 * note usage of copy_ok:
2898 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2899 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2900 */
2901
2902 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2903 copy_ok = 1;
2904 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2905 nchain, nsize, &resentry)) {
2906 if (srcmap != dstmap)
2907 vm_map_unlock(dstmap);
2908 error = EIO;
2909 goto bad;
2910 }
2911 } else {
2912 copy_ok = 0;
2913		/* replace deferred until step 7 */
2914 }
2915
2916 /*
2917 * step 6: traverse the srcmap a second time to do the following:
2918 * - if we got a lock on the dstmap do pmap_copy
2919 * - if UVM_EXTRACT_REMOVE remove the entries
2920 * we make use of orig_entry and orig_fudge (saved in step 2)
2921 */
2922
2923 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2924
2925 /* purge possible stale hints from srcmap */
2926 if (flags & UVM_EXTRACT_REMOVE) {
2927 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2928 if (srcmap->first_free != &srcmap->header &&
2929 srcmap->first_free->start >= start)
2930 srcmap->first_free = orig_entry->prev;
2931 }
2932
2933 entry = orig_entry;
2934 fudge = orig_fudge;
2935 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2936
2937 while (entry->start < end && entry != &srcmap->header) {
2938 if (copy_ok) {
2939 oldoffset = (entry->start + fudge) - start;
2940 elen = MIN(end, entry->end) -
2941 (entry->start + fudge);
2942 pmap_copy(dstmap->pmap, srcmap->pmap,
2943 dstaddr + oldoffset, elen,
2944 entry->start + fudge);
2945 }
2946
2947 /* we advance "entry" in the following if statement */
2948 if (flags & UVM_EXTRACT_REMOVE) {
2949 pmap_remove(srcmap->pmap, entry->start,
2950 entry->end);
2951 oldentry = entry; /* save entry */
2952 entry = entry->next; /* advance */
2953 uvm_map_entry_unlink(srcmap, oldentry);
2954 /* add to dead list */
2955 oldentry->next = deadentry;
2956 deadentry = oldentry;
2957 } else {
2958 entry = entry->next; /* advance */
2959 }
2960
2961 /* end of 'while' loop */
2962 fudge = 0;
2963 }
2964 pmap_update(srcmap->pmap);
2965
2966 /*
2967 * unlock dstmap. we will dispose of deadentry in
2968 * step 7 if needed
2969 */
2970
2971 if (copy_ok && srcmap != dstmap)
2972 vm_map_unlock(dstmap);
2973
2974 } else {
2975 deadentry = NULL;
2976 }
2977
2978 /*
2979 * step 7: we are done with the source map, unlock. if copy_ok
2980 * is 0 then we have not replaced the dummy mapping in dstmap yet
2981 * and we need to do so now.
2982 */
2983
2984 vm_map_unlock(srcmap);
2985 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2986 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2987
2988 /* now do the replacement if we didn't do it in step 5 */
2989 if (copy_ok == 0) {
2990 vm_map_lock(dstmap);
2991 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2992 nchain, nsize, &resentry);
2993 vm_map_unlock(dstmap);
2994
2995 if (error == false) {
2996 error = EIO;
2997 goto bad2;
2998 }
2999 }
3000
3001 if (resentry != NULL)
3002 uvm_mapent_free(resentry);
3003
3004 return (0);
3005
3006 /*
3007 * bad: failure recovery
3008 */
3009 bad:
3010 vm_map_unlock(srcmap);
3011 bad2: /* src already unlocked */
3012 if (chain)
3013 uvm_unmap_detach(chain,
3014 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3015
3016 if (resentry != NULL)
3017 uvm_mapent_free(resentry);
3018
3019 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3020 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
3021 }
3022 return (error);
3023 }
3024
3025 /* end of extraction functions */
3026
3027 /*
3028 * uvm_map_submap: punch down part of a map into a submap
3029 *
3030 * => only the kernel_map is allowed to be submapped
3031 * => the purpose of submapping is to break up the locking granularity
3032 * of a larger map
3033 * => the range specified must have been mapped previously with a uvm_map()
3034 * call [with uobj==NULL] to create a blank map entry in the main map.
3035 * [And it had better still be blank!]
3036 * => maps which contain submaps should never be copied or forked.
3037 * => to remove a submap, use uvm_unmap() on the main map
3038 * and then uvm_map_deallocate() the submap.
3039 * => main map must be unlocked.
3040 * => submap must have been init'd and have a zero reference count.
3041 * [need not be locked as we don't actually reference it]
3042 */
3043
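/*
 * e.g. (illustrative only): to submap [start, end) of kernel_map,
 * first create a blank entry over that range with uvm_map() and
 * uobj == NULL, then punch it down:
 *
 *	error = uvm_map_submap(kernel_map, start, end, submap);
 *
 * where "submap" was initialized beforehand and still has a zero
 * reference count, per the requirements above.
 */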
3044 int
3045 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3046 struct vm_map *submap)
3047 {
3048 struct vm_map_entry *entry;
3049 struct uvm_mapent_reservation umr;
3050 int error;
3051
3052 uvm_mapent_reserve(map, &umr, 2, 0);
3053
3054 vm_map_lock(map);
3055 VM_MAP_RANGE_CHECK(map, start, end);
3056
3057 if (uvm_map_lookup_entry(map, start, &entry)) {
3058 UVM_MAP_CLIP_START(map, entry, start, &umr);
3059 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
3060 } else {
3061 entry = NULL;
3062 }
3063
3064 if (entry != NULL &&
3065 entry->start == start && entry->end == end &&
3066 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3067 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3068 entry->etype |= UVM_ET_SUBMAP;
3069 entry->object.sub_map = submap;
3070 entry->offset = 0;
3071 uvm_map_reference(submap);
3072 error = 0;
3073 } else {
3074 error = EINVAL;
3075 }
3076 vm_map_unlock(map);
3077
3078 uvm_mapent_unreserve(map, &umr);
3079
3080 return error;
3081 }
3082
3083 /*
3084 * uvm_map_setup_kernel: init in-kernel map
3085 *
3086 * => map must not be in service yet.
3087 */
3088
3089 void
3090 uvm_map_setup_kernel(struct vm_map_kernel *map,
3091 vaddr_t vmin, vaddr_t vmax, int flags)
3092 {
3093
3094 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
3095 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
3096 LIST_INIT(&map->vmk_kentry_free);
3097 map->vmk_merged_entries = NULL;
3098 }
3099
3100
3101 /*
3102 * uvm_map_protect: change map protection
3103 *
3104 * => set_max means set max_protection.
3105 * => map must be unlocked.
3106 */
3107
3108 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3109 ~VM_PROT_WRITE : VM_PROT_ALL)
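/*
 * e.g., for a copy-on-write entry MASK() strips VM_PROT_WRITE, so the
 * pmap_protect() call below can never hand out write access that the
 * fault path still needs to mediate.
 */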
3110
3111 int
3112 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3113 vm_prot_t new_prot, bool set_max)
3114 {
3115 struct vm_map_entry *current, *entry;
3116 int error = 0;
3117 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
3118 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
3119 map, start, end, new_prot);
3120
3121 vm_map_lock(map);
3122 VM_MAP_RANGE_CHECK(map, start, end);
3123 if (uvm_map_lookup_entry(map, start, &entry)) {
3124 UVM_MAP_CLIP_START(map, entry, start, NULL);
3125 } else {
3126 entry = entry->next;
3127 }
3128
3129 /*
3130 * make a first pass to check for protection violations.
3131 */
3132
3133 current = entry;
3134 while ((current != &map->header) && (current->start < end)) {
3135 if (UVM_ET_ISSUBMAP(current)) {
3136 error = EINVAL;
3137 goto out;
3138 }
3139 if ((new_prot & current->max_protection) != new_prot) {
3140 error = EACCES;
3141 goto out;
3142 }
3143 /*
3144 * Don't allow VM_PROT_EXECUTE to be set on entries that
3145 * point to vnodes that are associated with a NOEXEC file
3146 * system.
3147 */
3148 if (UVM_ET_ISOBJ(current) &&
3149 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3150 struct vnode *vp =
3151 (struct vnode *) current->object.uvm_obj;
3152
3153 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3154 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3155 error = EACCES;
3156 goto out;
3157 }
3158 }
3159
3160 current = current->next;
3161 }
3162
3163 /* go back and fix up protections (no need to clip this time). */
3164
3165 current = entry;
3166 while ((current != &map->header) && (current->start < end)) {
3167 vm_prot_t old_prot;
3168
3169 UVM_MAP_CLIP_END(map, current, end, NULL);
3170 old_prot = current->protection;
3171 if (set_max)
3172 current->protection =
3173 (current->max_protection = new_prot) & old_prot;
3174 else
3175 current->protection = new_prot;
3176
3177 /*
3178 * update physical map if necessary. worry about copy-on-write
3179 * here -- CHECK THIS XXX
3180 */
3181
3182 if (current->protection != old_prot) {
3183 /* update pmap! */
3184 pmap_protect(map->pmap, current->start, current->end,
3185 current->protection & MASK(entry));
3186
3187 /*
3188 * If this entry points at a vnode, and the
3189 * protection includes VM_PROT_EXECUTE, mark
3190 * the vnode as VEXECMAP.
3191 */
3192 if (UVM_ET_ISOBJ(current)) {
3193 struct uvm_object *uobj =
3194 current->object.uvm_obj;
3195
3196 if (UVM_OBJ_IS_VNODE(uobj) &&
3197 (current->protection & VM_PROT_EXECUTE)) {
3198 vn_markexec((struct vnode *) uobj);
3199 }
3200 }
3201 }
3202
3203 /*
3204 * If the map is configured to lock any future mappings,
3205 * wire this entry now if the old protection was VM_PROT_NONE
3206 * and the new protection is not VM_PROT_NONE.
3207 */
3208
3209 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3210 VM_MAPENT_ISWIRED(entry) == 0 &&
3211 old_prot == VM_PROT_NONE &&
3212 new_prot != VM_PROT_NONE) {
3213 if (uvm_map_pageable(map, entry->start,
3214 entry->end, false,
3215 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3216
3217 /*
3218 * If locking the entry fails, remember the
3219 * error if it's the first one. Note we
3220 * still continue setting the protection in
3221 * the map, but will return the error
3222 * condition regardless.
3223 *
3224 * XXX Ignore what the actual error is,
3225 * XXX just call it a resource shortage
3226 * XXX so that it doesn't get confused
3227 * XXX what uvm_map_protect() itself would
3228 * XXX normally return.
3229 */
3230
3231 error = ENOMEM;
3232 }
3233 }
3234 current = current->next;
3235 }
3236 pmap_update(map->pmap);
3237
3238 out:
3239 vm_map_unlock(map);
3240
3241 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3242 return error;
3243 }
3244
3245 #undef MASK
3246
3247 /*
3248 * uvm_map_inherit: set inheritance code for range of addrs in map.
3249 *
3250 * => map must be unlocked
3251 * => note that the inherit code is used during a "fork". see fork
3252 * code for details.
3253 */
3254
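/*
 * e.g. (illustrative only): arrange for [start, end) to be shared
 * with children across fork:
 *
 *	error = uvm_map_inherit(map, start, end, MAP_INHERIT_SHARE);
 */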
3255 int
3256 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3257 vm_inherit_t new_inheritance)
3258 {
3259 struct vm_map_entry *entry, *temp_entry;
3260 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3261 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3262 map, start, end, new_inheritance);
3263
3264 switch (new_inheritance) {
3265 case MAP_INHERIT_NONE:
3266 case MAP_INHERIT_COPY:
3267 case MAP_INHERIT_SHARE:
3268 break;
3269 default:
3270 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3271 return EINVAL;
3272 }
3273
3274 vm_map_lock(map);
3275 VM_MAP_RANGE_CHECK(map, start, end);
3276 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3277 entry = temp_entry;
3278 UVM_MAP_CLIP_START(map, entry, start, NULL);
3279 } else {
3280 entry = temp_entry->next;
3281 }
3282 while ((entry != &map->header) && (entry->start < end)) {
3283 UVM_MAP_CLIP_END(map, entry, end, NULL);
3284 entry->inheritance = new_inheritance;
3285 entry = entry->next;
3286 }
3287 vm_map_unlock(map);
3288 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3289 return 0;
3290 }
3291
3292 /*
3293 * uvm_map_advice: set advice code for range of addrs in map.
3294 *
3295 * => map must be unlocked
3296 */
3297
3298 int
3299 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3300 {
3301 struct vm_map_entry *entry, *temp_entry;
3302 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3303 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3304 map, start, end, new_advice);
3305
3306 vm_map_lock(map);
3307 VM_MAP_RANGE_CHECK(map, start, end);
3308 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3309 entry = temp_entry;
3310 UVM_MAP_CLIP_START(map, entry, start, NULL);
3311 } else {
3312 entry = temp_entry->next;
3313 }
3314
3315 /*
3316 * XXXJRT: disallow holes?
3317 */
3318
3319 while ((entry != &map->header) && (entry->start < end)) {
3320 UVM_MAP_CLIP_END(map, entry, end, NULL);
3321
3322 switch (new_advice) {
3323 case MADV_NORMAL:
3324 case MADV_RANDOM:
3325 case MADV_SEQUENTIAL:
3326 /* nothing special here */
3327 break;
3328
3329 default:
3330 vm_map_unlock(map);
3331 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3332 return EINVAL;
3333 }
3334 entry->advice = new_advice;
3335 entry = entry->next;
3336 }
3337
3338 vm_map_unlock(map);
3339 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3340 return 0;
3341 }
3342
3343 /*
3344 * uvm_map_willneed: apply MADV_WILLNEED
3345 */
3346
3347 int
3348 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3349 {
3350 struct vm_map_entry *entry;
3351 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3352 UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
3353 map, start, end, 0);
3354
3355 vm_map_lock_read(map);
3356 VM_MAP_RANGE_CHECK(map, start, end);
3357 if (!uvm_map_lookup_entry(map, start, &entry)) {
3358 entry = entry->next;
3359 }
3360 while (entry->start < end) {
3361 struct vm_amap * const amap = entry->aref.ar_amap;
3362 struct uvm_object * const uobj = entry->object.uvm_obj;
3363
3364 KASSERT(entry != &map->header);
3365 KASSERT(start < entry->end);
3366 /*
3367 * XXX IMPLEMENT ME.
3368 * Should invent a "weak" mode for uvm_fault()
3369 * which would only do the PGO_LOCKED pgo_get().
3370 *
3371 * for now, we handle only the easy but common case.
3372 */
3373 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3374 off_t offset;
3375 off_t size;
3376
3377 offset = entry->offset;
3378 if (start < entry->start) {
3379 offset += entry->start - start;
3380 }
3381 size = entry->offset + (entry->end - entry->start);
3382 if (entry->end < end) {
3383 size -= end - entry->end;
3384 }
3385 uvm_readahead(uobj, offset, size);
3386 }
3387 entry = entry->next;
3388 }
3389 vm_map_unlock_read(map);
3390 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3391 return 0;
3392 }
3393
3394 /*
3395 * uvm_map_pageable: sets the pageability of a range in a map.
3396 *
3397 * => wires map entries. should not be used for transient page locking.
3398 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3399 * => regions specified as not pageable require lock-down (wired) memory
3400 * and page tables.
3401 * => map must never be read-locked
3402 * => if islocked is true, map is already write-locked
3403 * => we always unlock the map, since we must downgrade to a read-lock
3404 * to call uvm_fault_wire()
3405 * => XXXCDC: check this and try and clean it up.
3406 */
3407
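/*
 * Illustrative calls (hypothetical range; this is the mlock(2)/
 * munlock(2) style of use):
 *
 *	error = uvm_map_pageable(map, start, end, false, 0);	(wire)
 *	error = uvm_map_pageable(map, start, end, true, 0);	(unwire)
 */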
3408 int
3409 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3410 bool new_pageable, int lockflags)
3411 {
3412 struct vm_map_entry *entry, *start_entry, *failed_entry;
3413 int rv;
3414 #ifdef DIAGNOSTIC
3415 u_int timestamp_save;
3416 #endif
3417 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3418 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3419 map, start, end, new_pageable);
3420 KASSERT(map->flags & VM_MAP_PAGEABLE);
3421
3422 if ((lockflags & UVM_LK_ENTER) == 0)
3423 vm_map_lock(map);
3424 VM_MAP_RANGE_CHECK(map, start, end);
3425
3426 /*
3427 * only one pageability change may take place at one time, since
3428 * uvm_fault_wire assumes it will be called only once for each
3429 * wiring/unwiring. therefore, we have to make sure we're actually
3430 * changing the pageability for the entire region. we do so before
3431 * making any changes.
3432 */
3433
3434 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3435 if ((lockflags & UVM_LK_EXIT) == 0)
3436 vm_map_unlock(map);
3437
3438 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3439 return EFAULT;
3440 }
3441 entry = start_entry;
3442
3443 /*
3444 * handle wiring and unwiring separately.
3445 */
3446
3447 if (new_pageable) { /* unwire */
3448 UVM_MAP_CLIP_START(map, entry, start, NULL);
3449
3450 /*
3451 * unwiring. first ensure that the range to be unwired is
3452 * really wired down and that there are no holes.
3453 */
3454
3455 while ((entry != &map->header) && (entry->start < end)) {
3456 if (entry->wired_count == 0 ||
3457 (entry->end < end &&
3458 (entry->next == &map->header ||
3459 entry->next->start > entry->end))) {
3460 if ((lockflags & UVM_LK_EXIT) == 0)
3461 vm_map_unlock(map);
3462 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3463 return EINVAL;
3464 }
3465 entry = entry->next;
3466 }
3467
3468 /*
3469 * POSIX 1003.1b - a single munlock call unlocks a region,
3470 * regardless of the number of mlock calls made on that
3471 * region.
3472 */
3473
3474 entry = start_entry;
3475 while ((entry != &map->header) && (entry->start < end)) {
3476 UVM_MAP_CLIP_END(map, entry, end, NULL);
3477 if (VM_MAPENT_ISWIRED(entry))
3478 uvm_map_entry_unwire(map, entry);
3479 entry = entry->next;
3480 }
3481 if ((lockflags & UVM_LK_EXIT) == 0)
3482 vm_map_unlock(map);
3483 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3484 return 0;
3485 }
3486
3487 /*
3488 * wire case: in two passes [XXXCDC: ugly block of code here]
3489 *
3490 * 1: holding the write lock, we create any anonymous maps that need
3491 * to be created. then we clip each map entry to the region to
3492 * be wired and increment its wiring count.
3493 *
3494 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3495 * in the pages for any newly wired area (wired_count == 1).
3496 *
3497 * downgrading to a read lock for uvm_fault_wire avoids a possible
3498 * deadlock with another thread that may have faulted on one of
3499 * the pages to be wired (it would mark the page busy, blocking
3500 * us, then in turn block on the map lock that we hold). because
3501 * of problems in the recursive lock package, we cannot upgrade
3502 * to a write lock in vm_map_lookup. thus, any actions that
3503 * require the write lock must be done beforehand. because we
3504 * keep the read lock on the map, the copy-on-write status of the
3505 * entries we modify here cannot change.
3506 */
3507
3508 while ((entry != &map->header) && (entry->start < end)) {
3509 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3510
3511 /*
3512 * perform actions of vm_map_lookup that need the
3513 * write lock on the map: create an anonymous map
3514 * for a copy-on-write region, or an anonymous map
3515 * for a zero-fill region. (XXXCDC: submap case
3516 * ok?)
3517 */
3518
3519 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3520 if (UVM_ET_ISNEEDSCOPY(entry) &&
3521 ((entry->max_protection & VM_PROT_WRITE) ||
3522 (entry->object.uvm_obj == NULL))) {
3523 amap_copy(map, entry, 0, start, end);
3524 /* XXXCDC: wait OK? */
3525 }
3526 }
3527 }
3528 UVM_MAP_CLIP_START(map, entry, start, NULL);
3529 UVM_MAP_CLIP_END(map, entry, end, NULL);
3530 entry->wired_count++;
3531
3532 /*
3533 * Check for holes
3534 */
3535
3536 if (entry->protection == VM_PROT_NONE ||
3537 (entry->end < end &&
3538 (entry->next == &map->header ||
3539 entry->next->start > entry->end))) {
3540
3541 /*
3542 * found one. amap creation actions do not need to
3543 * be undone, but the wired counts need to be restored.
3544 */
3545
3546 while (entry != &map->header && entry->end > start) {
3547 entry->wired_count--;
3548 entry = entry->prev;
3549 }
3550 if ((lockflags & UVM_LK_EXIT) == 0)
3551 vm_map_unlock(map);
3552 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3553 return EINVAL;
3554 }
3555 entry = entry->next;
3556 }
3557
3558 /*
3559 * Pass 2.
3560 */
3561
3562 #ifdef DIAGNOSTIC
3563 timestamp_save = map->timestamp;
3564 #endif
3565 vm_map_busy(map);
3566 vm_map_unlock(map);
3567
3568 rv = 0;
3569 entry = start_entry;
3570 while (entry != &map->header && entry->start < end) {
3571 if (entry->wired_count == 1) {
3572 rv = uvm_fault_wire(map, entry->start, entry->end,
3573 entry->max_protection, 1);
3574 if (rv) {
3575
3576 /*
3577 * wiring failed. break out of the loop.
3578 * we'll clean up the map below, once we
3579 * have a write lock again.
3580 */
3581
3582 break;
3583 }
3584 }
3585 entry = entry->next;
3586 }
3587
3588 if (rv) { /* failed? */
3589
3590 /*
3591 * Get back to an exclusive (write) lock.
3592 */
3593
3594 vm_map_lock(map);
3595 vm_map_unbusy(map);
3596
3597 #ifdef DIAGNOSTIC
3598 if (timestamp_save + 1 != map->timestamp)
3599 panic("uvm_map_pageable: stale map");
3600 #endif
3601
3602 /*
3603 * first drop the wiring count on all the entries
3604 * which haven't actually been wired yet.
3605 */
3606
3607 failed_entry = entry;
3608 while (entry != &map->header && entry->start < end) {
3609 entry->wired_count--;
3610 entry = entry->next;
3611 }
3612
3613 /*
3614 * now, unwire all the entries that were successfully
3615 * wired above.
3616 */
3617
3618 entry = start_entry;
3619 while (entry != failed_entry) {
3620 entry->wired_count--;
3621 if (VM_MAPENT_ISWIRED(entry) == 0)
3622 uvm_map_entry_unwire(map, entry);
3623 entry = entry->next;
3624 }
3625 if ((lockflags & UVM_LK_EXIT) == 0)
3626 vm_map_unlock(map);
3627 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3628 return (rv);
3629 }
3630
3631 if ((lockflags & UVM_LK_EXIT) == 0) {
3632 vm_map_unbusy(map);
3633 } else {
3634
3635 /*
3636 * Get back to an exclusive (write) lock.
3637 */
3638
3639 vm_map_lock(map);
3640 vm_map_unbusy(map);
3641 }
3642
3643 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3644 return 0;
3645 }
3646
3647 /*
3648 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3649 * all mapped regions.
3650 *
3651 * => map must not be locked.
3652 * => if no flags are specified, all regions are unwired.
3653 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3654 */
3655
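/*
 * Illustrative calls (mlockall(2)/munlockall(2) style of use):
 *
 *	error = uvm_map_pageable_all(map, MCL_CURRENT|MCL_FUTURE, limit);
 *	error = uvm_map_pageable_all(map, 0, 0);	(unwire everything)
 *
 * MCL_FUTURE alone just sets VM_MAP_WIREFUTURE; MCL_CURRENT also wires
 * what is currently mapped, subject to "limit".
 */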
3656 int
3657 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3658 {
3659 struct vm_map_entry *entry, *failed_entry;
3660 vsize_t size;
3661 int rv;
3662 #ifdef DIAGNOSTIC
3663 u_int timestamp_save;
3664 #endif
3665 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3666 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3667
3668 KASSERT(map->flags & VM_MAP_PAGEABLE);
3669
3670 vm_map_lock(map);
3671
3672 /*
3673 * handle wiring and unwiring separately.
3674 */
3675
3676 if (flags == 0) { /* unwire */
3677
3678 /*
3679 * POSIX 1003.1b -- munlockall unlocks all regions,
3680 * regardless of how many times mlockall has been called.
3681 */
3682
3683 for (entry = map->header.next; entry != &map->header;
3684 entry = entry->next) {
3685 if (VM_MAPENT_ISWIRED(entry))
3686 uvm_map_entry_unwire(map, entry);
3687 }
3688 map->flags &= ~VM_MAP_WIREFUTURE;
3689 vm_map_unlock(map);
3690 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3691 return 0;
3692 }
3693
3694 if (flags & MCL_FUTURE) {
3695
3696 /*
3697 * must wire all future mappings; remember this.
3698 */
3699
3700 map->flags |= VM_MAP_WIREFUTURE;
3701 }
3702
3703 if ((flags & MCL_CURRENT) == 0) {
3704
3705 /*
3706 * no more work to do!
3707 */
3708
3709 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3710 vm_map_unlock(map);
3711 return 0;
3712 }
3713
3714 /*
3715 * wire case: in three passes [XXXCDC: ugly block of code here]
3716 *
3717 * 1: holding the write lock, count all pages mapped by non-wired
3718 * entries. if this would cause us to go over our limit, we fail.
3719 *
3720 * 2: still holding the write lock, we create any anonymous maps that
3721	 * need to be created. then we increment each entry's wiring count.
3722 *
3723 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3724 * in the pages for any newly wired area (wired_count == 1).
3725 *
3726 * downgrading to a read lock for uvm_fault_wire avoids a possible
3727 * deadlock with another thread that may have faulted on one of
3728 * the pages to be wired (it would mark the page busy, blocking
3729 * us, then in turn block on the map lock that we hold). because
3730 * of problems in the recursive lock package, we cannot upgrade
3731 * to a write lock in vm_map_lookup. thus, any actions that
3732 * require the write lock must be done beforehand. because we
3733 * keep the read lock on the map, the copy-on-write status of the
3734 * entries we modify here cannot change.
3735 */
3736
3737 for (size = 0, entry = map->header.next; entry != &map->header;
3738 entry = entry->next) {
3739 if (entry->protection != VM_PROT_NONE &&
3740 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3741 size += entry->end - entry->start;
3742 }
3743 }
3744
3745 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3746 vm_map_unlock(map);
3747 return ENOMEM;
3748 }
3749
3750 if (limit != 0 &&
3751 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3752 vm_map_unlock(map);
3753 return ENOMEM;
3754 }
3755
3756 /*
3757 * Pass 2.
3758 */
3759
3760 for (entry = map->header.next; entry != &map->header;
3761 entry = entry->next) {
3762 if (entry->protection == VM_PROT_NONE)
3763 continue;
3764 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3765
3766 /*
3767 * perform actions of vm_map_lookup that need the
3768 * write lock on the map: create an anonymous map
3769 * for a copy-on-write region, or an anonymous map
3770 * for a zero-fill region. (XXXCDC: submap case
3771 * ok?)
3772 */
3773
3774 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3775 if (UVM_ET_ISNEEDSCOPY(entry) &&
3776 ((entry->max_protection & VM_PROT_WRITE) ||
3777 (entry->object.uvm_obj == NULL))) {
3778 amap_copy(map, entry, 0, entry->start,
3779 entry->end);
3780 /* XXXCDC: wait OK? */
3781 }
3782 }
3783 }
3784 entry->wired_count++;
3785 }
3786
3787 /*
3788 * Pass 3.
3789 */
3790
3791 #ifdef DIAGNOSTIC
3792 timestamp_save = map->timestamp;
3793 #endif
3794 vm_map_busy(map);
3795 vm_map_unlock(map);
3796
3797 rv = 0;
3798 for (entry = map->header.next; entry != &map->header;
3799 entry = entry->next) {
3800 if (entry->wired_count == 1) {
3801 rv = uvm_fault_wire(map, entry->start, entry->end,
3802 entry->max_protection, 1);
3803 if (rv) {
3804
3805 /*
3806 * wiring failed. break out of the loop.
3807 * we'll clean up the map below, once we
3808 * have a write lock again.
3809 */
3810
3811 break;
3812 }
3813 }
3814 }
3815
3816 if (rv) {
3817
3818 /*
3819 * Get back an exclusive (write) lock.
3820 */
3821
3822 vm_map_lock(map);
3823 vm_map_unbusy(map);
3824
3825 #ifdef DIAGNOSTIC
3826 if (timestamp_save + 1 != map->timestamp)
3827 panic("uvm_map_pageable_all: stale map");
3828 #endif
3829
3830 /*
3831 * first drop the wiring count on all the entries
3832 * which haven't actually been wired yet.
3833 *
3834 * Skip VM_PROT_NONE entries like we did above.
3835 */
3836
3837 failed_entry = entry;
3838 for (/* nothing */; entry != &map->header;
3839 entry = entry->next) {
3840 if (entry->protection == VM_PROT_NONE)
3841 continue;
3842 entry->wired_count--;
3843 }
3844
3845 /*
3846 * now, unwire all the entries that were successfully
3847 * wired above.
3848 *
3849 * Skip VM_PROT_NONE entries like we did above.
3850 */
3851
3852 for (entry = map->header.next; entry != failed_entry;
3853 entry = entry->next) {
3854 if (entry->protection == VM_PROT_NONE)
3855 continue;
3856 entry->wired_count--;
3857 if (VM_MAPENT_ISWIRED(entry))
3858 uvm_map_entry_unwire(map, entry);
3859 }
3860 vm_map_unlock(map);
3861 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3862 return (rv);
3863 }
3864
3865 vm_map_unbusy(map);
3866
3867 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3868 return 0;
3869 }
3870
3871 /*
3872 * uvm_map_clean: clean out a map range
3873 *
3874 * => valid flags:
3875 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3876 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3877 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3878 * if (flags & PGO_FREE): any cached pages are freed after clean
3879 * => returns an error if any part of the specified range isn't mapped
3880 * => never a need to flush amap layer since the anonymous memory has
3881 * no permanent home, but may deactivate pages there
3882 * => called from sys_msync() and sys_madvise()
3883 * => caller must not write-lock map (read OK).
3884 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3885 */
3886
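/*
 * Illustrative flag combinations (the actual choices are made by the
 * msync(2)/madvise(2) code that calls here, not in this function):
 *
 *	PGO_CLEANIT|PGO_SYNCIO		synchronous msync-style flush
 *	PGO_DEACTIVATE			madvise-style deactivation
 *	PGO_CLEANIT|PGO_FREE		clean, then free cached pages
 *
 * PGO_FREE and PGO_DEACTIVATE are mutually exclusive, per the KASSERT
 * below.
 */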
3887 int
3888 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3889 {
3890 struct vm_map_entry *current, *entry;
3891 struct uvm_object *uobj;
3892 struct vm_amap *amap;
3893 struct vm_anon *anon;
3894 struct vm_page *pg;
3895 vaddr_t offset;
3896 vsize_t size;
3897 voff_t uoff;
3898 int error, refs;
3899 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3900
3901 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3902 map, start, end, flags);
3903 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3904 (PGO_FREE|PGO_DEACTIVATE));
3905
3906 vm_map_lock_read(map);
3907 VM_MAP_RANGE_CHECK(map, start, end);
3908 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3909 vm_map_unlock_read(map);
3910 return EFAULT;
3911 }
3912
3913 /*
3914 * Make a first pass to check for holes and wiring problems.
3915 */
3916
3917 for (current = entry; current->start < end; current = current->next) {
3918 if (UVM_ET_ISSUBMAP(current)) {
3919 vm_map_unlock_read(map);
3920 return EINVAL;
3921 }
3922 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3923 vm_map_unlock_read(map);
3924 return EBUSY;
3925 }
3926 if (end <= current->end) {
3927 break;
3928 }
3929 if (current->end != current->next->start) {
3930 vm_map_unlock_read(map);
3931 return EFAULT;
3932 }
3933 }
3934
3935 error = 0;
3936 for (current = entry; start < end; current = current->next) {
3937 amap = current->aref.ar_amap; /* top layer */
3938 uobj = current->object.uvm_obj; /* bottom layer */
3939 KASSERT(start >= current->start);
3940
3941 /*
3942 * No amap cleaning necessary if:
3943 *
3944 * (1) There's no amap.
3945 *
3946 * (2) We're not deactivating or freeing pages.
3947 */
3948
3949 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3950 goto flush_object;
3951
3952 amap_lock(amap);
3953 offset = start - current->start;
3954 size = MIN(end, current->end) - start;
3955 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3956			anon = amap_lookup(&current->aref, offset);
3957 if (anon == NULL)
3958 continue;
3959
3960 mutex_enter(&anon->an_lock);
3961 pg = anon->an_page;
3962 if (pg == NULL) {
3963 mutex_exit(&anon->an_lock);
3964 continue;
3965 }
3966
3967 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3968
3969 /*
3970 * In these first 3 cases, we just deactivate the page.
3971 */
3972
3973 case PGO_CLEANIT|PGO_FREE:
3974 case PGO_CLEANIT|PGO_DEACTIVATE:
3975 case PGO_DEACTIVATE:
3976 deactivate_it:
3977 /*
3978 * skip the page if it's loaned or wired,
3979 * since it shouldn't be on a paging queue
3980 * at all in these cases.
3981 */
3982
3983 mutex_enter(&uvm_pageqlock);
3984 if (pg->loan_count != 0 ||
3985 pg->wire_count != 0) {
3986 mutex_exit(&uvm_pageqlock);
3987 mutex_exit(&anon->an_lock);
3988 continue;
3989 }
3990 KASSERT(pg->uanon == anon);
3991 uvm_pagedeactivate(pg);
3992 mutex_exit(&uvm_pageqlock);
3993 mutex_exit(&anon->an_lock);
3994 continue;
3995
3996 case PGO_FREE:
3997
3998 /*
3999 * If there are multiple references to
4000 * the amap, just deactivate the page.
4001 */
4002
4003 if (amap_refs(amap) > 1)
4004 goto deactivate_it;
4005
4006 /* skip the page if it's wired */
4007 if (pg->wire_count != 0) {
4008 mutex_exit(&anon->an_lock);
4009 continue;
4010 }
4011 				amap_unadd(&current->aref, offset);
4012 refs = --anon->an_ref;
4013 mutex_exit(&anon->an_lock);
4014 if (refs == 0)
4015 uvm_anfree(anon);
4016 continue;
4017 }
4018 }
4019 amap_unlock(amap);
4020
4021 flush_object:
4022 /*
4023 * flush pages if we've got a valid backing object.
4024 * note that we must always clean object pages before
4025 * freeing them since otherwise we could reveal stale
4026 * data from files.
4027 */
4028
4029 uoff = current->offset + (start - current->start);
4030 size = MIN(end, current->end) - start;
4031 if (uobj != NULL) {
4032 mutex_enter(&uobj->vmobjlock);
4033 if (uobj->pgops->pgo_put != NULL)
4034 error = (uobj->pgops->pgo_put)(uobj, uoff,
4035 uoff + size, flags | PGO_CLEANIT);
4036 else
4037 error = 0;
4038 }
4039 start += size;
4040 }
4041 vm_map_unlock_read(map);
4042 return (error);
4043 }
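
/*
 * Example (illustrative only, not part of this file): a caller in the
 * msync(2) path is expected to translate MS_* flags into the PGO_*
 * flags that uvm_map_clean() accepts.  A minimal sketch, with a
 * hypothetical helper name:
 */
#if 0
static int
example_msync(struct vm_map *map, vaddr_t addr, vsize_t size, int msflags)
{
	int uvmflags = PGO_CLEANIT;		/* always clean first */

	if (msflags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;		/* drop cached pages */
	if (msflags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;		/* wait for the writes */
	return uvm_map_clean(map, addr, addr + size, uvmflags);
}
#endif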
4044
4045
4046 /*
4047 * uvm_map_checkprot: check protection in map
4048 *
4049  * => returns true iff the entire range is allocated and allows the
4049  *    specified protection.
4050 * => map must be read or write locked by caller.
4051 */
4052
4053 bool
4054 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4055 vm_prot_t protection)
4056 {
4057 struct vm_map_entry *entry;
4058 struct vm_map_entry *tmp_entry;
4059
4060 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4061 return (false);
4062 }
4063 entry = tmp_entry;
4064 while (start < end) {
4065 if (entry == &map->header) {
4066 return (false);
4067 }
4068
4069 /*
4070 * no holes allowed
4071 */
4072
4073 if (start < entry->start) {
4074 return (false);
4075 }
4076
4077 /*
4078 * check protection associated with entry
4079 */
4080
4081 if ((entry->protection & protection) != protection) {
4082 return (false);
4083 }
4084 start = entry->end;
4085 entry = entry->next;
4086 }
4087 return (true);
4088 }
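
/*
 * Example (illustrative only): uvm_map_checkprot() requires the caller
 * to hold the map lock, so a typical use brackets it with a read lock.
 * The helper name is hypothetical:
 */
#if 0
static bool
example_range_is_writable(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	bool ok;

	vm_map_lock_read(map);
	ok = uvm_map_checkprot(map, start, end, VM_PROT_READ | VM_PROT_WRITE);
	vm_map_unlock_read(map);
	return ok;
}
#endif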
4089
4090 /*
4091 * uvmspace_alloc: allocate a vmspace structure.
4092 *
4093 * - structure includes vm_map and pmap
4094 * - XXX: no locking on this structure
4095 * - refcnt set to 1, rest must be init'd by caller
4096 */
4097 struct vmspace *
4098 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
4099 {
4100 struct vmspace *vm;
4101 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
4102
4103 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
4104 uvmspace_init(vm, NULL, vmin, vmax);
4105 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
4106 return (vm);
4107 }
4108
4109 /*
4110 * uvmspace_init: initialize a vmspace structure.
4111 *
4112 * - XXX: no locking on this structure
4113 * - refcnt set to 1, rest must be init'd by caller
4114 */
4115 void
4116 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
4117 {
4118 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
4119
4120 memset(vm, 0, sizeof(*vm));
4121 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4122 #ifdef __USING_TOPDOWN_VM
4123 | VM_MAP_TOPDOWN
4124 #endif
4125 );
4126 if (pmap)
4127 pmap_reference(pmap);
4128 else
4129 pmap = pmap_create();
4130 vm->vm_map.pmap = pmap;
4131 vm->vm_refcnt = 1;
4132 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4133 }
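
/*
 * Example (illustrative only): the reference-counted lifecycle that
 * uvmspace_alloc(), uvmspace_addref() and uvmspace_free() define.
 * VM_MIN_ADDRESS and VM_MAXUSER_ADDRESS stand in for a real user
 * VA range here.
 */
#if 0
	struct vmspace *vm;

	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	/* vm_refcnt == 1 */
	uvmspace_addref(vm);	/* second user, e.g. a vfork(2) child */
	uvmspace_free(vm);	/* refcnt 2 -> 1; the vmspace survives */
	uvmspace_free(vm);	/* refcnt 1 -> 0; mappings and pmap destroyed */
#endif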
4134
4135 /*
4136 * uvmspace_share: share a vmspace between two processes
4137 *
4138 * - used for vfork, threads(?)
4139 */
4140
4141 void
4142 uvmspace_share(struct proc *p1, struct proc *p2)
4143 {
4144
4145 uvmspace_addref(p1->p_vmspace);
4146 p2->p_vmspace = p1->p_vmspace;
4147 }
4148
4149 #if 0
4150
4151 /*
4152 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4153 *
4154 * - XXX: no locking on vmspace
4155 */
4156
4157 void
4158 uvmspace_unshare(struct lwp *l)
4159 {
4160 struct proc *p = l->l_proc;
4161 struct vmspace *nvm, *ovm = p->p_vmspace;
4162
4163 if (ovm->vm_refcnt == 1)
4164 /* nothing to do: vmspace isn't shared in the first place */
4165 return;
4166
4167 /* make a new vmspace, still holding old one */
4168 nvm = uvmspace_fork(ovm);
4169
4170 kpreempt_disable();
4171 pmap_deactivate(l); /* unbind old vmspace */
4172 p->p_vmspace = nvm;
4173 pmap_activate(l); /* switch to new vmspace */
4174 kpreempt_enable();
4175
4176 uvmspace_free(ovm); /* drop reference to old vmspace */
4177 }
4178
4179 #endif
4180
4181 /*
4182 * uvmspace_exec: the process wants to exec a new program
4183 */
4184
4185 void
4186 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4187 {
4188 struct proc *p = l->l_proc;
4189 struct vmspace *nvm, *ovm = p->p_vmspace;
4190 struct vm_map *map = &ovm->vm_map;
4191
4192 #ifdef __sparc__
4193 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
4194 kill_user_windows(l); /* before stack addresses go away */
4195 #endif
4196
4197 /*
4198 * see if more than one process is using this vmspace...
4199 */
4200
4201 if (ovm->vm_refcnt == 1) {
4202
4203 /*
4204 * if p is the only process using its vmspace then we can safely
4205 * recycle that vmspace for the program that is being exec'd.
4206 */
4207
4208 #ifdef SYSVSHM
4209 /*
4210 * SYSV SHM semantics require us to kill all segments on an exec
4211 */
4212
4213 if (ovm->vm_shm)
4214 shmexit(ovm);
4215 #endif
4216
4217 /*
4218 * POSIX 1003.1b -- "lock future mappings" is revoked
4219 * when a process execs another program image.
4220 */
4221
4222 map->flags &= ~VM_MAP_WIREFUTURE;
4223
4224 /*
4225 * now unmap the old program
4226 */
4227
4228 pmap_remove_all(map->pmap);
4229 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4230 KASSERT(map->header.prev == &map->header);
4231 KASSERT(map->nentries == 0);
4232
4233 /*
4234 * resize the map
4235 */
4236
4237 vm_map_setmin(map, start);
4238 vm_map_setmax(map, end);
4239 } else {
4240
4241 /*
4242 * p's vmspace is being shared, so we can't reuse it for p since
4243 * it is still being used for others. allocate a new vmspace
4244 		 * for p.
4245 */
4246
4247 nvm = uvmspace_alloc(start, end);
4248
4249 /*
4250 * install new vmspace and drop our ref to the old one.
4251 */
4252
4253 kpreempt_disable();
4254 pmap_deactivate(l);
4255 p->p_vmspace = nvm;
4256 pmap_activate(l);
4257 kpreempt_enable();
4258
4259 uvmspace_free(ovm);
4260 }
4261 }
4262
4263 /*
4264  * uvmspace_addref: add a reference to a vmspace.
4265 */
4266
4267 void
4268 uvmspace_addref(struct vmspace *vm)
4269 {
4270 struct vm_map *map = &vm->vm_map;
4271
4272 KASSERT((map->flags & VM_MAP_DYING) == 0);
4273
4274 mutex_enter(&map->misc_lock);
4275 KASSERT(vm->vm_refcnt > 0);
4276 vm->vm_refcnt++;
4277 mutex_exit(&map->misc_lock);
4278 }
4279
4280 /*
4281 * uvmspace_free: free a vmspace data structure
4282 */
4283
4284 void
4285 uvmspace_free(struct vmspace *vm)
4286 {
4287 struct vm_map_entry *dead_entries;
4288 struct vm_map *map = &vm->vm_map;
4289 int n;
4290
4291 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4292
4293 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4294 mutex_enter(&map->misc_lock);
4295 n = --vm->vm_refcnt;
4296 mutex_exit(&map->misc_lock);
4297 if (n > 0)
4298 return;
4299
4300 /*
4301 * at this point, there should be no other references to the map.
4302 * delete all of the mappings, then destroy the pmap.
4303 */
4304
4305 map->flags |= VM_MAP_DYING;
4306 pmap_remove_all(map->pmap);
4307 #ifdef SYSVSHM
4308 /* Get rid of any SYSV shared memory segments. */
4309 if (vm->vm_shm != NULL)
4310 shmexit(vm);
4311 #endif
4312 if (map->nentries) {
4313 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4314 &dead_entries, NULL, 0);
4315 if (dead_entries != NULL)
4316 uvm_unmap_detach(dead_entries, 0);
4317 }
4318 KASSERT(map->nentries == 0);
4319 KASSERT(map->size == 0);
4320 mutex_destroy(&map->misc_lock);
4321 mutex_destroy(&map->mutex);
4322 rw_destroy(&map->lock);
4323 cv_destroy(&map->cv);
4324 pmap_destroy(map->pmap);
4325 pool_cache_put(&uvm_vmspace_cache, vm);
4326 }
4327
4328 /*
4329 * F O R K - m a i n e n t r y p o i n t
4330 */
4331 /*
4332 * uvmspace_fork: fork a process' main map
4333 *
4334 * => create a new vmspace for child process from parent.
4335 * => parent's map must not be locked.
4336 */
4337
4338 struct vmspace *
4339 uvmspace_fork(struct vmspace *vm1)
4340 {
4341 struct vmspace *vm2;
4342 struct vm_map *old_map = &vm1->vm_map;
4343 struct vm_map *new_map;
4344 struct vm_map_entry *old_entry;
4345 struct vm_map_entry *new_entry;
4346 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4347
4348 vm_map_lock(old_map);
4349
4350 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4351 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4352 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4353 new_map = &vm2->vm_map; /* XXX */
4354
4355 old_entry = old_map->header.next;
4356 new_map->size = old_map->size;
4357
4358 /*
4359 * go entry-by-entry
4360 */
4361
4362 while (old_entry != &old_map->header) {
4363
4364 /*
4365 * first, some sanity checks on the old entry
4366 */
4367
4368 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4369 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4370 !UVM_ET_ISNEEDSCOPY(old_entry));
4371
4372 switch (old_entry->inheritance) {
4373 case MAP_INHERIT_NONE:
4374
4375 /*
4376 * drop the mapping, modify size
4377 */
4378 new_map->size -= old_entry->end - old_entry->start;
4379 break;
4380
4381 case MAP_INHERIT_SHARE:
4382
4383 /*
4384 * share the mapping: this means we want the old and
4385 * new entries to share amaps and backing objects.
4386 */
4387 /*
4388 * if the old_entry needs a new amap (due to prev fork)
4389 * then we need to allocate it now so that we have
4390 * something we own to share with the new_entry. [in
4391 * other words, we need to clear needs_copy]
4392 */
4393
4394 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4395 /* get our own amap, clears needs_copy */
4396 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4397 0, 0);
4398 /* XXXCDC: WAITOK??? */
4399 }
4400
4401 new_entry = uvm_mapent_alloc(new_map, 0);
4402 /* old_entry -> new_entry */
4403 uvm_mapent_copy(old_entry, new_entry);
4404
4405 /* new pmap has nothing wired in it */
4406 new_entry->wired_count = 0;
4407
4408 /*
4409 * gain reference to object backing the map (can't
4410 * be a submap, already checked this case).
4411 */
4412
4413 if (new_entry->aref.ar_amap)
4414 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4415
4416 if (new_entry->object.uvm_obj &&
4417 new_entry->object.uvm_obj->pgops->pgo_reference)
4418 new_entry->object.uvm_obj->
4419 pgops->pgo_reference(
4420 new_entry->object.uvm_obj);
4421
4422 /* insert entry at end of new_map's entry list */
4423 uvm_map_entry_link(new_map, new_map->header.prev,
4424 new_entry);
4425
4426 break;
4427
4428 case MAP_INHERIT_COPY:
4429
4430 /*
4431 * copy-on-write the mapping (using mmap's
4432 * MAP_PRIVATE semantics)
4433 *
4434 * allocate new_entry, adjust reference counts.
4435 * (note that new references are read-only).
4436 */
4437
4438 new_entry = uvm_mapent_alloc(new_map, 0);
4439 /* old_entry -> new_entry */
4440 uvm_mapent_copy(old_entry, new_entry);
4441
4442 if (new_entry->aref.ar_amap)
4443 uvm_map_reference_amap(new_entry, 0);
4444
4445 if (new_entry->object.uvm_obj &&
4446 new_entry->object.uvm_obj->pgops->pgo_reference)
4447 new_entry->object.uvm_obj->pgops->pgo_reference
4448 (new_entry->object.uvm_obj);
4449
4450 /* new pmap has nothing wired in it */
4451 new_entry->wired_count = 0;
4452
4453 new_entry->etype |=
4454 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4455 uvm_map_entry_link(new_map, new_map->header.prev,
4456 new_entry);
4457
4458 /*
4459 * the new entry will need an amap. it will either
4460 * need to be copied from the old entry or created
4461 * from scratch (if the old entry does not have an
4462 * amap). can we defer this process until later
4463 * (by setting "needs_copy") or do we need to copy
4464 * the amap now?
4465 *
4466 * we must copy the amap now if any of the following
4467 * conditions hold:
4468 * 1. the old entry has an amap and that amap is
4469 * being shared. this means that the old (parent)
4470 * process is sharing the amap with another
4471 * process. if we do not clear needs_copy here
4472 * we will end up in a situation where both the
4473 			 * parent and child process are referring to the
4474 * same amap with "needs_copy" set. if the
4475 * parent write-faults, the fault routine will
4476 * clear "needs_copy" in the parent by allocating
4477 * a new amap. this is wrong because the
4478 * parent is supposed to be sharing the old amap
4479 * and the new amap will break that.
4480 *
4481 * 2. if the old entry has an amap and a non-zero
4482 * wire count then we are going to have to call
4483 * amap_cow_now to avoid page faults in the
4484 * parent process. since amap_cow_now requires
4485 * "needs_copy" to be clear we might as well
4486 * clear it here as well.
4487 *
4488 */
4489
4490 if (old_entry->aref.ar_amap != NULL) {
4491 if ((amap_flags(old_entry->aref.ar_amap) &
4492 AMAP_SHARED) != 0 ||
4493 VM_MAPENT_ISWIRED(old_entry)) {
4494
4495 amap_copy(new_map, new_entry,
4496 AMAP_COPY_NOCHUNK, 0, 0);
4497 /* XXXCDC: M_WAITOK ... ok? */
4498 }
4499 }
4500
4501 /*
4502 * if the parent's entry is wired down, then the
4503 * parent process does not want page faults on
4504 * access to that memory. this means that we
4505 * cannot do copy-on-write because we can't write
4506 * protect the old entry. in this case we
4507 * resolve all copy-on-write faults now, using
4508 * amap_cow_now. note that we have already
4509 * allocated any needed amap (above).
4510 */
4511
4512 if (VM_MAPENT_ISWIRED(old_entry)) {
4513
4514 /*
4515 * resolve all copy-on-write faults now
4516 * (note that there is nothing to do if
4517 * the old mapping does not have an amap).
4518 */
4519 if (old_entry->aref.ar_amap)
4520 amap_cow_now(new_map, new_entry);
4521
4522 } else {
4523
4524 /*
4525 				 * set up mappings to trigger copy-on-write faults.
4526 * we must write-protect the parent if it has
4527 * an amap and it is not already "needs_copy"...
4528 * if it is already "needs_copy" then the parent
4529 * has already been write-protected by a previous
4530 * fork operation.
4531 */
4532
4533 if (old_entry->aref.ar_amap &&
4534 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4535 if (old_entry->max_protection & VM_PROT_WRITE) {
4536 pmap_protect(old_map->pmap,
4537 old_entry->start,
4538 old_entry->end,
4539 old_entry->protection &
4540 ~VM_PROT_WRITE);
4541 }
4542 old_entry->etype |= UVM_ET_NEEDSCOPY;
4543 }
4544 }
4545 break;
4546 } /* end of switch statement */
4547 old_entry = old_entry->next;
4548 }
4549
4550 pmap_update(old_map->pmap);
4551 vm_map_unlock(old_map);
4552
4553 #ifdef SYSVSHM
4554 if (vm1->vm_shm)
4555 shmfork(vm1, vm2);
4556 #endif
4557
4558 #ifdef PMAP_FORK
4559 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4560 #endif
4561
4562 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4563 return (vm2);
4564 }
4565
4566
4567 /*
4568 * in-kernel map entry allocation.
4569 */
4570
4571 struct uvm_kmapent_hdr {
4572 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4573 int ukh_nused;
4574 struct vm_map_entry *ukh_freelist;
4575 struct vm_map *ukh_map;
4576 struct vm_map_entry ukh_entries[0];
4577 };
4578
4579 #define UVM_KMAPENT_CHUNK \
4580 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4581 / sizeof(struct vm_map_entry))
4582
4583 #define UVM_KHDR_FIND(entry) \
4584 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
4585
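
/*
 * Illustrative invariant (hypothetical check, not part of this file):
 * a chunk of entries occupies exactly one page with its header at the
 * start of that page, which is why UVM_KHDR_FIND() can recover the
 * header from any entry by masking off the page offset.
 */
#if 0
static void
example_check_chunk(struct uvm_kmapent_hdr *ukh)
{
	int i;

	for (i = 0; i < UVM_KMAPENT_CHUNK; i++)
		KASSERT(UVM_KHDR_FIND(&ukh->ukh_entries[i]) == ukh);
}
#endif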
4586
4587 #ifdef DIAGNOSTIC
4588 static struct vm_map *
4589 uvm_kmapent_map(struct vm_map_entry *entry)
4590 {
4591 const struct uvm_kmapent_hdr *ukh;
4592
4593 ukh = UVM_KHDR_FIND(entry);
4594 return ukh->ukh_map;
4595 }
4596 #endif
4597
4598 static inline struct vm_map_entry *
4599 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4600 {
4601 struct vm_map_entry *entry;
4602
4603 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4604 KASSERT(ukh->ukh_nused >= 0);
4605
4606 entry = ukh->ukh_freelist;
4607 if (entry) {
4608 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4609 == UVM_MAP_KERNEL);
4610 ukh->ukh_freelist = entry->next;
4611 ukh->ukh_nused++;
4612 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4613 } else {
4614 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4615 }
4616
4617 return entry;
4618 }
4619
4620 static inline void
4621 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4622 {
4623
4624 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4625 == UVM_MAP_KERNEL);
4626 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4627 KASSERT(ukh->ukh_nused > 0);
4628 KASSERT(ukh->ukh_freelist != NULL ||
4629 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4630 KASSERT(ukh->ukh_freelist == NULL ||
4631 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4632
4633 ukh->ukh_nused--;
4634 entry->next = ukh->ukh_freelist;
4635 ukh->ukh_freelist = entry;
4636 }
4637
4638 /*
4639 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4640 */
4641
4642 static struct vm_map_entry *
4643 uvm_kmapent_alloc(struct vm_map *map, int flags)
4644 {
4645 struct vm_page *pg;
4646 struct uvm_kmapent_hdr *ukh;
4647 struct vm_map_entry *entry;
4648 #ifndef PMAP_MAP_POOLPAGE
4649 struct uvm_map_args args;
4650 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4651 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4652 int error;
4653 #endif
4654 vaddr_t va;
4655 int i;
4656
4657 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4658 KDASSERT(kernel_map != NULL);
4659 KASSERT(vm_map_pmap(map) == pmap_kernel());
4660
4661 UVMMAP_EVCNT_INCR(uke_alloc);
4662 entry = NULL;
4663 again:
4664 /*
4665 * try to grab an entry from freelist.
4666 */
4667 mutex_spin_enter(&uvm_kentry_lock);
4668 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4669 if (ukh) {
4670 entry = uvm_kmapent_get(ukh);
4671 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4672 LIST_REMOVE(ukh, ukh_listq);
4673 }
4674 mutex_spin_exit(&uvm_kentry_lock);
4675
4676 if (entry)
4677 return entry;
4678
4679 /*
4680 	 * there's no free entry for this vm_map.
4681 	 * we need to allocate more vm_map_entries;
4682 	 * for simplicity, always allocate a one-page chunk of them at once.
4683 */
4684
4685 pg = uvm_pagealloc(NULL, 0, NULL,
4686 (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
4687 if (__predict_false(pg == NULL)) {
4688 if (flags & UVM_FLAG_NOWAIT)
4689 return NULL;
4690 uvm_wait("kme_alloc");
4691 goto again;
4692 }
4693
4694 #ifdef PMAP_MAP_POOLPAGE
4695 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
4696 KASSERT(va != 0);
4697 #else
4698 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4699 0, mapflags, &args);
4700 if (error) {
4701 uvm_pagefree(pg);
4702 return NULL;
4703 }
4704
4705 va = args.uma_start;
4706
4707 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
4708 VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
4709 pmap_update(vm_map_pmap(map));
4710
4711 #endif
4712 ukh = (void *)va;
4713
4714 /*
4715 	 * use the last entry for the ukh itself.
4716 */
4717
4718 i = UVM_KMAPENT_CHUNK - 1;
4719 #ifndef PMAP_MAP_POOLPAGE
4720 entry = &ukh->ukh_entries[i--];
4721 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4722 error = uvm_map_enter(map, &args, entry);
4723 KASSERT(error == 0);
4724 #endif
4725
4726 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4727 ukh->ukh_map = map;
4728 ukh->ukh_freelist = NULL;
4729 for (; i >= 1; i--) {
4730 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4731
4732 xentry->flags = UVM_MAP_KERNEL;
4733 uvm_kmapent_put(ukh, xentry);
4734 }
4735 #ifdef PMAP_MAP_POOLPAGE
4736 KASSERT(ukh->ukh_nused == 1);
4737 #else
4738 KASSERT(ukh->ukh_nused == 2);
4739 #endif
4740
4741 mutex_spin_enter(&uvm_kentry_lock);
4742 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4743 ukh, ukh_listq);
4744 mutex_spin_exit(&uvm_kentry_lock);
4745
4746 /*
4747 * return first entry.
4748 */
4749
4750 entry = &ukh->ukh_entries[0];
4751 entry->flags = UVM_MAP_KERNEL;
4752 UVMMAP_EVCNT_INCR(ukh_alloc);
4753
4754 return entry;
4755 }
4756
4757 /*
4758  * uvm_kmapent_free: free a map entry for an in-kernel map
4759 */
4760
4761 static void
4762 uvm_kmapent_free(struct vm_map_entry *entry)
4763 {
4764 struct uvm_kmapent_hdr *ukh;
4765 struct vm_page *pg;
4766 struct vm_map *map;
4767 #ifndef PMAP_UNMAP_POOLPAGE
4768 struct pmap *pmap;
4769 struct vm_map_entry *deadentry;
4770 #endif
4771 vaddr_t va;
4772 paddr_t pa;
4773
4774 UVMMAP_EVCNT_INCR(uke_free);
4775 ukh = UVM_KHDR_FIND(entry);
4776 map = ukh->ukh_map;
4777
4778 mutex_spin_enter(&uvm_kentry_lock);
4779 uvm_kmapent_put(ukh, entry);
4780 #ifdef PMAP_UNMAP_POOLPAGE
4781 if (ukh->ukh_nused > 0) {
4782 #else
4783 if (ukh->ukh_nused > 1) {
4784 #endif
4785 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4786 LIST_INSERT_HEAD(
4787 &vm_map_to_kernel(map)->vmk_kentry_free,
4788 ukh, ukh_listq);
4789 mutex_spin_exit(&uvm_kentry_lock);
4790 return;
4791 }
4792
4793 /*
4794 * now we can free this ukh.
4795 *
4796 * however, keep an empty ukh to avoid ping-pong.
4797 */
4798
4799 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4800 LIST_NEXT(ukh, ukh_listq) == NULL) {
4801 mutex_spin_exit(&uvm_kentry_lock);
4802 return;
4803 }
4804 LIST_REMOVE(ukh, ukh_listq);
4805 mutex_spin_exit(&uvm_kentry_lock);
4806
4807 va = (vaddr_t)ukh;
4808
4809 #ifdef PMAP_UNMAP_POOLPAGE
4810 KASSERT(ukh->ukh_nused == 0);
4811 pa = PMAP_UNMAP_POOLPAGE(va);
4812 KASSERT(pa != 0);
4813 #else
4814 KASSERT(ukh->ukh_nused == 1);
4815
4816 /*
4817 	 * remove the map entry for the ukh itself.
4818 */
4819
4820 KASSERT((va & PAGE_MASK) == 0);
4821 vm_map_lock(map);
4822 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4823 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4824 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4825 KASSERT(deadentry->next == NULL);
4826 KASSERT(deadentry == &ukh->ukh_entries[UVM_KMAPENT_CHUNK - 1]);
4827
4828 /*
4829 * unmap the page from pmap and free it.
4830 */
4831
4832 pmap = vm_map_pmap(map);
4833 KASSERT(pmap == pmap_kernel());
4834 if (!pmap_extract(pmap, va, &pa))
4835 panic("%s: no mapping", __func__);
4836 pmap_kremove(va, PAGE_SIZE);
4837 pmap_update(vm_map_pmap(map));
4838 vm_map_unlock(map);
4839 #endif /* !PMAP_UNMAP_POOLPAGE */
4840 pg = PHYS_TO_VM_PAGE(pa);
4841 uvm_pagefree(pg);
4842 UVMMAP_EVCNT_INCR(ukh_free);
4843 }
4844
4845 static vsize_t
4846 uvm_kmapent_overhead(vsize_t size)
4847 {
4848
4849 /*
4850 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4851 * as the min allocation unit is PAGE_SIZE.
4852 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4853 	 *   one of them is used to map the page itself.
4854 */
4855
4856 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4857 PAGE_SIZE;
4858 }
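
/*
 * Worked example (all sizes for illustration only): with a 4096-byte
 * PAGE_SIZE, a 32-byte header and a 128-byte struct vm_map_entry,
 * UVM_KMAPENT_CHUNK is (4096 - 32) / 128 = 31, of which 30 entries per
 * page are usable.  Mapping 1MB can take up to howmany(1MB, PAGE_SIZE)
 * = 256 unmerged entries, so the function above reports
 * howmany(256, 30) * PAGE_SIZE = 9 pages (36KB) of overhead.
 */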
4859
4860 /*
4861 * map entry reservation
4862 */
4863
4864 /*
4865 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4866 *
4867 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4868 * => caller shouldn't hold map locked.
4869 */
4870 int
4871 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4872 int nentries, int flags)
4873 {
4874
4875 umr->umr_nentries = 0;
4876
4877 if ((flags & UVM_FLAG_QUANTUM) != 0)
4878 return 0;
4879
4880 if (!VM_MAP_USE_KMAPENT(map))
4881 return 0;
4882
4883 while (nentries--) {
4884 struct vm_map_entry *ent;
4885 ent = uvm_kmapent_alloc(map, flags);
4886 if (!ent) {
4887 uvm_mapent_unreserve(map, umr);
4888 return ENOMEM;
4889 }
4890 UMR_PUTENTRY(umr, ent);
4891 }
4892
4893 return 0;
4894 }
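
/*
 * Example (illustrative only; uvm_unmap1() below is the in-tree caller
 * that follows this pattern): reserve entries for clipping before
 * taking the map lock, so no entry allocation happens while locked.
 */
#if 0
	struct uvm_mapent_reservation umr;
	struct vm_map_entry *dead_entries;

	if (uvm_mapent_reserve(map, &umr, 2, 0) != 0)
		return ENOMEM;
	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead_entries, &umr, 0);
	vm_map_unlock(map);
	uvm_mapent_unreserve(map, &umr);
#endif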
4895
4896 /*
4897 * uvm_mapent_unreserve:
4898 *
4899 * => caller shouldn't hold map locked.
4900  * => never fails or sleeps.
4901 */
4902 void
4903 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4904 {
4905
4906 while (!UMR_EMPTY(umr))
4907 uvm_kmapent_free(UMR_GETENTRY(umr));
4908 }
4909
4910 /*
4911 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4912 *
4913 * => called with map locked.
4914  * => return non-zero if successfully merged.
4915 */
4916
4917 int
4918 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4919 {
4920 struct uvm_object *uobj;
4921 struct vm_map_entry *next;
4922 struct vm_map_entry *prev;
4923 vsize_t size;
4924 int merged = 0;
4925 bool copying;
4926 int newetype;
4927
4928 if (VM_MAP_USE_KMAPENT(map)) {
4929 return 0;
4930 }
4931 if (entry->aref.ar_amap != NULL) {
4932 return 0;
4933 }
4934 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4935 return 0;
4936 }
4937
4938 uobj = entry->object.uvm_obj;
4939 size = entry->end - entry->start;
4940 copying = (flags & UVM_MERGE_COPYING) != 0;
4941 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4942
4943 next = entry->next;
4944 if (next != &map->header &&
4945 next->start == entry->end &&
4946 ((copying && next->aref.ar_amap != NULL &&
4947 amap_refs(next->aref.ar_amap) == 1) ||
4948 (!copying && next->aref.ar_amap == NULL)) &&
4949 UVM_ET_ISCOMPATIBLE(next, newetype,
4950 uobj, entry->flags, entry->protection,
4951 entry->max_protection, entry->inheritance, entry->advice,
4952 entry->wired_count) &&
4953 (uobj == NULL || entry->offset + size == next->offset)) {
4954 int error;
4955
4956 if (copying) {
4957 error = amap_extend(next, size,
4958 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4959 } else {
4960 error = 0;
4961 }
4962 if (error == 0) {
4963 if (uobj) {
4964 if (uobj->pgops->pgo_detach) {
4965 uobj->pgops->pgo_detach(uobj);
4966 }
4967 }
4968
4969 entry->end = next->end;
4970 clear_hints(map, next);
4971 uvm_map_entry_unlink(map, next);
4972 if (copying) {
4973 entry->aref = next->aref;
4974 entry->etype &= ~UVM_ET_NEEDSCOPY;
4975 }
4976 uvm_map_check(map, "trymerge forwardmerge");
4977 uvm_mapent_free_merged(map, next);
4978 merged++;
4979 }
4980 }
4981
4982 prev = entry->prev;
4983 if (prev != &map->header &&
4984 prev->end == entry->start &&
4985 ((copying && !merged && prev->aref.ar_amap != NULL &&
4986 amap_refs(prev->aref.ar_amap) == 1) ||
4987 (!copying && prev->aref.ar_amap == NULL)) &&
4988 UVM_ET_ISCOMPATIBLE(prev, newetype,
4989 uobj, entry->flags, entry->protection,
4990 entry->max_protection, entry->inheritance, entry->advice,
4991 entry->wired_count) &&
4992 (uobj == NULL ||
4993 prev->offset + prev->end - prev->start == entry->offset)) {
4994 int error;
4995
4996 if (copying) {
4997 error = amap_extend(prev, size,
4998 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4999 } else {
5000 error = 0;
5001 }
5002 if (error == 0) {
5003 if (uobj) {
5004 if (uobj->pgops->pgo_detach) {
5005 uobj->pgops->pgo_detach(uobj);
5006 }
5007 entry->offset = prev->offset;
5008 }
5009
5010 entry->start = prev->start;
5011 clear_hints(map, prev);
5012 uvm_map_entry_unlink(map, prev);
5013 if (copying) {
5014 entry->aref = prev->aref;
5015 entry->etype &= ~UVM_ET_NEEDSCOPY;
5016 }
5017 uvm_map_check(map, "trymerge backmerge");
5018 uvm_mapent_free_merged(map, prev);
5019 merged++;
5020 }
5021 }
5022
5023 return merged;
5024 }
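
/*
 * Worked example (addresses for illustration only): two object-backed
 * entries qualify for the forward merge above only when they are both
 * virtually and offset contiguous.  An entry [0x1000, 0x3000) at
 * offset 0 followed by a next entry [0x3000, 0x5000) at offset 0x2000
 * satisfies entry->offset + size == next->offset (0 + 0x2000), so the
 * pair collapses into one entry [0x1000, 0x5000) at offset 0.
 */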
5025
5026 /*
5027 * uvm_map_create: create map
5028 */
5029
5030 struct vm_map *
5031 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5032 {
5033 struct vm_map *result;
5034
5035 result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
5036 uvm_map_setup(result, vmin, vmax, flags);
5037 result->pmap = pmap;
5038 return(result);
5039 }
5040
5041 /*
5042 * uvm_map_setup: init map
5043 *
5044 * => map must not be in service yet.
5045 */
5046
5047 void
5048 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5049 {
5050 int ipl;
5051
5052 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
5053 map->header.next = map->header.prev = &map->header;
5054 map->nentries = 0;
5055 map->size = 0;
5056 map->ref_count = 1;
5057 vm_map_setmin(map, vmin);
5058 vm_map_setmax(map, vmax);
5059 map->flags = flags;
5060 map->first_free = &map->header;
5061 map->hint = &map->header;
5062 map->timestamp = 0;
5063 map->busy = NULL;
5064
5065 if ((flags & VM_MAP_INTRSAFE) != 0) {
5066 ipl = IPL_VM;
5067 } else {
5068 ipl = IPL_NONE;
5069 }
5070
5071 rw_init(&map->lock);
5072 cv_init(&map->cv, "vm_map");
5073 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5074 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
5075 }
5076
5077
5078 /*
5079 * U N M A P - m a i n e n t r y p o i n t
5080 */
5081
5082 /*
5083  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
5084 *
5085 * => caller must check alignment and size
5086 * => map must be unlocked (we will lock it)
5087 * => flags is UVM_FLAG_QUANTUM or 0.
5088 */
5089
5090 void
5091 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5092 {
5093 struct vm_map_entry *dead_entries;
5094 struct uvm_mapent_reservation umr;
5095 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5096
5097 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5098 map, start, end, 0);
5099 if (map == kernel_map) {
5100 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5101 }
5102 	/*
5103 	 * the work is done by helper functions: remove the mappings
5104 	 * from the pmap, then detach from the dead entries...
5105 	 */
5106 uvm_mapent_reserve(map, &umr, 2, flags);
5107 vm_map_lock(map);
5108 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5109 vm_map_unlock(map);
5110 uvm_mapent_unreserve(map, &umr);
5111
5112 if (dead_entries != NULL)
5113 uvm_unmap_detach(dead_entries, 0);
5114
5115 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5116 }
5117
5118
5119 /*
5120 * uvm_map_reference: add reference to a map
5121 *
5122 * => map need not be locked (we use misc_lock).
5123 */
5124
5125 void
5126 uvm_map_reference(struct vm_map *map)
5127 {
5128 mutex_enter(&map->misc_lock);
5129 map->ref_count++;
5130 mutex_exit(&map->misc_lock);
5131 }
5132
5133 struct vm_map_kernel *
5134 vm_map_to_kernel(struct vm_map *map)
5135 {
5136
5137 KASSERT(VM_MAP_IS_KERNEL(map));
5138
5139 return (struct vm_map_kernel *)map;
5140 }
5141
5142 bool
5143 vm_map_starved_p(struct vm_map *map)
5144 {
5145
5146 if ((map->flags & VM_MAP_WANTVA) != 0) {
5147 return true;
5148 }
5149 /* XXX */
5150 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5151 return true;
5152 }
5153 return false;
5154 }
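
/*
 * Worked example: for a map spanning 256MB, (256MB / 16) * 15 = 240MB,
 * so the heuristic above reports starvation once map->size exceeds
 * 240MB of the available range.
 */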
5155
5156 #if defined(DDB) || defined(DEBUGPRINT)
5157
5158 /*
5159 * uvm_map_printit: actually prints the map
5160 */
5161
5162 void
5163 uvm_map_printit(struct vm_map *map, bool full,
5164 void (*pr)(const char *, ...))
5165 {
5166 struct vm_map_entry *entry;
5167
5168 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
5169 vm_map_max(map));
5170 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
5171 map->nentries, map->size, map->ref_count, map->timestamp,
5172 map->flags);
5173 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5174 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5175 if (!full)
5176 return;
5177 for (entry = map->header.next; entry != &map->header;
5178 entry = entry->next) {
5179 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
5180 entry, entry->start, entry->end, entry->object.uvm_obj,
5181 (long long)entry->offset, entry->aref.ar_amap,
5182 entry->aref.ar_pageoff);
5183 (*pr)(
5184 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5185 "wc=%d, adv=%d\n",
5186 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5187 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5188 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5189 entry->protection, entry->max_protection,
5190 entry->inheritance, entry->wired_count, entry->advice);
5191 }
5192 }
5193
5194 void
5195 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5196 {
5197 struct vm_map *map;
5198
5199 for (map = kernel_map;;) {
5200 struct vm_map_entry *entry;
5201
5202 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5203 break;
5204 }
5205 (*pr)("%p is %p+%zu from VMMAP %p\n",
5206 (void *)addr, (void *)entry->start,
5207 (size_t)(addr - (uintptr_t)entry->start), map);
5208 if (!UVM_ET_ISSUBMAP(entry)) {
5209 break;
5210 }
5211 map = entry->object.sub_map;
5212 }
5213 }
5214
5215 #endif /* DDB || DEBUGPRINT */
5216