1 /* $NetBSD: uvm_map.c,v 1.304 2011/09/01 06:40:28 matt Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
37 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
38 *
39 *
40 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41 * All rights reserved.
42 *
43 * Permission to use, copy, modify and distribute this software and
44 * its documentation is hereby granted, provided that both the copyright
45 * notice and this permission notice appear in all copies of the
46 * software, derivative works or modified versions, and any portions
47 * thereof, and that both notices appear in supporting documentation.
48 *
49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52 *
53 * Carnegie Mellon requests users of this software to return to
54 *
55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
56 * School of Computer Science
57 * Carnegie Mellon University
58 * Pittsburgh PA 15213-3890
59 *
60 * any improvements or extensions that they make and grant Carnegie the
61 * rights to redistribute these changes.
62 */
63
64 /*
65 * uvm_map.c: uvm map operations
66 */
67
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.304 2011/09/01 06:40:28 matt Exp $");
70
71 #include "opt_ddb.h"
72 #include "opt_uvmhist.h"
73 #include "opt_uvm.h"
74 #include "opt_sysv.h"
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/mman.h>
79 #include <sys/proc.h>
80 #include <sys/malloc.h>
81 #include <sys/pool.h>
82 #include <sys/kernel.h>
83 #include <sys/mount.h>
84 #include <sys/vnode.h>
85 #include <sys/lockdebug.h>
86 #include <sys/atomic.h>
87 #ifndef __USER_VA0_IS_SAFE
88 #include <sys/sysctl.h>
89 #include <sys/kauth.h>
90 #include "opt_user_va0_disable_default.h"
91 #endif
92
93 #ifdef SYSVSHM
94 #include <sys/shm.h>
95 #endif
96
97 #include <uvm/uvm.h>
98 #include <uvm/uvm_readahead.h>
99
100 #if defined(DDB) || defined(DEBUGPRINT)
101 #include <uvm/uvm_ddb.h>
102 #endif
103
104 #if !defined(UVMMAP_COUNTERS)
105
106 #define UVMMAP_EVCNT_DEFINE(name) /* nothing */
107 #define UVMMAP_EVCNT_INCR(ev) /* nothing */
108 #define UVMMAP_EVCNT_DECR(ev) /* nothing */
109
110 #else /* defined(UVMMAP_COUNTERS) */
111
112 #include <sys/evcnt.h>
113 #define UVMMAP_EVCNT_DEFINE(name) \
114 struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
115 "uvmmap", #name); \
116 EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
117 #define UVMMAP_EVCNT_INCR(ev) uvmmap_evcnt_##ev.ev_count++
118 #define UVMMAP_EVCNT_DECR(ev) uvmmap_evcnt_##ev.ev_count--
119
120 #endif /* defined(UVMMAP_COUNTERS) */
121
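/*
 * For illustration (editorial note, not part of the original source):
 * with UVMMAP_COUNTERS defined, UVMMAP_EVCNT_DEFINE(ubackmerge) below
 * expands to roughly
 *
 *	struct evcnt uvmmap_evcnt_ubackmerge =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uvmmap", "ubackmerge");
 *	EVCNT_ATTACH_STATIC(uvmmap_evcnt_ubackmerge);
 *
 * i.e. one statically attached event counter per name (visible with
 * "vmstat -e"); without UVMMAP_COUNTERS every definition, increment and
 * decrement expands to nothing.
 */
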
122 UVMMAP_EVCNT_DEFINE(ubackmerge)
123 UVMMAP_EVCNT_DEFINE(uforwmerge)
124 UVMMAP_EVCNT_DEFINE(ubimerge)
125 UVMMAP_EVCNT_DEFINE(unomerge)
126 UVMMAP_EVCNT_DEFINE(kbackmerge)
127 UVMMAP_EVCNT_DEFINE(kforwmerge)
128 UVMMAP_EVCNT_DEFINE(kbimerge)
129 UVMMAP_EVCNT_DEFINE(knomerge)
130 UVMMAP_EVCNT_DEFINE(map_call)
131 UVMMAP_EVCNT_DEFINE(mlk_call)
132 UVMMAP_EVCNT_DEFINE(mlk_hint)
133 UVMMAP_EVCNT_DEFINE(mlk_list)
134 UVMMAP_EVCNT_DEFINE(mlk_tree)
135 UVMMAP_EVCNT_DEFINE(mlk_treeloop)
136 UVMMAP_EVCNT_DEFINE(mlk_listloop)
137
138 UVMMAP_EVCNT_DEFINE(uke_alloc)
139 UVMMAP_EVCNT_DEFINE(uke_free)
140 UVMMAP_EVCNT_DEFINE(ukh_alloc)
141 UVMMAP_EVCNT_DEFINE(ukh_free)
142
143 const char vmmapbsy[] = "vmmapbsy";
144
145 /*
146 * cache for vmspace structures.
147 */
148
149 static struct pool_cache uvm_vmspace_cache;
150
151 /*
152 * cache for dynamically-allocated map entries.
153 */
154
155 static struct pool_cache uvm_map_entry_cache;
156
157 MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
158 MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
159
160 #ifdef PMAP_GROWKERNEL
161 /*
162 * This global represents the end of the kernel virtual address
163 * space. If we want to exceed this, we must grow the kernel
164 * virtual address space dynamically.
165 *
166 * Note, this variable is locked by kernel_map's lock.
167 */
168 vaddr_t uvm_maxkaddr;
169 #endif
170
171 #ifndef __USER_VA0_IS_SAFE
172 #ifndef __USER_VA0_DISABLE_DEFAULT
173 #define __USER_VA0_DISABLE_DEFAULT 1
174 #endif
175 #ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
176 #undef __USER_VA0_DISABLE_DEFAULT
177 #define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
178 #endif
179 static int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
180 #endif
181
182 /*
183 * macros
184 */
185
186 /*
187 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
188 * for the vm_map.
189 */
190 extern struct vm_map *pager_map; /* XXX */
191 #define VM_MAP_USE_KMAPENT_FLAGS(flags) \
192 (((flags) & VM_MAP_INTRSAFE) != 0)
193 #define VM_MAP_USE_KMAPENT(map) \
194 (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)
195
196 /*
197 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
198 */
199
200 #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
201 prot, maxprot, inh, adv, wire) \
202 ((ent)->etype == (type) && \
203 (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
204 == 0 && \
205 (ent)->object.uvm_obj == (uobj) && \
206 (ent)->protection == (prot) && \
207 (ent)->max_protection == (maxprot) && \
208 (ent)->inheritance == (inh) && \
209 (ent)->advice == (adv) && \
210 (ent)->wired_count == (wire))
211
212 /*
213 * uvm_map_entry_link: insert entry into a map
214 *
215 * => map must be locked
216 */
217 #define uvm_map_entry_link(map, after_where, entry) do { \
218 uvm_mapent_check(entry); \
219 (map)->nentries++; \
220 (entry)->prev = (after_where); \
221 (entry)->next = (after_where)->next; \
222 (entry)->prev->next = (entry); \
223 (entry)->next->prev = (entry); \
224 uvm_rb_insert((map), (entry)); \
225 } while (/*CONSTCOND*/ 0)
226
227 /*
228 * uvm_map_entry_unlink: remove entry from a map
229 *
230 * => map must be locked
231 */
232 #define uvm_map_entry_unlink(map, entry) do { \
233 KASSERT((entry) != (map)->first_free); \
234 KASSERT((entry) != (map)->hint); \
235 uvm_mapent_check(entry); \
236 (map)->nentries--; \
237 (entry)->next->prev = (entry)->prev; \
238 (entry)->prev->next = (entry)->next; \
239 uvm_rb_remove((map), (entry)); \
240 } while (/*CONSTCOND*/ 0)
241
242 /*
243 * SAVE_HINT: saves the specified entry as the hint for future lookups.
244 *
245 * => map need not be locked.
246 */
247 #define SAVE_HINT(map, check, value) do { \
248 if ((map)->hint == (check)) \
249 (map)->hint = (value); \
250 } while (/*CONSTCOND*/ 0)
251
252 /*
253 * clear_hints: ensure that hints don't point to the entry.
254 *
255 * => map must be write-locked.
256 */
257 static void
258 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
259 {
260
261 SAVE_HINT(map, ent, ent->prev);
262 if (map->first_free == ent) {
263 map->first_free = ent->prev;
264 }
265 }
266
267 /*
268 * VM_MAP_RANGE_CHECK: check and correct range
269 *
270 * => map must at least be read locked
271 */
272
273 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
274 if (start < vm_map_min(map)) \
275 start = vm_map_min(map); \
276 if (end > vm_map_max(map)) \
277 end = vm_map_max(map); \
278 if (start > end) \
279 start = end; \
280 } while (/*CONSTCOND*/ 0)
281
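/*
 * Illustrative sketch (editorial addition, not from the original file):
 * an operation that takes a user-supplied range would typically do
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	... operate on [start, end) ...
 *	vm_map_unlock(map);
 *
 * after which start <= end holds and the range has been clamped against
 * vm_map_min()/vm_map_max(), so an entirely bogus request degenerates to
 * an empty range instead of touching addresses outside the map.
 */
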
282 /*
283 * local prototypes
284 */
285
286 static struct vm_map_entry *
287 uvm_mapent_alloc(struct vm_map *, int);
288 static struct vm_map_entry *
289 uvm_mapent_alloc_split(struct vm_map *,
290 const struct vm_map_entry *, int,
291 struct uvm_mapent_reservation *);
292 static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
293 static void uvm_mapent_free(struct vm_map_entry *);
294 #if defined(DEBUG)
295 static void _uvm_mapent_check(const struct vm_map_entry *, const char *,
296 int);
297 #define uvm_mapent_check(map) _uvm_mapent_check(map, __FILE__, __LINE__)
298 #else /* defined(DEBUG) */
299 #define uvm_mapent_check(e) /* nothing */
300 #endif /* defined(DEBUG) */
301 static struct vm_map_entry *
302 uvm_kmapent_alloc(struct vm_map *, int);
303 static void uvm_kmapent_free(struct vm_map_entry *);
304 static vsize_t uvm_kmapent_overhead(vsize_t);
305
306 static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
307 static void uvm_map_reference_amap(struct vm_map_entry *, int);
308 static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
309 int, struct vm_map_entry *);
310 static void uvm_map_unreference_amap(struct vm_map_entry *, int);
311
312 int _uvm_map_sanity(struct vm_map *);
313 int _uvm_tree_sanity(struct vm_map *);
314 static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);
315
316 #define ROOT_ENTRY(map) ((struct vm_map_entry *)(map)->rb_tree.rbt_root)
317 #define LEFT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_left)
318 #define RIGHT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_right)
319 #define PARENT_ENTRY(map, entry) \
320 (ROOT_ENTRY(map) == (entry) \
321 ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
322
323 static int
324 uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
325 {
326 const struct vm_map_entry *eparent = nparent;
327 const struct vm_map_entry *ekey = nkey;
328
329 KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
330 KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);
331
332 if (eparent->start < ekey->start)
333 return -1;
334 if (eparent->end >= ekey->start)
335 return 1;
336 return 0;
337 }
338
339 static int
340 uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
341 {
342 const struct vm_map_entry *eparent = nparent;
343 const vaddr_t va = *(const vaddr_t *) vkey;
344
345 if (eparent->start < va)
346 return -1;
347 if (eparent->end >= va)
348 return 1;
349 return 0;
350 }
351
352 static const rb_tree_ops_t uvm_map_tree_ops = {
353 .rbto_compare_nodes = uvm_map_compare_nodes,
354 .rbto_compare_key = uvm_map_compare_key,
355 .rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
356 .rbto_context = NULL
357 };
358
359 /*
360 * uvm_rb_gap: return the gap size between our entry and next entry.
361 */
362 static inline vsize_t
363 uvm_rb_gap(const struct vm_map_entry *entry)
364 {
365
366 KASSERT(entry->next != NULL);
367 return entry->next->start - entry->end;
368 }
369
370 static vsize_t
371 uvm_rb_maxgap(const struct vm_map_entry *entry)
372 {
373 struct vm_map_entry *child;
374 vsize_t maxgap = entry->gap;
375
376 /*
377 * We need maxgap to be the largest gap of us or any of our
378 * descendants. Since each of our children's maxgap is the
379 * cached value of their largest gap of themselves or their
380 * descendants, we can just use that value and avoid recursing
381 * down the tree to calculate it.
382 */
383 if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
384 maxgap = child->maxgap;
385
386 if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
387 maxgap = child->maxgap;
388
389 return maxgap;
390 }
391
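/*
 * Editorial note (not in the original source): the cached-gap invariant
 * maintained here is, for every tree node e,
 *
 *	e->maxgap == MAX(e->gap,
 *	    LEFT_ENTRY(e)  == NULL ? 0 : LEFT_ENTRY(e)->maxgap,
 *	    RIGHT_ENTRY(e) == NULL ? 0 : RIGHT_ENTRY(e)->maxgap)
 *
 * so uvm_rb_maxgap() can recompute one node's maxgap from the two cached
 * child values in constant time, and uvm_rb_fixup() below only needs to
 * walk from a changed entry up to the root.
 */
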
392 static void
393 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
394 {
395 struct vm_map_entry *parent;
396
397 KASSERT(entry->gap == uvm_rb_gap(entry));
398 entry->maxgap = uvm_rb_maxgap(entry);
399
400 while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
401 struct vm_map_entry *brother;
402 vsize_t maxgap = parent->gap;
403 unsigned int which;
404
405 KDASSERT(parent->gap == uvm_rb_gap(parent));
406 if (maxgap < entry->maxgap)
407 maxgap = entry->maxgap;
408 /*
409 * Since we work towards the root, we know entry's maxgap
410 * value is OK, but its brothers may now be out-of-date due
411 * to rebalancing. So refresh it.
412 */
413 which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
414 brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
415 if (brother != NULL) {
416 KDASSERT(brother->gap == uvm_rb_gap(brother));
417 brother->maxgap = uvm_rb_maxgap(brother);
418 if (maxgap < brother->maxgap)
419 maxgap = brother->maxgap;
420 }
421
422 parent->maxgap = maxgap;
423 entry = parent;
424 }
425 }
426
427 static void
428 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
429 {
430 struct vm_map_entry *ret;
431
432 entry->gap = entry->maxgap = uvm_rb_gap(entry);
433 if (entry->prev != &map->header)
434 entry->prev->gap = uvm_rb_gap(entry->prev);
435
436 ret = rb_tree_insert_node(&map->rb_tree, entry);
437 KASSERTMSG(ret == entry,
438 ("uvm_rb_insert: map %p: duplicate entry %p", map, ret)
439 );
440
441 /*
442 * If the previous entry is not our immediate left child, then it's an
443 * ancestor and will be fixed up on the way to the root. We don't
444 * have to check entry->prev against &map->header since &map->header
445 * will never be in the tree.
446 */
447 uvm_rb_fixup(map,
448 LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
449 }
450
451 static void
452 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
453 {
454 struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;
455
456 /*
457 * If we are removing an interior node, then an adjacent node will
458 * be used to replace its position in the tree. Therefore we will
459 * need to fixup the tree starting at the parent of the replacement
460 * node. So record their parents for later use.
461 */
462 if (entry->prev != &map->header)
463 prev_parent = PARENT_ENTRY(map, entry->prev);
464 if (entry->next != &map->header)
465 next_parent = PARENT_ENTRY(map, entry->next);
466
467 rb_tree_remove_node(&map->rb_tree, entry);
468
469 /*
470 * If the previous node has a new parent, fixup the tree starting
471 * at the previous node's old parent.
472 */
473 if (entry->prev != &map->header) {
474 /*
475 * Update the previous entry's gap due to our absence.
476 */
477 entry->prev->gap = uvm_rb_gap(entry->prev);
478 uvm_rb_fixup(map, entry->prev);
479 if (prev_parent != NULL
480 && prev_parent != entry
481 && prev_parent != PARENT_ENTRY(map, entry->prev))
482 uvm_rb_fixup(map, prev_parent);
483 }
484
485 /*
486 * If the next node has a new parent, fixup the tree starting
487 * at the next node's old parent.
488 */
489 if (entry->next != &map->header) {
490 uvm_rb_fixup(map, entry->next);
491 if (next_parent != NULL
492 && next_parent != entry
493 && next_parent != PARENT_ENTRY(map, entry->next))
494 uvm_rb_fixup(map, next_parent);
495 }
496 }
497
498 #if defined(DEBUG)
499 int uvm_debug_check_map = 0;
500 int uvm_debug_check_rbtree = 0;
501 #define uvm_map_check(map, name) \
502 _uvm_map_check((map), (name), __FILE__, __LINE__)
503 static void
504 _uvm_map_check(struct vm_map *map, const char *name,
505 const char *file, int line)
506 {
507
508 if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
509 (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
510 panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
511 name, map, file, line);
512 }
513 }
514 #else /* defined(DEBUG) */
515 #define uvm_map_check(map, name) /* nothing */
516 #endif /* defined(DEBUG) */
517
518 #if defined(DEBUG) || defined(DDB)
519 int
520 _uvm_map_sanity(struct vm_map *map)
521 {
522 bool first_free_found = false;
523 bool hint_found = false;
524 const struct vm_map_entry *e;
525 struct vm_map_entry *hint = map->hint;
526
527 e = &map->header;
528 for (;;) {
529 if (map->first_free == e) {
530 first_free_found = true;
531 } else if (!first_free_found && e->next->start > e->end) {
532 printf("first_free %p should be %p\n",
533 map->first_free, e);
534 return -1;
535 }
536 if (hint == e) {
537 hint_found = true;
538 }
539
540 e = e->next;
541 if (e == &map->header) {
542 break;
543 }
544 }
545 if (!first_free_found) {
546 printf("stale first_free\n");
547 return -1;
548 }
549 if (!hint_found) {
550 printf("stale hint\n");
551 return -1;
552 }
553 return 0;
554 }
555
556 int
557 _uvm_tree_sanity(struct vm_map *map)
558 {
559 struct vm_map_entry *tmp, *trtmp;
560 int n = 0, i = 1;
561
562 for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
563 if (tmp->gap != uvm_rb_gap(tmp)) {
564 printf("%d/%d gap %lx != %lx %s\n",
565 n + 1, map->nentries,
566 (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
567 tmp->next == &map->header ? "(last)" : "");
568 goto error;
569 }
570 /*
571 * If any entries are out of order, tmp->gap (which is unsigned)
572 * will wrap around and thus likely exceed the size of the map.
573 */
574 if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
575 printf("too large gap %zu\n", (size_t)tmp->gap);
576 goto error;
577 }
578 n++;
579 }
580
581 if (n != map->nentries) {
582 printf("nentries: %d vs %d\n", n, map->nentries);
583 goto error;
584 }
585
586 trtmp = NULL;
587 for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
588 if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
589 printf("maxgap %lx != %lx\n",
590 (ulong)tmp->maxgap,
591 (ulong)uvm_rb_maxgap(tmp));
592 goto error;
593 }
594 if (trtmp != NULL && trtmp->start >= tmp->start) {
595 printf("corrupt: 0x%"PRIxVADDR" >= 0x%"PRIxVADDR"\n",
596 trtmp->start, tmp->start);
597 goto error;
598 }
599
600 trtmp = tmp;
601 }
602
603 for (tmp = map->header.next; tmp != &map->header;
604 tmp = tmp->next, i++) {
605 trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
606 if (trtmp == NULL)
607 trtmp = &map->header;
608 if (tmp->prev != trtmp) {
609 printf("lookup: %d: %p->prev=%p: %p\n",
610 i, tmp, tmp->prev, trtmp);
611 goto error;
612 }
613 trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
614 if (trtmp == NULL)
615 trtmp = &map->header;
616 if (tmp->next != trtmp) {
617 printf("lookup: %d: %p->next=%p: %p\n",
618 i, tmp, tmp->next, trtmp);
619 goto error;
620 }
621 trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
622 if (trtmp != tmp) {
623 printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
624 PARENT_ENTRY(map, tmp));
625 goto error;
626 }
627 }
628
629 return (0);
630 error:
631 return (-1);
632 }
633 #endif /* defined(DEBUG) || defined(DDB) */
634
635 #ifdef DIAGNOSTIC
636 static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
637 #endif
638
639 /*
640 * vm_map_lock: acquire an exclusive (write) lock on a map.
641 *
642 * => Note that "intrsafe" maps use only exclusive, spin locks.
643 *
644 * => The locking protocol provides for guaranteed upgrade from shared ->
645 * exclusive by whichever thread currently has the map marked busy.
646 * See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among
647 * other problems, it defeats any fairness guarantees provided by RW
648 * locks.
649 */
650
651 void
652 vm_map_lock(struct vm_map *map)
653 {
654
655 if ((map->flags & VM_MAP_INTRSAFE) != 0) {
656 mutex_spin_enter(&map->mutex);
657 return;
658 }
659
660 for (;;) {
661 rw_enter(&map->lock, RW_WRITER);
662 if (map->busy == NULL)
663 break;
664 if (map->busy == curlwp)
665 break;
666 mutex_enter(&map->misc_lock);
667 rw_exit(&map->lock);
668 if (map->busy != NULL)
669 cv_wait(&map->cv, &map->misc_lock);
670 mutex_exit(&map->misc_lock);
671 }
672
673 map->timestamp++;
674 }
675
676 /*
677 * vm_map_lock_try: try to lock a map, failing if it is already locked.
678 */
679
680 bool
681 vm_map_lock_try(struct vm_map *map)
682 {
683
684 if ((map->flags & VM_MAP_INTRSAFE) != 0)
685 return mutex_tryenter(&map->mutex);
686 if (!rw_tryenter(&map->lock, RW_WRITER))
687 return false;
688 if (map->busy != NULL) {
689 rw_exit(&map->lock);
690 return false;
691 }
692
693 map->timestamp++;
694 return true;
695 }
696
697 /*
698 * vm_map_unlock: release an exclusive lock on a map.
699 */
700
701 void
702 vm_map_unlock(struct vm_map *map)
703 {
704
705 if ((map->flags & VM_MAP_INTRSAFE) != 0)
706 mutex_spin_exit(&map->mutex);
707 else {
708 KASSERT(rw_write_held(&map->lock));
709 KASSERT(map->busy == NULL || map->busy == curlwp);
710 rw_exit(&map->lock);
711 }
712 }
713
714 /*
715 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
716 * want an exclusive lock.
717 */
718
719 void
720 vm_map_unbusy(struct vm_map *map)
721 {
722
723 KASSERT(map->busy == curlwp);
724
725 /*
726 * Safe to clear 'busy' and 'waiters' with only a read lock held:
727 *
728 * o they can only be set with a write lock held
729 * o writers are blocked out with a read or write hold
730 * o at any time, only one thread owns the set of values
731 */
732 mutex_enter(&map->misc_lock);
733 map->busy = NULL;
734 cv_broadcast(&map->cv);
735 mutex_exit(&map->misc_lock);
736 }
737
738 /*
739 * vm_map_lock_read: acquire a shared (read) lock on a map.
740 */
741
742 void
743 vm_map_lock_read(struct vm_map *map)
744 {
745
746 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
747
748 rw_enter(&map->lock, RW_READER);
749 }
750
751 /*
752 * vm_map_unlock_read: release a shared lock on a map.
753 */
754
755 void
756 vm_map_unlock_read(struct vm_map *map)
757 {
758
759 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
760
761 rw_exit(&map->lock);
762 }
763
764 /*
765 * vm_map_busy: mark a map as busy.
766 *
767 * => the caller must hold the map write locked
768 */
769
770 void
771 vm_map_busy(struct vm_map *map)
772 {
773
774 KASSERT(rw_write_held(&map->lock));
775 KASSERT(map->busy == NULL);
776
777 map->busy = curlwp;
778 }
779
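/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): the typical use of the busy mechanism.  A thread that must
 * release the map lock while keeping other writers out marks the map
 * busy first; per the locking protocol notes above vm_map_lock(), the
 * busy owner is the only thread guaranteed to re-acquire the write lock.
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_unlock(map);
 *
 *	... sleep or do work without the map lock held ...
 *
 *	vm_map_lock(map);	(succeeds: map->busy == curlwp)
 *	vm_map_unbusy(map);	(wakes threads waiting in vm_map_lock())
 *	vm_map_unlock(map);
 */
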
780 /*
781 * vm_map_locked_p: return true if the map is write locked.
782 *
783 * => only for debug purposes like KASSERTs.
784 * => should not be used to verify that a map is not locked.
785 */
786
787 bool
788 vm_map_locked_p(struct vm_map *map)
789 {
790
791 if ((map->flags & VM_MAP_INTRSAFE) != 0) {
792 return mutex_owned(&map->mutex);
793 } else {
794 return rw_write_held(&map->lock);
795 }
796 }
797
798 /*
799 * uvm_mapent_alloc: allocate a map entry
800 */
801
802 static struct vm_map_entry *
803 uvm_mapent_alloc(struct vm_map *map, int flags)
804 {
805 struct vm_map_entry *me;
806 int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
807 UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
808
809 if (VM_MAP_USE_KMAPENT(map)) {
810 me = uvm_kmapent_alloc(map, flags);
811 } else {
812 me = pool_cache_get(&uvm_map_entry_cache, pflags);
813 if (__predict_false(me == NULL))
814 return NULL;
815 me->flags = 0;
816 }
817
818 UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
819 ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
820 return (me);
821 }
822
823 /*
824 * uvm_mapent_alloc_split: allocate a map entry for clipping.
825 *
826 * => map must be locked by caller if UVM_MAP_QUANTUM is set.
827 */
828
829 static struct vm_map_entry *
830 uvm_mapent_alloc_split(struct vm_map *map,
831 const struct vm_map_entry *old_entry, int flags,
832 struct uvm_mapent_reservation *umr)
833 {
834 struct vm_map_entry *me;
835
836 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
837 (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
838
839 if (old_entry->flags & UVM_MAP_QUANTUM) {
840 struct vm_map_kernel *vmk = vm_map_to_kernel(map);
841
842 KASSERT(vm_map_locked_p(map));
843 me = vmk->vmk_merged_entries;
844 KASSERT(me);
845 vmk->vmk_merged_entries = me->next;
846 KASSERT(me->flags & UVM_MAP_QUANTUM);
847 } else {
848 me = uvm_mapent_alloc(map, flags);
849 }
850
851 return me;
852 }
853
854 /*
855 * uvm_mapent_free: free map entry
856 */
857
858 static void
859 uvm_mapent_free(struct vm_map_entry *me)
860 {
861 UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
862
863 UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
864 me, me->flags, 0, 0);
865 if (me->flags & UVM_MAP_KERNEL) {
866 uvm_kmapent_free(me);
867 } else {
868 pool_cache_put(&uvm_map_entry_cache, me);
869 }
870 }
871
872 /*
873 * uvm_mapent_free_merged: free merged map entry
874 *
875 * => keep the entry if needed.
876 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
877 * => map should be locked if UVM_MAP_QUANTUM is set.
878 */
879
880 static void
881 uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
882 {
883
884 KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);
885
886 if (me->flags & UVM_MAP_QUANTUM) {
887 /*
888 * keep this entry for later splitting.
889 */
890 struct vm_map_kernel *vmk;
891
892 KASSERT(vm_map_locked_p(map));
893 KASSERT(VM_MAP_IS_KERNEL(map));
894 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
895 (me->flags & UVM_MAP_KERNEL));
896
897 vmk = vm_map_to_kernel(map);
898 me->next = vmk->vmk_merged_entries;
899 vmk->vmk_merged_entries = me;
900 } else {
901 uvm_mapent_free(me);
902 }
903 }
904
905 /*
906 * uvm_mapent_copy: copy a map entry, preserving flags
907 */
908
909 static inline void
910 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
911 {
912
913 memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
914 ((char *)src));
915 }
916
917 /*
918 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
919 * map entries.
920 *
921 * => size and flags are the same as uvm_km_suballoc's ones.
922 */
923
924 vsize_t
925 uvm_mapent_overhead(vsize_t size, int flags)
926 {
927
928 if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
929 return uvm_kmapent_overhead(size);
930 }
931 return 0;
932 }
933
934 #if defined(DEBUG)
935 static void
936 _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
937 {
938
939 if (entry->start >= entry->end) {
940 goto bad;
941 }
942 if (UVM_ET_ISOBJ(entry)) {
943 if (entry->object.uvm_obj == NULL) {
944 goto bad;
945 }
946 } else if (UVM_ET_ISSUBMAP(entry)) {
947 if (entry->object.sub_map == NULL) {
948 goto bad;
949 }
950 } else {
951 if (entry->object.uvm_obj != NULL ||
952 entry->object.sub_map != NULL) {
953 goto bad;
954 }
955 }
956 if (!UVM_ET_ISOBJ(entry)) {
957 if (entry->offset != 0) {
958 goto bad;
959 }
960 }
961
962 return;
963
964 bad:
965 panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
966 }
967 #endif /* defined(DEBUG) */
968
969 /*
970 * uvm_map_entry_unwire: unwire a map entry
971 *
972 * => map should be locked by caller
973 */
974
975 static inline void
976 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
977 {
978
979 entry->wired_count = 0;
980 uvm_fault_unwire_locked(map, entry->start, entry->end);
981 }
982
983
984 /*
985 * wrapper for calling amap_ref()
986 */
987 static inline void
988 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
989 {
990
991 amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
992 (entry->end - entry->start) >> PAGE_SHIFT, flags);
993 }
994
995
996 /*
997 * wrapper for calling amap_unref()
998 */
999 static inline void
1000 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
1001 {
1002
1003 amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
1004 (entry->end - entry->start) >> PAGE_SHIFT, flags);
1005 }
1006
1007
1008 /*
1009 * uvm_map_init: init mapping system at boot time.
1010 */
1011
1012 void
1013 uvm_map_init(void)
1014 {
1015 #if defined(UVMHIST)
1016 static struct kern_history_ent maphistbuf[100];
1017 static struct kern_history_ent pdhistbuf[100];
1018 #endif
1019
1020 /*
1021 * first, init logging system.
1022 */
1023
1024 UVMHIST_FUNC("uvm_map_init");
1025 UVMHIST_INIT_STATIC(maphist, maphistbuf);
1026 UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
1027 UVMHIST_CALLED(maphist);
1028 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
1029
1030 /*
1031 * initialize the global lock for kernel map entry.
1032 */
1033
1034 mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
1035
1036 /*
1037 * initialize caches.
1038 */
1039
1040 pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
1041 0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
1042 pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
1043 0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
1044 }
1045
1046 /*
1047 * clippers
1048 */
1049
1050 /*
1051 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
1052 */
1053
1054 static void
1055 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
1056 vaddr_t splitat)
1057 {
1058 vaddr_t adj;
1059
1060 KASSERT(entry1->start < splitat);
1061 KASSERT(splitat < entry1->end);
1062
1063 adj = splitat - entry1->start;
1064 entry1->end = entry2->start = splitat;
1065
1066 if (entry1->aref.ar_amap) {
1067 amap_splitref(&entry1->aref, &entry2->aref, adj);
1068 }
1069 if (UVM_ET_ISSUBMAP(entry1)) {
1070 /* ... unlikely to happen, but play it safe */
1071 uvm_map_reference(entry1->object.sub_map);
1072 } else if (UVM_ET_ISOBJ(entry1)) {
1073 KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
1074 entry2->offset += adj;
1075 if (entry1->object.uvm_obj->pgops &&
1076 entry1->object.uvm_obj->pgops->pgo_reference)
1077 entry1->object.uvm_obj->pgops->pgo_reference(
1078 entry1->object.uvm_obj);
1079 }
1080 }
1081
1082 /*
1083 * uvm_map_clip_start: ensure that the entry begins at or after
1084 * the starting address; if it doesn't, we split the entry.
1085 *
1086 * => caller should use UVM_MAP_CLIP_START macro rather than calling
1087 * this directly
1088 * => map must be locked by caller
1089 */
1090
1091 void
1092 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
1093 vaddr_t start, struct uvm_mapent_reservation *umr)
1094 {
1095 struct vm_map_entry *new_entry;
1096
1097 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
1098
1099 uvm_map_check(map, "clip_start entry");
1100 uvm_mapent_check(entry);
1101
1102 /*
1103 * Split off the front portion. note that we must insert the new
1104 * entry BEFORE this one, so that this entry has the specified
1105 * starting address.
1106 */
1107 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1108 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1109 uvm_mapent_splitadj(new_entry, entry, start);
1110 uvm_map_entry_link(map, entry->prev, new_entry);
1111
1112 uvm_map_check(map, "clip_start leave");
1113 }
1114
1115 /*
1116 * uvm_map_clip_end: ensure that the entry ends at or before
1117 * the ending address; if it doesn't, we split the entry
1118 *
1119 * => caller should use UVM_MAP_CLIP_END macro rather than calling
1120 * this directly
1121 * => map must be locked by caller
1122 */
1123
1124 void
1125 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
1126 struct uvm_mapent_reservation *umr)
1127 {
1128 struct vm_map_entry *new_entry;
1129
1130 uvm_map_check(map, "clip_end entry");
1131 uvm_mapent_check(entry);
1132
1133 /*
1134 * Create a new entry and insert it
1135 * AFTER the specified entry
1136 */
1137 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1138 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1139 uvm_mapent_splitadj(entry, new_entry, end);
1140 uvm_map_entry_link(map, entry, new_entry);
1141
1142 uvm_map_check(map, "clip_end leave");
1143 }
1144
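/*
 * Illustrative sketch (editorial addition, not from the original file):
 * a caller operating on the range [start, end) typically clips the
 * bounding entries so that the range is covered by whole entries.  The
 * direct calls below assume start and end fall strictly inside their
 * entries; real callers go through the UVM_MAP_CLIP_START/UVM_MAP_CLIP_END
 * macros, which skip the split when no clipping is needed.
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		uvm_map_clip_start(map, entry, start, umr);
 *	}
 *	... walk forward to the entry containing "end" ...
 *	uvm_map_clip_end(map, entry, end, umr);
 *	vm_map_unlock(map);
 */
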
1145 static void
1146 vm_map_drain(struct vm_map *map, uvm_flag_t flags)
1147 {
1148
1149 if (!VM_MAP_IS_KERNEL(map)) {
1150 return;
1151 }
1152
1153 uvm_km_va_drain(map, flags);
1154 }
1155
1156 /*
1157 * M A P - m a i n e n t r y p o i n t
1158 */
1159 /*
1160 * uvm_map: establish a valid mapping in a map
1161 *
1162 * => assume startp is page aligned.
1163 * => assume size is a multiple of PAGE_SIZE.
1164 * => assume sys_mmap provides enough of a "hint" to have us skip
1165 * over text/data/bss area.
1166 * => map must be unlocked (we will lock it)
1167 * => <uobj,uoffset> value meanings (4 cases):
1168 * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
1169 * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
1170 * [3] <uobj,uoffset> == normal mapping
1171 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
1172 *
1173 * case [4] is for kernel mappings where we don't know the offset until
1174 * we've found a virtual address. note that kernel object offsets are
1175 * always relative to vm_map_min(kernel_map).
1176 *
1177 * => if `align' is non-zero, we align the virtual address to the specified
1178 * alignment.
1179 * this is provided as a mechanism for large pages.
1180 *
1181 * => XXXCDC: need way to map in external amap?
1182 */
1183
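/*
 * Example (editorial illustration, not part of the original file): an
 * anonymous, copy-on-write user mapping at a kernel-chosen address could
 * be established roughly as follows; the protection, inheritance and
 * advice values are illustrative only.
 *
 *	vaddr_t va = 0;
 *	int error = uvm_map(&p->p_vmspace->vm_map, &va, size, NULL,
 *	    UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW));
 *
 * i.e. case [2] above: no backing object and no PMAP_PREFER hint, with
 * the chosen address returned in va.
 */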
1184 int
1185 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1186 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1187 {
1188 struct uvm_map_args args;
1189 struct vm_map_entry *new_entry;
1190 int error;
1191
1192 KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
1193 KASSERT((size & PAGE_MASK) == 0);
1194
1195 #ifndef __USER_VA0_IS_SAFE
1196 if ((flags & UVM_FLAG_FIXED) && *startp == 0 &&
1197 !VM_MAP_IS_KERNEL(map) && user_va0_disable)
1198 return EACCES;
1199 #endif
1200
1201 /*
1202 * for pager_map, allocate the new entry first to avoid sleeping
1203 * for memory while we have the map locked.
1204 *
1205 * Also, because we allocate entries for in-kernel maps
1206 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
1207 * allocate them before locking the map.
1208 */
1209
1210 new_entry = NULL;
1211 if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
1212 map == pager_map) {
1213 new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1214 if (__predict_false(new_entry == NULL))
1215 return ENOMEM;
1216 if (flags & UVM_FLAG_QUANTUM)
1217 new_entry->flags |= UVM_MAP_QUANTUM;
1218 }
1219 if (map == pager_map)
1220 flags |= UVM_FLAG_NOMERGE;
1221
1222 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1223 flags, &args);
1224 if (!error) {
1225 error = uvm_map_enter(map, &args, new_entry);
1226 *startp = args.uma_start;
1227 } else if (new_entry) {
1228 uvm_mapent_free(new_entry);
1229 }
1230
1231 #if defined(DEBUG)
1232 if (!error && VM_MAP_IS_KERNEL(map)) {
1233 uvm_km_check_empty(map, *startp, *startp + size);
1234 }
1235 #endif /* defined(DEBUG) */
1236
1237 return error;
1238 }
1239
1240 int
1241 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1242 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1243 struct uvm_map_args *args)
1244 {
1245 struct vm_map_entry *prev_entry;
1246 vm_prot_t prot = UVM_PROTECTION(flags);
1247 vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1248
1249 UVMHIST_FUNC("uvm_map_prepare");
1250 UVMHIST_CALLED(maphist);
1251
1252 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1253 map, start, size, flags);
1254 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1255
1256 /*
1257 * detect a popular device driver bug.
1258 */
1259
1260 KASSERT(doing_shutdown || curlwp != NULL ||
1261 (map->flags & VM_MAP_INTRSAFE));
1262
1263 /*
1264 * zero-sized mapping doesn't make any sense.
1265 */
1266 KASSERT(size > 0);
1267
1268 KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1269
1270 uvm_map_check(map, "map entry");
1271
1272 /*
1273 * check sanity of protection code
1274 */
1275
1276 if ((prot & maxprot) != prot) {
1277 UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
1278 prot, maxprot,0,0);
1279 return EACCES;
1280 }
1281
1282 /*
1283 * figure out where to put new VM range
1284 */
1285
1286 retry:
1287 if (vm_map_lock_try(map) == false) {
1288 if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
1289 (map->flags & VM_MAP_INTRSAFE) == 0) {
1290 return EAGAIN;
1291 }
1292 vm_map_lock(map); /* could sleep here */
1293 }
1294 prev_entry = uvm_map_findspace(map, start, size, &start,
1295 uobj, uoffset, align, flags);
1296 if (prev_entry == NULL) {
1297 unsigned int timestamp;
1298
1299 timestamp = map->timestamp;
1300 UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
1301 timestamp,0,0,0);
1302 map->flags |= VM_MAP_WANTVA;
1303 vm_map_unlock(map);
1304
1305 /*
1306 * try to reclaim kva and wait until someone does unmap.
1307 * fragile locking here, so we awaken every second to
1308 * recheck the condition.
1309 */
1310
1311 vm_map_drain(map, flags);
1312
1313 mutex_enter(&map->misc_lock);
1314 while ((map->flags & VM_MAP_WANTVA) != 0 &&
1315 map->timestamp == timestamp) {
1316 if ((flags & UVM_FLAG_WAITVA) == 0) {
1317 mutex_exit(&map->misc_lock);
1318 UVMHIST_LOG(maphist,
1319 "<- uvm_map_findspace failed!", 0,0,0,0);
1320 return ENOMEM;
1321 } else {
1322 cv_timedwait(&map->cv, &map->misc_lock, hz);
1323 }
1324 }
1325 mutex_exit(&map->misc_lock);
1326 goto retry;
1327 }
1328
1329 #ifdef PMAP_GROWKERNEL
1330 /*
1331 * If the kernel pmap can't map the requested space,
1332 * then allocate more resources for it.
1333 */
1334 if (map == kernel_map && uvm_maxkaddr < (start + size))
1335 uvm_maxkaddr = pmap_growkernel(start + size);
1336 #endif
1337
1338 UVMMAP_EVCNT_INCR(map_call);
1339
1340 /*
1341 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1342 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
1343 * either case we want to zero it before storing it in the map entry
1344 * (because it looks strange and confusing when debugging...)
1345 *
1346 * if uobj is not null
1347 * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1348 * and we do not need to change uoffset.
1349 * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1350 * now (based on the starting address of the map). this case is
1351 * for kernel object mappings where we don't know the offset until
1352 * the virtual address is found (with uvm_map_findspace). the
1353 * offset is the distance we are from the start of the map.
1354 */
1355
1356 if (uobj == NULL) {
1357 uoffset = 0;
1358 } else {
1359 if (uoffset == UVM_UNKNOWN_OFFSET) {
1360 KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1361 uoffset = start - vm_map_min(kernel_map);
1362 }
1363 }
1364
1365 args->uma_flags = flags;
1366 args->uma_prev = prev_entry;
1367 args->uma_start = start;
1368 args->uma_size = size;
1369 args->uma_uobj = uobj;
1370 args->uma_uoffset = uoffset;
1371
1372 UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
1373 return 0;
1374 }
1375
1376 int
1377 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1378 struct vm_map_entry *new_entry)
1379 {
1380 struct vm_map_entry *prev_entry = args->uma_prev;
1381 struct vm_map_entry *dead = NULL;
1382
1383 const uvm_flag_t flags = args->uma_flags;
1384 const vm_prot_t prot = UVM_PROTECTION(flags);
1385 const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1386 const vm_inherit_t inherit = UVM_INHERIT(flags);
1387 const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1388 AMAP_EXTEND_NOWAIT : 0;
1389 const int advice = UVM_ADVICE(flags);
1390 const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
1391 UVM_MAP_QUANTUM : 0;
1392
1393 vaddr_t start = args->uma_start;
1394 vsize_t size = args->uma_size;
1395 struct uvm_object *uobj = args->uma_uobj;
1396 voff_t uoffset = args->uma_uoffset;
1397
1398 const int kmap = (vm_map_pmap(map) == pmap_kernel());
1399 int merged = 0;
1400 int error;
1401 int newetype;
1402
1403 UVMHIST_FUNC("uvm_map_enter");
1404 UVMHIST_CALLED(maphist);
1405
1406 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1407 map, start, size, flags);
1408 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1409
1410 KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1411
1412 if (flags & UVM_FLAG_QUANTUM) {
1413 KASSERT(new_entry);
1414 KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
1415 }
1416
1417 if (uobj)
1418 newetype = UVM_ET_OBJ;
1419 else
1420 newetype = 0;
1421
1422 if (flags & UVM_FLAG_COPYONW) {
1423 newetype |= UVM_ET_COPYONWRITE;
1424 if ((flags & UVM_FLAG_OVERLAY) == 0)
1425 newetype |= UVM_ET_NEEDSCOPY;
1426 }
1427
1428 /*
1429 * try and insert in map by extending previous entry, if possible.
1430 * XXX: we don't try and pull back the next entry. might be useful
1431 * for a stack, but we are currently allocating our stack in advance.
1432 */
1433
1434 if (flags & UVM_FLAG_NOMERGE)
1435 goto nomerge;
1436
1437 if (prev_entry->end == start &&
1438 prev_entry != &map->header &&
1439 UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
1440 prot, maxprot, inherit, advice, 0)) {
1441
1442 if (uobj && prev_entry->offset +
1443 (prev_entry->end - prev_entry->start) != uoffset)
1444 goto forwardmerge;
1445
1446 /*
1447 * can't extend a shared amap. note: no need to lock amap to
1448 * look at refs since we don't care about its exact value.
1449 * if it is one (i.e. we have the only reference) it will stay there
1450 */
1451
1452 if (prev_entry->aref.ar_amap &&
1453 amap_refs(prev_entry->aref.ar_amap) != 1) {
1454 goto forwardmerge;
1455 }
1456
1457 if (prev_entry->aref.ar_amap) {
1458 error = amap_extend(prev_entry, size,
1459 amapwaitflag | AMAP_EXTEND_FORWARDS);
1460 if (error)
1461 goto nomerge;
1462 }
1463
1464 if (kmap) {
1465 UVMMAP_EVCNT_INCR(kbackmerge);
1466 } else {
1467 UVMMAP_EVCNT_INCR(ubackmerge);
1468 }
1469 UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
1470
1471 /*
1472 * drop our reference to uobj since we are extending a reference
1473 * that we already have (the ref count can not drop to zero).
1474 */
1475
1476 if (uobj && uobj->pgops->pgo_detach)
1477 uobj->pgops->pgo_detach(uobj);
1478
1479 /*
1480 * Now that we've merged the entries, note that we've grown
1481 * and our gap has shrunk. Then fix the tree.
1482 */
1483 prev_entry->end += size;
1484 prev_entry->gap -= size;
1485 uvm_rb_fixup(map, prev_entry);
1486
1487 uvm_map_check(map, "map backmerged");
1488
1489 UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1490 merged++;
1491 }
1492
1493 forwardmerge:
1494 if (prev_entry->next->start == (start + size) &&
1495 prev_entry->next != &map->header &&
1496 UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
1497 prot, maxprot, inherit, advice, 0)) {
1498
1499 if (uobj && prev_entry->next->offset != uoffset + size)
1500 goto nomerge;
1501
1502 /*
1503 * can't extend a shared amap. note: no need to lock amap to
1504 * look at refs since we don't care about its exact value.
1505 * if it is one (i.e. we have the only reference) it will stay there.
1506 *
1507 * note that we also can't merge two amaps, so if we
1508 * merged with the previous entry which has an amap,
1509 * and the next entry also has an amap, we give up.
1510 *
1511 * Interesting cases:
1512 * amap, new, amap -> give up second merge (single fwd extend)
1513 * amap, new, none -> double forward extend (extend again here)
1514 * none, new, amap -> double backward extend (done here)
1515 * uobj, new, amap -> single backward extend (done here)
1516 *
1517 * XXX should we attempt to deal with someone refilling
1518 * the deallocated region between two entries that are
1519 * backed by the same amap (ie, arefs is 2, "prev" and
1520 * "next" refer to it, and adding this allocation will
1521 * close the hole, thus restoring arefs to 1 and
1522 * deallocating the "next" vm_map_entry)? -- @@@
1523 */
1524
1525 if (prev_entry->next->aref.ar_amap &&
1526 (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1527 (merged && prev_entry->aref.ar_amap))) {
1528 goto nomerge;
1529 }
1530
1531 if (merged) {
1532 /*
1533 * Try to extend the amap of the previous entry to
1534 * cover the next entry as well. If it doesn't work
1535 * just skip on, don't actually give up, since we've
1536 * already completed the back merge.
1537 */
1538 if (prev_entry->aref.ar_amap) {
1539 if (amap_extend(prev_entry,
1540 prev_entry->next->end -
1541 prev_entry->next->start,
1542 amapwaitflag | AMAP_EXTEND_FORWARDS))
1543 goto nomerge;
1544 }
1545
1546 /*
1547 * Try to extend the amap of the *next* entry
1548 * back to cover the new allocation *and* the
1549 * previous entry as well (the previous merge
1550 * didn't have an amap already otherwise we
1551 * wouldn't be checking here for an amap). If
1552 * it doesn't work just skip on, again, don't
1553 * actually give up, since we've already
1554 * completed the back merge.
1555 */
1556 else if (prev_entry->next->aref.ar_amap) {
1557 if (amap_extend(prev_entry->next,
1558 prev_entry->end -
1559 prev_entry->start,
1560 amapwaitflag | AMAP_EXTEND_BACKWARDS))
1561 goto nomerge;
1562 }
1563 } else {
1564 /*
1565 * Pull the next entry's amap backwards to cover this
1566 * new allocation.
1567 */
1568 if (prev_entry->next->aref.ar_amap) {
1569 error = amap_extend(prev_entry->next, size,
1570 amapwaitflag | AMAP_EXTEND_BACKWARDS);
1571 if (error)
1572 goto nomerge;
1573 }
1574 }
1575
1576 if (merged) {
1577 if (kmap) {
1578 UVMMAP_EVCNT_DECR(kbackmerge);
1579 UVMMAP_EVCNT_INCR(kbimerge);
1580 } else {
1581 UVMMAP_EVCNT_DECR(ubackmerge);
1582 UVMMAP_EVCNT_INCR(ubimerge);
1583 }
1584 } else {
1585 if (kmap) {
1586 UVMMAP_EVCNT_INCR(kforwmerge);
1587 } else {
1588 UVMMAP_EVCNT_INCR(uforwmerge);
1589 }
1590 }
1591 UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);
1592
1593 /*
1594 * drop our reference to uobj since we are extending a reference
1595 * that we already have (the ref count can not drop to zero).
1596 * (if merged, we've already detached)
1597 */
1598 if (uobj && uobj->pgops->pgo_detach && !merged)
1599 uobj->pgops->pgo_detach(uobj);
1600
1601 if (merged) {
1602 dead = prev_entry->next;
1603 prev_entry->end = dead->end;
1604 uvm_map_entry_unlink(map, dead);
1605 if (dead->aref.ar_amap != NULL) {
1606 prev_entry->aref = dead->aref;
1607 dead->aref.ar_amap = NULL;
1608 }
1609 } else {
1610 prev_entry->next->start -= size;
1611 if (prev_entry != &map->header) {
1612 prev_entry->gap -= size;
1613 KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
1614 uvm_rb_fixup(map, prev_entry);
1615 }
1616 if (uobj)
1617 prev_entry->next->offset = uoffset;
1618 }
1619
1620 uvm_map_check(map, "map forwardmerged");
1621
1622 UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1623 merged++;
1624 }
1625
1626 nomerge:
1627 if (!merged) {
1628 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1629 if (kmap) {
1630 UVMMAP_EVCNT_INCR(knomerge);
1631 } else {
1632 UVMMAP_EVCNT_INCR(unomerge);
1633 }
1634
1635 /*
1636 * allocate new entry and link it in.
1637 */
1638
1639 if (new_entry == NULL) {
1640 new_entry = uvm_mapent_alloc(map,
1641 (flags & UVM_FLAG_NOWAIT));
1642 if (__predict_false(new_entry == NULL)) {
1643 error = ENOMEM;
1644 goto done;
1645 }
1646 }
1647 new_entry->start = start;
1648 new_entry->end = new_entry->start + size;
1649 new_entry->object.uvm_obj = uobj;
1650 new_entry->offset = uoffset;
1651
1652 new_entry->etype = newetype;
1653
1654 if (flags & UVM_FLAG_NOMERGE) {
1655 new_entry->flags |= UVM_MAP_NOMERGE;
1656 }
1657
1658 new_entry->protection = prot;
1659 new_entry->max_protection = maxprot;
1660 new_entry->inheritance = inherit;
1661 new_entry->wired_count = 0;
1662 new_entry->advice = advice;
1663 if (flags & UVM_FLAG_OVERLAY) {
1664
1665 /*
1666 * to_add: for BSS we overallocate a little since we
1667 * are likely to extend
1668 */
1669
1670 vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1671 UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1672 struct vm_amap *amap = amap_alloc(size, to_add,
1673 (flags & UVM_FLAG_NOWAIT));
1674 if (__predict_false(amap == NULL)) {
1675 error = ENOMEM;
1676 goto done;
1677 }
1678 new_entry->aref.ar_pageoff = 0;
1679 new_entry->aref.ar_amap = amap;
1680 } else {
1681 new_entry->aref.ar_pageoff = 0;
1682 new_entry->aref.ar_amap = NULL;
1683 }
1684 uvm_map_entry_link(map, prev_entry, new_entry);
1685
1686 /*
1687 * Update the free space hint
1688 */
1689
1690 if ((map->first_free == prev_entry) &&
1691 (prev_entry->end >= new_entry->start))
1692 map->first_free = new_entry;
1693
1694 new_entry = NULL;
1695 }
1696
1697 map->size += size;
1698
1699 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1700
1701 error = 0;
1702 done:
1703 if ((flags & UVM_FLAG_QUANTUM) == 0) {
1704 /*
1705 * vmk_merged_entries is locked by the map's lock.
1706 */
1707 vm_map_unlock(map);
1708 }
1709 if (new_entry && error == 0) {
1710 KDASSERT(merged);
1711 uvm_mapent_free_merged(map, new_entry);
1712 new_entry = NULL;
1713 }
1714 if (dead) {
1715 KDASSERT(merged);
1716 uvm_mapent_free_merged(map, dead);
1717 }
1718 if ((flags & UVM_FLAG_QUANTUM) != 0) {
1719 vm_map_unlock(map);
1720 }
1721 if (new_entry != NULL) {
1722 uvm_mapent_free(new_entry);
1723 }
1724 return error;
1725 }
1726
1727 /*
1728 * uvm_map_lookup_entry_bytree: lookup an entry in tree
1729 */
1730
1731 static inline bool
1732 uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1733 struct vm_map_entry **entry /* OUT */)
1734 {
1735 struct vm_map_entry *prev = &map->header;
1736 struct vm_map_entry *cur = ROOT_ENTRY(map);
1737
1738 while (cur) {
1739 UVMMAP_EVCNT_INCR(mlk_treeloop);
1740 if (address >= cur->start) {
1741 if (address < cur->end) {
1742 *entry = cur;
1743 return true;
1744 }
1745 prev = cur;
1746 cur = RIGHT_ENTRY(cur);
1747 } else
1748 cur = LEFT_ENTRY(cur);
1749 }
1750 *entry = prev;
1751 return false;
1752 }
1753
1754 /*
1755 * uvm_map_lookup_entry: find map entry at or before an address
1756 *
1757 * => map must at least be read-locked by caller
1758 * => entry is returned in "entry"
1759 * => return value is true if address is in the returned entry
1760 */
1761
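/*
 * Example (editorial illustration, not part of the original file): the
 * usual calling pattern, with at least a read lock held.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		... va lies inside [entry->start, entry->end) ...
 *	} else {
 *		... no entry maps va; entry is the entry just before the
 *		... gap containing va, or &map->header ...
 *	}
 *	vm_map_unlock_read(map);
 */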
1762 bool
1763 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1764 struct vm_map_entry **entry /* OUT */)
1765 {
1766 struct vm_map_entry *cur;
1767 bool use_tree = false;
1768 UVMHIST_FUNC("uvm_map_lookup_entry");
1769 UVMHIST_CALLED(maphist);
1770
1771 UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1772 map, address, entry, 0);
1773
1774 /*
1775 * start looking either from the head of the
1776 * list, or from the hint.
1777 */
1778
1779 cur = map->hint;
1780
1781 if (cur == &map->header)
1782 cur = cur->next;
1783
1784 UVMMAP_EVCNT_INCR(mlk_call);
1785 if (address >= cur->start) {
1786
1787 /*
1788 * go from hint to end of list.
1789 *
1790 * but first, make a quick check to see if
1791 * we are already looking at the entry we
1792 * want (which is usually the case).
1793 * note also that we don't need to save the hint
1794 * here... it is the same hint (unless we are
1795 * at the header, in which case the hint didn't
1796 * buy us anything anyway).
1797 */
1798
1799 if (cur != &map->header && cur->end > address) {
1800 UVMMAP_EVCNT_INCR(mlk_hint);
1801 *entry = cur;
1802 UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1803 cur, 0, 0, 0);
1804 uvm_mapent_check(*entry);
1805 return (true);
1806 }
1807
1808 if (map->nentries > 15)
1809 use_tree = true;
1810 } else {
1811
1812 /*
1813 * invalid hint. use tree.
1814 */
1815 use_tree = true;
1816 }
1817
1818 uvm_map_check(map, __func__);
1819
1820 if (use_tree) {
1821 /*
1822 * Simple lookup in the tree. Happens when the hint is
1823 * invalid, or nentries reach a threshold.
1824 */
1825 UVMMAP_EVCNT_INCR(mlk_tree);
1826 if (uvm_map_lookup_entry_bytree(map, address, entry)) {
1827 goto got;
1828 } else {
1829 goto failed;
1830 }
1831 }
1832
1833 /*
1834 * search linearly
1835 */
1836
1837 UVMMAP_EVCNT_INCR(mlk_list);
1838 while (cur != &map->header) {
1839 UVMMAP_EVCNT_INCR(mlk_listloop);
1840 if (cur->end > address) {
1841 if (address >= cur->start) {
1842 /*
1843 * save this lookup for future
1844 * hints, and return
1845 */
1846
1847 *entry = cur;
1848 got:
1849 SAVE_HINT(map, map->hint, *entry);
1850 UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1851 cur, 0, 0, 0);
1852 KDASSERT((*entry)->start <= address);
1853 KDASSERT(address < (*entry)->end);
1854 uvm_mapent_check(*entry);
1855 return (true);
1856 }
1857 break;
1858 }
1859 cur = cur->next;
1860 }
1861 *entry = cur->prev;
1862 failed:
1863 SAVE_HINT(map, map->hint, *entry);
1864 UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1865 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1866 KDASSERT((*entry)->next == &map->header ||
1867 address < (*entry)->next->start);
1868 return (false);
1869 }
1870
1871 /*
1872 * See if the range between start and start + length fits in the gap
1873 * between entry->end and entry->next->start. Returns 1 if it fits,
1874 * 0 if it doesn't fit, and -1 if the address wraps around.
1875 */
1876 static int
1877 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1878 vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
1879 {
1880 vaddr_t end;
1881
1882 #ifdef PMAP_PREFER
1883 /*
1884 * push start address forward as needed to avoid VAC alias problems.
1885 * we only do this if a valid offset is specified.
1886 */
1887
1888 if (uoffset != UVM_UNKNOWN_OFFSET)
1889 PMAP_PREFER(uoffset, start, length, topdown);
1890 #endif
1891 if ((flags & UVM_FLAG_COLORMATCH) != 0) {
1892 KASSERT(align < uvmexp.ncolors);
1893 if (uvmexp.ncolors > 1) {
1894 const u_int colormask = uvmexp.colormask;
1895 const u_int colorsize = colormask + 1;
1896 vaddr_t hint = atop(*start);
1897 const u_int color = hint & colormask;
1898 if (color != align) {
1899 hint -= color; /* adjust to color boundary */
1900 KASSERT((hint & colormask) == 0);
1901 if (topdown) {
1902 if (align > color)
1903 hint -= colorsize;
1904 } else {
1905 if (align < color)
1906 hint += colorsize;
1907 }
1908 *start = ptoa(hint + align); /* adjust to color */
1909 }
1910 }
1911 } else if (align != 0) {
1912 if ((*start & (align - 1)) != 0) {
1913 if (topdown)
1914 *start &= ~(align - 1);
1915 else
1916 *start = roundup(*start, align);
1917 }
1918 /*
1919 * XXX Should we PMAP_PREFER() here again?
1920 * eh...i think we're okay
1921 */
1922 }
1923
1924 /*
1925 * Find the end of the proposed new region. Be sure we didn't
1926 * wrap around the address; if so, we lose. Otherwise, if the
1927 * proposed new region fits before the next entry, we win.
1928 */
1929
1930 end = *start + length;
1931 if (end < *start)
1932 return (-1);
1933
1934 if (entry->next->start >= end && *start >= entry->end)
1935 return (1);
1936
1937 return (0);
1938 }
1939
1940 /*
1941 * uvm_map_findspace: find "length" sized space in "map".
1942 *
1943 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1944 * set in "flags" (in which case we insist on using "hint").
1945 * => "result" is VA returned
1946 * => uobj/uoffset are to be used to handle VAC alignment, if required
1947 * => if "align" is non-zero, we attempt to align to that value.
1948 * => caller must at least have read-locked map
1949 * => returns NULL on failure, or pointer to prev. map entry if success
1950 * => note this is a cross between the old vm_map_findspace and vm_map_find
1951 */
1952 */
1953 struct vm_map_entry *
1954 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1955 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1956 vsize_t align, int flags)
1957 {
1958 struct vm_map_entry *entry;
1959 struct vm_map_entry *child, *prev, *tmp;
1960 vaddr_t orig_hint;
1961 const int topdown = map->flags & VM_MAP_TOPDOWN;
1962 UVMHIST_FUNC("uvm_map_findspace");
1963 UVMHIST_CALLED(maphist);
1964
1965 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1966 map, hint, length, flags);
1967 KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || (align & (align - 1)) == 0);
1968 KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
1969 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1970
1971 uvm_map_check(map, "map_findspace entry");
1972
1973 /*
1974 * remember the original hint. if we are aligning, then we
1975 * may have to try again with no alignment constraint if
1976 * we fail the first time.
1977 */
1978
1979 orig_hint = hint;
1980 if (hint < vm_map_min(map)) { /* check ranges ... */
1981 if (flags & UVM_FLAG_FIXED) {
1982 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1983 return (NULL);
1984 }
1985 hint = vm_map_min(map);
1986 }
1987 if (hint > vm_map_max(map)) {
1988 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1989 hint, vm_map_min(map), vm_map_max(map), 0);
1990 return (NULL);
1991 }
1992
1993 /*
1994 * Look for the first possible address; if there's already
1995 * something at this address, we have to start after it.
1996 */
1997
1998 /*
1999 * @@@: there are four, no, eight cases to consider.
2000 *
2001 * 0: found, fixed, bottom up -> fail
2002 * 1: found, fixed, top down -> fail
2003 * 2: found, not fixed, bottom up -> start after entry->end,
2004 * loop up
2005 * 3: found, not fixed, top down -> start before entry->start,
2006 * loop down
2007 * 4: not found, fixed, bottom up -> check entry->next->start, fail
2008 * 5: not found, fixed, top down -> check entry->next->start, fail
2009 * 6: not found, not fixed, bottom up -> check entry->next->start,
2010 * loop up
2011 * 7: not found, not fixed, top down -> check entry->next->start,
2012 * loop down
2013 *
2014 * as you can see, it reduces to roughly five cases, and adding
2015 * top down mapping only adds one unique case (without it,
2016 * there would be four cases).
2017 */
2018
2019 if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
2020 entry = map->first_free;
2021 } else {
2022 if (uvm_map_lookup_entry(map, hint, &entry)) {
2023 /* "hint" address already in use ... */
2024 if (flags & UVM_FLAG_FIXED) {
2025 UVMHIST_LOG(maphist, "<- fixed & VA in use",
2026 0, 0, 0, 0);
2027 return (NULL);
2028 }
2029 if (topdown)
2030 /* Start from lower gap. */
2031 entry = entry->prev;
2032 } else if (flags & UVM_FLAG_FIXED) {
2033 if (entry->next->start >= hint + length &&
2034 hint + length > hint)
2035 goto found;
2036
2037 /* "hint" address is in a gap, but the gap is too small */
2038 UVMHIST_LOG(maphist, "<- fixed mapping failed",
2039 0, 0, 0, 0);
2040 return (NULL); /* only one shot at it ... */
2041 } else {
2042 /*
2043 * See if given hint fits in this gap.
2044 */
2045 switch (uvm_map_space_avail(&hint, length,
2046 uoffset, align, flags, topdown, entry)) {
2047 case 1:
2048 goto found;
2049 case -1:
2050 goto wraparound;
2051 }
2052
2053 if (topdown) {
2054 /*
2055 * Still there is a chance to fit
2056 * if hint > entry->end.
2057 */
2058 } else {
2059 /* Start from higher gap. */
2060 entry = entry->next;
2061 if (entry == &map->header)
2062 goto notfound;
2063 goto nextgap;
2064 }
2065 }
2066 }
2067
2068 /*
2069 * Note that the UVM_FLAG_FIXED case has already been handled.
2070 */
2071 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2072
2073 /* Try to find the space in the red-black tree */
2074
2075 /* Check slot before any entry */
2076 hint = topdown ? entry->next->start - length : entry->end;
2077 switch (uvm_map_space_avail(&hint, length, uoffset, align, flags,
2078 topdown, entry)) {
2079 case 1:
2080 goto found;
2081 case -1:
2082 goto wraparound;
2083 }
2084
2085 nextgap:
2086 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2087 /* If there is not enough space in the whole tree, we fail */
2088 tmp = ROOT_ENTRY(map);
2089 if (tmp == NULL || tmp->maxgap < length)
2090 goto notfound;
2091
2092 prev = NULL; /* previous candidate */
2093
2094 /* Find an entry close to hint that has enough space */
2095 for (; tmp;) {
2096 KASSERT(tmp->next->start == tmp->end + tmp->gap);
2097 if (topdown) {
2098 if (tmp->next->start < hint + length &&
2099 (prev == NULL || tmp->end > prev->end)) {
2100 if (tmp->gap >= length)
2101 prev = tmp;
2102 else if ((child = LEFT_ENTRY(tmp)) != NULL
2103 && child->maxgap >= length)
2104 prev = tmp;
2105 }
2106 } else {
2107 if (tmp->end >= hint &&
2108 (prev == NULL || tmp->end < prev->end)) {
2109 if (tmp->gap >= length)
2110 prev = tmp;
2111 else if ((child = RIGHT_ENTRY(tmp)) != NULL
2112 && child->maxgap >= length)
2113 prev = tmp;
2114 }
2115 }
2116 if (tmp->next->start < hint + length)
2117 child = RIGHT_ENTRY(tmp);
2118 else if (tmp->end > hint)
2119 child = LEFT_ENTRY(tmp);
2120 else {
2121 if (tmp->gap >= length)
2122 break;
2123 if (topdown)
2124 child = LEFT_ENTRY(tmp);
2125 else
2126 child = RIGHT_ENTRY(tmp);
2127 }
2128 if (child == NULL || child->maxgap < length)
2129 break;
2130 tmp = child;
2131 }
2132
2133 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2134 /*
2135 * Check if the entry that we found satisfies the
2136 * space requirement
2137 */
2138 if (topdown) {
2139 if (hint > tmp->next->start - length)
2140 hint = tmp->next->start - length;
2141 } else {
2142 if (hint < tmp->end)
2143 hint = tmp->end;
2144 }
2145 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2146 flags, topdown, tmp)) {
2147 case 1:
2148 entry = tmp;
2149 goto found;
2150 case -1:
2151 goto wraparound;
2152 }
2153 if (tmp->gap >= length)
2154 goto listsearch;
2155 }
2156 if (prev == NULL)
2157 goto notfound;
2158
2159 if (topdown) {
2160 KASSERT(orig_hint >= prev->next->start - length ||
2161 prev->next->start - length > prev->next->start);
2162 hint = prev->next->start - length;
2163 } else {
2164 KASSERT(orig_hint <= prev->end);
2165 hint = prev->end;
2166 }
2167 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2168 flags, topdown, prev)) {
2169 case 1:
2170 entry = prev;
2171 goto found;
2172 case -1:
2173 goto wraparound;
2174 }
2175 if (prev->gap >= length)
2176 goto listsearch;
2177
2178 if (topdown)
2179 tmp = LEFT_ENTRY(prev);
2180 else
2181 tmp = RIGHT_ENTRY(prev);
2182 for (;;) {
2183 KASSERT(tmp && tmp->maxgap >= length);
2184 if (topdown)
2185 child = RIGHT_ENTRY(tmp);
2186 else
2187 child = LEFT_ENTRY(tmp);
2188 if (child && child->maxgap >= length) {
2189 tmp = child;
2190 continue;
2191 }
2192 if (tmp->gap >= length)
2193 break;
2194 if (topdown)
2195 tmp = LEFT_ENTRY(tmp);
2196 else
2197 tmp = RIGHT_ENTRY(tmp);
2198 }
2199
2200 if (topdown) {
2201 KASSERT(orig_hint >= tmp->next->start - length ||
2202 tmp->next->start - length > tmp->next->start);
2203 hint = tmp->next->start - length;
2204 } else {
2205 KASSERT(orig_hint <= tmp->end);
2206 hint = tmp->end;
2207 }
2208 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2209 flags, topdown, tmp)) {
2210 case 1:
2211 entry = tmp;
2212 goto found;
2213 case -1:
2214 goto wraparound;
2215 }
2216
2217 /*
2218 * The tree fails to find an entry because of offset or alignment
2219 * restrictions. Search the list instead.
2220 */
2221 listsearch:
2222 /*
2223 * Look through the rest of the map, trying to fit a new region in
2224 * the gap between existing regions, or after the very last region.
2225 * note: entry->end = base VA of current gap,
2226 * entry->next->start = VA of end of current gap
2227 */
2228
2229 for (;;) {
2230 /* Update hint for current gap. */
2231 hint = topdown ? entry->next->start - length : entry->end;
2232
2233 /* See if it fits. */
2234 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2235 flags, topdown, entry)) {
2236 case 1:
2237 goto found;
2238 case -1:
2239 goto wraparound;
2240 }
2241
2242 /* Advance to next/previous gap */
2243 if (topdown) {
2244 if (entry == &map->header) {
2245 UVMHIST_LOG(maphist, "<- failed (off start)",
2246 0,0,0,0);
2247 goto notfound;
2248 }
2249 entry = entry->prev;
2250 } else {
2251 entry = entry->next;
2252 if (entry == &map->header) {
2253 UVMHIST_LOG(maphist, "<- failed (off end)",
2254 0,0,0,0);
2255 goto notfound;
2256 }
2257 }
2258 }
2259
2260 found:
2261 SAVE_HINT(map, map->hint, entry);
2262 *result = hint;
2263 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2264 KASSERT( topdown || hint >= orig_hint);
2265 KASSERT(!topdown || hint <= orig_hint);
2266 KASSERT(entry->end <= hint);
2267 KASSERT(hint + length <= entry->next->start);
2268 return (entry);
2269
2270 wraparound:
2271 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2272
2273 return (NULL);
2274
2275 notfound:
2276 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2277
2278 return (NULL);
2279 }
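
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * caller drives uvm_map_findspace().  The map must be at least
 * read-locked across the call; a real caller keeps the lock held while
 * it links the new entry into the gap, and the lock is dropped early
 * here only to keep the sketch short.  The function name and the
 * ENOMEM choice are assumptions of the sketch.
 */
#if 0	/* illustrative sketch only */
static int
example_findspace(struct vm_map *map, vsize_t size, vaddr_t *vap)
{
	struct vm_map_entry *prev_entry;
	vaddr_t va = vm_map_min(map);		/* hint: no preference */

	vm_map_lock(map);
	prev_entry = uvm_map_findspace(map, va, size, &va,
	    NULL, UVM_UNKNOWN_OFFSET, 0, 0);
	vm_map_unlock(map);
	if (prev_entry == NULL)
		return ENOMEM;		/* no gap of "size" bytes found */
	*vap = va;	/* the new mapping would go after prev_entry */
	return 0;
}
#endif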
2280
2281 /*
2282 * U N M A P - m a i n h e l p e r f u n c t i o n s
2283 */
2284
2285 /*
2286 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2287 *
2288 * => caller must check alignment and size
2289 * => map must be locked by caller
2290 * => we return a list of map entries that we've removed from the map
2291 * in "entry_list"
2292 */
2293
2294 void
2295 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2296 struct vm_map_entry **entry_list /* OUT */,
2297 struct uvm_mapent_reservation *umr, int flags)
2298 {
2299 struct vm_map_entry *entry, *first_entry, *next;
2300 vaddr_t len;
2301 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2302
2303 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2304 map, start, end, 0);
2305 VM_MAP_RANGE_CHECK(map, start, end);
2306
2307 uvm_map_check(map, "unmap_remove entry");
2308
2309 /*
2310 * find first entry
2311 */
2312
2313 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2314 /* clip and go... */
2315 entry = first_entry;
2316 UVM_MAP_CLIP_START(map, entry, start, umr);
2317 /* critical! prevents stale hint */
2318 SAVE_HINT(map, entry, entry->prev);
2319 } else {
2320 entry = first_entry->next;
2321 }
2322
2323 /*
2324 * Save the free space hint
2325 */
2326
2327 if (map->first_free != &map->header && map->first_free->start >= start)
2328 map->first_free = entry->prev;
2329
2330 /*
2331 * note: we now re-use first_entry for a different task. we remove
2332 * a number of map entries from the map and save them in a linked
2333 * list headed by "first_entry". once we remove them from the map
2334 * the caller should unlock the map and drop the references to the
2335 * backing objects [cf. uvm_unmap_detach]. the objective is to
2336 * separate unmapping from reference dropping. why?
2337 * [1] the map has to be locked for unmapping
2338 * [2] the map need not be locked for reference dropping
2339 * [3] dropping references may trigger pager I/O, and if we hit
2340 * a pager that does synchronous I/O we may have to wait for it.
2341 * [4] we would like all waiting for I/O to occur with maps unlocked
2342 * so that we don't block other threads.
2343 */
2344
2345 first_entry = NULL;
2346 *entry_list = NULL;
2347
2348 /*
2349 * break up the area into map entry sized regions and unmap. note
2350 * that all mappings have to be removed before we can even consider
2351 * dropping references to amaps or VM objects (otherwise we could end
2352 * up with a mapping to a page on the free list which would be very bad)
2353 */
2354
2355 while ((entry != &map->header) && (entry->start < end)) {
2356 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2357
2358 UVM_MAP_CLIP_END(map, entry, end, umr);
2359 next = entry->next;
2360 len = entry->end - entry->start;
2361
2362 /*
2363 * unwire before removing addresses from the pmap; otherwise
2364 * unwiring will put the entries back into the pmap (XXX).
2365 */
2366
2367 if (VM_MAPENT_ISWIRED(entry)) {
2368 uvm_map_entry_unwire(map, entry);
2369 }
2370 if (flags & UVM_FLAG_VAONLY) {
2371
2372 /* nothing */
2373
2374 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2375
2376 /*
2377 * if the map is non-pageable, any pages mapped there
2378 * must be wired and entered with pmap_kenter_pa(),
2379 * and we should free any such pages immediately.
2380 * this is mostly used for kmem_map.
2381 */
2382 KASSERT(vm_map_pmap(map) == pmap_kernel());
2383
2384 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2385 uvm_km_pgremove_intrsafe(map, entry->start,
2386 entry->end);
2387 pmap_kremove(entry->start, len);
2388 }
2389 } else if (UVM_ET_ISOBJ(entry) &&
2390 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2391 panic("%s: kernel object %p %p\n",
2392 __func__, map, entry);
2393 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2394 /*
2395 * remove mappings the standard way. lock object
2396 * and/or amap to ensure vm_page state does not
2397 * change while in pmap_remove().
2398 */
2399
2400 uvm_map_lock_entry(entry);
2401 pmap_remove(map->pmap, entry->start, entry->end);
2402 uvm_map_unlock_entry(entry);
2403 }
2404
2405 #if defined(DEBUG)
2406 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2407
2408 /*
2409 * check if there are any remaining mappings,
2410 * which would be a bug in the caller.
2411 */
2412
2413 vaddr_t va;
2414 for (va = entry->start; va < entry->end;
2415 va += PAGE_SIZE) {
2416 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2417 panic("%s: %#"PRIxVADDR" has mapping",
2418 __func__, va);
2419 }
2420 }
2421
2422 if (VM_MAP_IS_KERNEL(map)) {
2423 uvm_km_check_empty(map, entry->start,
2424 entry->end);
2425 }
2426 }
2427 #endif /* defined(DEBUG) */
2428
2429 /*
2430 * remove entry from map and put it on our list of entries
2431 * that we've nuked. then go to next entry.
2432 */
2433
2434 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2435
2436 /* critical! prevents stale hint */
2437 SAVE_HINT(map, entry, entry->prev);
2438
2439 uvm_map_entry_unlink(map, entry);
2440 KASSERT(map->size >= len);
2441 map->size -= len;
2442 entry->prev = NULL;
2443 entry->next = first_entry;
2444 first_entry = entry;
2445 entry = next;
2446 }
2447
2448 /*
2449 * Note: if map is dying, leave pmap_update() for pmap_destroy(),
2450 * which will be called later.
2451 */
2452 if ((map->flags & VM_MAP_DYING) == 0) {
2453 pmap_update(vm_map_pmap(map));
2454 } else {
2455 KASSERT(vm_map_pmap(map) != pmap_kernel());
2456 }
2457
2458 uvm_map_check(map, "unmap_remove leave");
2459
2460 /*
2461 * now we've cleaned up the map and are ready for the caller to drop
2462 * references to the mapped objects.
2463 */
2464
2465 *entry_list = first_entry;
2466 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2467
2468 if (map->flags & VM_MAP_WANTVA) {
2469 mutex_enter(&map->misc_lock);
2470 map->flags &= ~VM_MAP_WANTVA;
2471 cv_broadcast(&map->cv);
2472 mutex_exit(&map->misc_lock);
2473 }
2474 }
2475
2476 /*
2477 * uvm_unmap_detach: drop references in a chain of map entries
2478 *
2479 * => we will free the map entries as we traverse the list.
2480 */
2481
2482 void
2483 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2484 {
2485 struct vm_map_entry *next_entry;
2486 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2487
2488 while (first_entry) {
2489 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2490 UVMHIST_LOG(maphist,
2491 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2492 first_entry, first_entry->aref.ar_amap,
2493 first_entry->object.uvm_obj,
2494 UVM_ET_ISSUBMAP(first_entry));
2495
2496 /*
2497 * drop reference to amap, if we've got one
2498 */
2499
2500 if (first_entry->aref.ar_amap)
2501 uvm_map_unreference_amap(first_entry, flags);
2502
2503 /*
2504 * drop reference to our backing object, if we've got one
2505 */
2506
2507 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2508 if (UVM_ET_ISOBJ(first_entry) &&
2509 first_entry->object.uvm_obj->pgops->pgo_detach) {
2510 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2511 (first_entry->object.uvm_obj);
2512 }
2513 next_entry = first_entry->next;
2514 uvm_mapent_free(first_entry);
2515 first_entry = next_entry;
2516 }
2517 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2518 }
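
/*
 * Illustrative sketch (not part of the original file): the two-phase
 * pattern described in the uvm_unmap_remove() comments above -- remove
 * the entries while holding the map write-locked, then drop the amap
 * and object references with the map unlocked so any pager I/O does
 * not block other users of the map.  This mirrors what the uvm_unmap()
 * wrapper does; the function name here is local to the sketch.
 */
#if 0	/* illustrative sketch only */
static void
example_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
	vm_map_unlock(map);

	/* drop amap/object references without holding the map lock */
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}
#endif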
2519
2520 /*
2521 * E X T R A C T I O N F U N C T I O N S
2522 */
2523
2524 /*
2525 * uvm_map_reserve: reserve space in a vm_map for future use.
2526 *
2527 * => we reserve space in a map by putting a dummy map entry in the
2528 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2529 * => map should be unlocked (we will write lock it)
2530 * => we return true if we were able to reserve space
2531 * => XXXCDC: should be inline?
2532 */
2533
2534 int
2535 uvm_map_reserve(struct vm_map *map, vsize_t size,
2536 vaddr_t offset /* hint for pmap_prefer */,
2537 vsize_t align /* alignment */,
2538 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2539 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2540 {
2541 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2542
2543 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2544 map,size,offset,raddr);
2545
2546 size = round_page(size);
2547
2548 /*
2549 * reserve some virtual space.
2550 */
2551
2552 if (uvm_map(map, raddr, size, NULL, offset, align,
2553 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2554 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2555 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2556 return (false);
2557 }
2558
2559 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2560 return (true);
2561 }
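
/*
 * Illustrative sketch (not part of the original file): reserving kernel
 * virtual space for later replacement, much as uvm_map_extract() below
 * does in its step 1.  kernel_map is assumed to be the usual kernel map
 * pointer; the function name is local to the sketch.
 */
#if 0	/* illustrative sketch only */
static int
example_reserve(vsize_t len, vaddr_t *vap)
{
	vaddr_t va = vm_map_min(kernel_map);	/* IN: hint, OUT: result */

	if (!uvm_map_reserve(kernel_map, len, 0, 0, &va, 0))
		return ENOMEM;

	/* "va" now names a blank, VM_PROT_NONE entry covering "len" bytes */
	*vap = va;
	return 0;
}
#endif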
2562
2563 /*
2564 * uvm_map_replace: replace a reserved (blank) area of memory with
2565 * real mappings.
2566 *
2567 * => caller must WRITE-LOCK the map
2568 * => we return true if replacement was a success
2569 * => we expect the newents chain to have nnewents entries on it and
2570 * we expect newents->prev to point to the last entry on the list
2571 * => note newents is allowed to be NULL
2572 */
2573
2574 static int
2575 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2576 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2577 struct vm_map_entry **oldentryp)
2578 {
2579 struct vm_map_entry *oldent, *last;
2580
2581 uvm_map_check(map, "map_replace entry");
2582
2583 /*
2584 * first find the blank map entry at the specified address
2585 */
2586
2587 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2588 return (false);
2589 }
2590
2591 /*
2592 * check to make sure we have a proper blank entry
2593 */
2594
2595 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2596 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2597 }
2598 if (oldent->start != start || oldent->end != end ||
2599 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2600 return (false);
2601 }
2602
2603 #ifdef DIAGNOSTIC
2604
2605 /*
2606 * sanity check the newents chain
2607 */
2608
2609 {
2610 struct vm_map_entry *tmpent = newents;
2611 int nent = 0;
2612 vsize_t sz = 0;
2613 vaddr_t cur = start;
2614
2615 while (tmpent) {
2616 nent++;
2617 sz += tmpent->end - tmpent->start;
2618 if (tmpent->start < cur)
2619 panic("uvm_map_replace1");
2620 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2621 panic("uvm_map_replace2: "
2622 "tmpent->start=0x%"PRIxVADDR
2623 ", tmpent->end=0x%"PRIxVADDR
2624 ", end=0x%"PRIxVADDR,
2625 tmpent->start, tmpent->end, end);
2626 }
2627 cur = tmpent->end;
2628 if (tmpent->next) {
2629 if (tmpent->next->prev != tmpent)
2630 panic("uvm_map_replace3");
2631 } else {
2632 if (newents->prev != tmpent)
2633 panic("uvm_map_replace4");
2634 }
2635 tmpent = tmpent->next;
2636 }
2637 if (nent != nnewents)
2638 panic("uvm_map_replace5");
2639 if (sz != nsize)
2640 panic("uvm_map_replace6");
2641 }
2642 #endif
2643
2644 /*
2645 * map entry is a valid blank! replace it. (this does all the
2646 * work of map entry link/unlink...).
2647 */
2648
2649 if (newents) {
2650 last = newents->prev;
2651
2652 /* critical: flush stale hints out of map */
2653 SAVE_HINT(map, map->hint, newents);
2654 if (map->first_free == oldent)
2655 map->first_free = last;
2656
2657 last->next = oldent->next;
2658 last->next->prev = last;
2659
2660 /* Fix RB tree */
2661 uvm_rb_remove(map, oldent);
2662
2663 newents->prev = oldent->prev;
2664 newents->prev->next = newents;
2665 map->nentries = map->nentries + (nnewents - 1);
2666
2667 /* Fixup the RB tree */
2668 {
2669 int i;
2670 struct vm_map_entry *tmp;
2671
2672 tmp = newents;
2673 for (i = 0; i < nnewents && tmp; i++) {
2674 uvm_rb_insert(map, tmp);
2675 tmp = tmp->next;
2676 }
2677 }
2678 } else {
2679 /* NULL list of new entries: just remove the old one */
2680 clear_hints(map, oldent);
2681 uvm_map_entry_unlink(map, oldent);
2682 }
2683 map->size -= end - start - nsize;
2684
2685 uvm_map_check(map, "map_replace leave");
2686
2687 /*
2688 * now we can free the old blank entry and return.
2689 */
2690
2691 *oldentryp = oldent;
2692 return (true);
2693 }
2694
2695 /*
2696 * uvm_map_extract: extract a mapping from a map and put it somewhere
2697 * (maybe removing the old mapping)
2698 *
2699 * => maps should be unlocked (we will write lock them)
2700 * => returns 0 on success, error code otherwise
2701 * => start must be page aligned
2702 * => len must be page sized
2703 * => flags:
2704 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2705 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2706 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2707 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2708 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2709 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2710 * be used from within the kernel in a kernel level map <<<
2711 */
2712
2713 int
2714 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2715 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2716 {
2717 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2718 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2719 *deadentry, *oldentry;
2720 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2721 vsize_t elen;
2722 int nchain, error, copy_ok;
2723 vsize_t nsize;
2724 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2725
2726 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2727 len,0);
2728 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2729
2730 /*
2731 * step 0: sanity check: start must be on a page boundary, length
2732 * must be page sized. can't ask for CONTIG/QREF if you asked for
2733 * REMOVE.
2734 */
2735
2736 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2737 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2738 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2739
2740 /*
2741 * step 1: reserve space in the target map for the extracted area
2742 */
2743
2744 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2745 dstaddr = vm_map_min(dstmap);
2746 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2747 return (ENOMEM);
2748 *dstaddrp = dstaddr; /* pass address back to caller */
2749 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2750 } else {
2751 dstaddr = *dstaddrp;
2752 }
2753
2754 /*
2755 * step 2: setup for the extraction process loop by init'ing the
2756 * map entry chain, locking src map, and looking up the first useful
2757 * entry in the map.
2758 */
2759
2760 end = start + len;
2761 newend = dstaddr + len;
2762 chain = endchain = NULL;
2763 nchain = 0;
2764 nsize = 0;
2765 vm_map_lock(srcmap);
2766
2767 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2768
2769 /* "start" is within an entry */
2770 if (flags & UVM_EXTRACT_QREF) {
2771
2772 /*
2773 * for quick references we don't clip the entry, so
2774 * the entry may map space "before" the starting
2775 * virtual address... this is the "fudge" factor
2776 * (which can be non-zero only the first time
2777 * through the "while" loop in step 3).
2778 */
2779
2780 fudge = start - entry->start;
2781 } else {
2782
2783 /*
2784 * normal reference: we clip the map to fit (thus
2785 * fudge is zero)
2786 */
2787
2788 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2789 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2790 fudge = 0;
2791 }
2792 } else {
2793
2794 /* "start" is not within an entry ... skip to next entry */
2795 if (flags & UVM_EXTRACT_CONTIG) {
2796 error = EINVAL;
2797 goto bad; /* definite hole here ... */
2798 }
2799
2800 entry = entry->next;
2801 fudge = 0;
2802 }
2803
2804 /* save values from srcmap for step 6 */
2805 orig_entry = entry;
2806 orig_fudge = fudge;
2807
2808 /*
2809 * step 3: now start looping through the map entries, extracting
2810 * as we go.
2811 */
2812
2813 while (entry->start < end && entry != &srcmap->header) {
2814
2815 /* if we are not doing a quick reference, clip it */
2816 if ((flags & UVM_EXTRACT_QREF) == 0)
2817 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2818
2819 /* clear needs_copy (allow chunking) */
2820 if (UVM_ET_ISNEEDSCOPY(entry)) {
2821 amap_copy(srcmap, entry,
2822 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2823 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2824 error = ENOMEM;
2825 goto bad;
2826 }
2827
2828 /* amap_copy could clip (during chunk)! update fudge */
2829 if (fudge) {
2830 fudge = start - entry->start;
2831 orig_fudge = fudge;
2832 }
2833 }
2834
2835 /* calculate the offset of this from "start" */
2836 oldoffset = (entry->start + fudge) - start;
2837
2838 /* allocate a new map entry */
2839 newentry = uvm_mapent_alloc(dstmap, 0);
2840 if (newentry == NULL) {
2841 error = ENOMEM;
2842 goto bad;
2843 }
2844
2845 /* set up new map entry */
2846 newentry->next = NULL;
2847 newentry->prev = endchain;
2848 newentry->start = dstaddr + oldoffset;
2849 newentry->end =
2850 newentry->start + (entry->end - (entry->start + fudge));
2851 if (newentry->end > newend || newentry->end < newentry->start)
2852 newentry->end = newend;
2853 newentry->object.uvm_obj = entry->object.uvm_obj;
2854 if (newentry->object.uvm_obj) {
2855 if (newentry->object.uvm_obj->pgops->pgo_reference)
2856 newentry->object.uvm_obj->pgops->
2857 pgo_reference(newentry->object.uvm_obj);
2858 newentry->offset = entry->offset + fudge;
2859 } else {
2860 newentry->offset = 0;
2861 }
2862 newentry->etype = entry->etype;
2863 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2864 entry->max_protection : entry->protection;
2865 newentry->max_protection = entry->max_protection;
2866 newentry->inheritance = entry->inheritance;
2867 newentry->wired_count = 0;
2868 newentry->aref.ar_amap = entry->aref.ar_amap;
2869 if (newentry->aref.ar_amap) {
2870 newentry->aref.ar_pageoff =
2871 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2872 uvm_map_reference_amap(newentry, AMAP_SHARED |
2873 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2874 } else {
2875 newentry->aref.ar_pageoff = 0;
2876 }
2877 newentry->advice = entry->advice;
2878 if ((flags & UVM_EXTRACT_QREF) != 0) {
2879 newentry->flags |= UVM_MAP_NOMERGE;
2880 }
2881
2882 /* now link it on the chain */
2883 nchain++;
2884 nsize += newentry->end - newentry->start;
2885 if (endchain == NULL) {
2886 chain = endchain = newentry;
2887 } else {
2888 endchain->next = newentry;
2889 endchain = newentry;
2890 }
2891
2892 /* end of 'while' loop! */
2893 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2894 (entry->next == &srcmap->header ||
2895 entry->next->start != entry->end)) {
2896 error = EINVAL;
2897 goto bad;
2898 }
2899 entry = entry->next;
2900 fudge = 0;
2901 }
2902
2903 /*
2904 * step 4: close off chain (in format expected by uvm_map_replace)
2905 */
2906
2907 if (chain)
2908 chain->prev = endchain;
2909
2910 /*
2911 * step 5: attempt to lock the dest map so we can pmap_copy.
2912 * note usage of copy_ok:
2913 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2914 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2915 */
2916
2917 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2918 copy_ok = 1;
2919 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2920 nchain, nsize, &resentry)) {
2921 if (srcmap != dstmap)
2922 vm_map_unlock(dstmap);
2923 error = EIO;
2924 goto bad;
2925 }
2926 } else {
2927 copy_ok = 0;
2928 /* replacement deferred until step 7 */
2929 }
2930
2931 /*
2932 * step 6: traverse the srcmap a second time to do the following:
2933 * - if we got a lock on the dstmap do pmap_copy
2934 * - if UVM_EXTRACT_REMOVE remove the entries
2935 * we make use of orig_entry and orig_fudge (saved in step 2)
2936 */
2937
2938 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2939
2940 /* purge possible stale hints from srcmap */
2941 if (flags & UVM_EXTRACT_REMOVE) {
2942 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2943 if (srcmap->first_free != &srcmap->header &&
2944 srcmap->first_free->start >= start)
2945 srcmap->first_free = orig_entry->prev;
2946 }
2947
2948 entry = orig_entry;
2949 fudge = orig_fudge;
2950 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2951
2952 while (entry->start < end && entry != &srcmap->header) {
2953 if (copy_ok) {
2954 oldoffset = (entry->start + fudge) - start;
2955 elen = MIN(end, entry->end) -
2956 (entry->start + fudge);
2957 pmap_copy(dstmap->pmap, srcmap->pmap,
2958 dstaddr + oldoffset, elen,
2959 entry->start + fudge);
2960 }
2961
2962 /* we advance "entry" in the following if statement */
2963 if (flags & UVM_EXTRACT_REMOVE) {
2964 uvm_map_lock_entry(entry);
2965 pmap_remove(srcmap->pmap, entry->start,
2966 entry->end);
2967 uvm_map_unlock_entry(entry);
2968 oldentry = entry; /* save entry */
2969 entry = entry->next; /* advance */
2970 uvm_map_entry_unlink(srcmap, oldentry);
2971 /* add to dead list */
2972 oldentry->next = deadentry;
2973 deadentry = oldentry;
2974 } else {
2975 entry = entry->next; /* advance */
2976 }
2977
2978 /* end of 'while' loop */
2979 fudge = 0;
2980 }
2981 pmap_update(srcmap->pmap);
2982
2983 /*
2984 * unlock dstmap. we will dispose of deadentry in
2985 * step 7 if needed
2986 */
2987
2988 if (copy_ok && srcmap != dstmap)
2989 vm_map_unlock(dstmap);
2990
2991 } else {
2992 deadentry = NULL;
2993 }
2994
2995 /*
2996 * step 7: we are done with the source map, unlock. if copy_ok
2997 * is 0 then we have not replaced the dummy mapping in dstmap yet
2998 * and we need to do so now.
2999 */
3000
3001 vm_map_unlock(srcmap);
3002 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
3003 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
3004
3005 /* now do the replacement if we didn't do it in step 5 */
3006 if (copy_ok == 0) {
3007 vm_map_lock(dstmap);
3008 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
3009 nchain, nsize, &resentry);
3010 vm_map_unlock(dstmap);
3011
3012 if (error == false) {
3013 error = EIO;
3014 goto bad2;
3015 }
3016 }
3017
3018 if (resentry != NULL)
3019 uvm_mapent_free(resentry);
3020
3021 return (0);
3022
3023 /*
3024 * bad: failure recovery
3025 */
3026 bad:
3027 vm_map_unlock(srcmap);
3028 bad2: /* src already unlocked */
3029 if (chain)
3030 uvm_unmap_detach(chain,
3031 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3032
3033 if (resentry != NULL)
3034 uvm_mapent_free(resentry);
3035
3036 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3037 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
3038 }
3039 return (error);
3040 }
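
/*
 * Illustrative sketch (not part of the original file): a temporary,
 * quick-reference extraction of part of another map into kernel_map,
 * similar in spirit to what uvm_io() does.  The exact flag combination
 * and the function name are assumptions of the sketch; note that QREF
 * extractions must be torn down again via the QREF path, and that
 * start must be page aligned and len page sized.
 */
#if 0	/* illustrative sketch only */
static int
example_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
    vaddr_t *kvap)
{
	int error;

	error = uvm_map_extract(srcmap, start, len, kernel_map, kvap,
	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
	if (error)
		return error;

	/* ... access the source pages through *kvap here ... */

	uvm_unmap(kernel_map, *kvap, *kvap + len);
	return 0;
}
#endif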
3041
3042 /* end of extraction functions */
3043
3044 /*
3045 * uvm_map_submap: punch down part of a map into a submap
3046 *
3047 * => only the kernel_map is allowed to be submapped
3048 * => the purpose of submapping is to break up the locking granularity
3049 * of a larger map
3050 * => the range specified must have been mapped previously with a uvm_map()
3051 * call [with uobj==NULL] to create a blank map entry in the main map.
3052 * [And it had better still be blank!]
3053 * => maps which contain submaps should never be copied or forked.
3054 * => to remove a submap, use uvm_unmap() on the main map
3055 * and then uvm_map_deallocate() the submap.
3056 * => main map must be unlocked.
3057 * => submap must have been init'd and have a zero reference count.
3058 * [need not be locked as we don't actually reference it]
3059 */
3060
3061 int
3062 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3063 struct vm_map *submap)
3064 {
3065 struct vm_map_entry *entry;
3066 struct uvm_mapent_reservation umr;
3067 int error;
3068
3069 uvm_mapent_reserve(map, &umr, 2, 0);
3070
3071 vm_map_lock(map);
3072 VM_MAP_RANGE_CHECK(map, start, end);
3073
3074 if (uvm_map_lookup_entry(map, start, &entry)) {
3075 UVM_MAP_CLIP_START(map, entry, start, &umr);
3076 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
3077 } else {
3078 entry = NULL;
3079 }
3080
3081 if (entry != NULL &&
3082 entry->start == start && entry->end == end &&
3083 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3084 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3085 entry->etype |= UVM_ET_SUBMAP;
3086 entry->object.sub_map = submap;
3087 entry->offset = 0;
3088 uvm_map_reference(submap);
3089 error = 0;
3090 } else {
3091 error = EINVAL;
3092 }
3093 vm_map_unlock(map);
3094
3095 uvm_mapent_unreserve(map, &umr);
3096
3097 return error;
3098 }
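
/*
 * Illustrative sketch (not part of the original file): the two steps
 * the header comment above requires -- first a blank (uobj == NULL)
 * uvm_map() allocation in the main map, then uvm_map_submap() over
 * exactly that range.  In the tree this is essentially what
 * uvm_km_suballoc() arranges; the function name and error handling
 * here are assumptions of the sketch.
 */
#if 0	/* illustrative sketch only */
static int
example_submap(struct vm_map *submap, vsize_t size, vaddr_t *vap)
{
	vaddr_t va = vm_map_min(kernel_map);
	int error;

	/* step 1: create a blank entry in the main (kernel) map */
	error = uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
	if (error)
		return error;

	/* step 2: punch the submap down over exactly that range */
	error = uvm_map_submap(kernel_map, va, va + size, submap);
	if (error)
		return error;

	*vap = va;
	return 0;
}
#endif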
3099
3100 /*
3101 * uvm_map_setup_kernel: init in-kernel map
3102 *
3103 * => map must not be in service yet.
3104 */
3105
3106 void
3107 uvm_map_setup_kernel(struct vm_map_kernel *map,
3108 vaddr_t vmin, vaddr_t vmax, int flags)
3109 {
3110
3111 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
3112 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
3113 LIST_INIT(&map->vmk_kentry_free);
3114 map->vmk_merged_entries = NULL;
3115 }
3116
3117
3118 /*
3119 * uvm_map_protect: change map protection
3120 *
3121 * => set_max means set max_protection.
3122 * => map must be unlocked.
3123 */
3124
3125 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3126 ~VM_PROT_WRITE : VM_PROT_ALL)
3127
3128 int
3129 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3130 vm_prot_t new_prot, bool set_max)
3131 {
3132 struct vm_map_entry *current, *entry;
3133 int error = 0;
3134 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
3135 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
3136 map, start, end, new_prot);
3137
3138 vm_map_lock(map);
3139 VM_MAP_RANGE_CHECK(map, start, end);
3140 if (uvm_map_lookup_entry(map, start, &entry)) {
3141 UVM_MAP_CLIP_START(map, entry, start, NULL);
3142 } else {
3143 entry = entry->next;
3144 }
3145
3146 /*
3147 * make a first pass to check for protection violations.
3148 */
3149
3150 current = entry;
3151 while ((current != &map->header) && (current->start < end)) {
3152 if (UVM_ET_ISSUBMAP(current)) {
3153 error = EINVAL;
3154 goto out;
3155 }
3156 if ((new_prot & current->max_protection) != new_prot) {
3157 error = EACCES;
3158 goto out;
3159 }
3160 /*
3161 * Don't allow VM_PROT_EXECUTE to be set on entries that
3162 * point to vnodes that are associated with a NOEXEC file
3163 * system.
3164 */
3165 if (UVM_ET_ISOBJ(current) &&
3166 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3167 struct vnode *vp =
3168 (struct vnode *) current->object.uvm_obj;
3169
3170 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3171 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3172 error = EACCES;
3173 goto out;
3174 }
3175 }
3176
3177 current = current->next;
3178 }
3179
3180 /* go back and fix up protections (no need to clip this time). */
3181
3182 current = entry;
3183 while ((current != &map->header) && (current->start < end)) {
3184 vm_prot_t old_prot;
3185
3186 UVM_MAP_CLIP_END(map, current, end, NULL);
3187 old_prot = current->protection;
3188 if (set_max)
3189 current->protection =
3190 (current->max_protection = new_prot) & old_prot;
3191 else
3192 current->protection = new_prot;
3193
3194 /*
3195 * update physical map if necessary. worry about copy-on-write
3196 * here -- CHECK THIS XXX
3197 */
3198
3199 if (current->protection != old_prot) {
3200 /* update pmap! */
3201 uvm_map_lock_entry(current);
3202 pmap_protect(map->pmap, current->start, current->end,
3203 current->protection & MASK(entry));
3204 uvm_map_unlock_entry(current);
3205
3206 /*
3207 * If this entry points at a vnode, and the
3208 * protection includes VM_PROT_EXECUTE, mark
3209 * the vnode as VEXECMAP.
3210 */
3211 if (UVM_ET_ISOBJ(current)) {
3212 struct uvm_object *uobj =
3213 current->object.uvm_obj;
3214
3215 if (UVM_OBJ_IS_VNODE(uobj) &&
3216 (current->protection & VM_PROT_EXECUTE)) {
3217 vn_markexec((struct vnode *) uobj);
3218 }
3219 }
3220 }
3221
3222 /*
3223 * If the map is configured to lock any future mappings,
3224 * wire this entry now if the old protection was VM_PROT_NONE
3225 * and the new protection is not VM_PROT_NONE.
3226 */
3227
3228 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3229 VM_MAPENT_ISWIRED(entry) == 0 &&
3230 old_prot == VM_PROT_NONE &&
3231 new_prot != VM_PROT_NONE) {
3232 if (uvm_map_pageable(map, entry->start,
3233 entry->end, false,
3234 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3235
3236 /*
3237 * If locking the entry fails, remember the
3238 * error if it's the first one. Note we
3239 * still continue setting the protection in
3240 * the map, but will return the error
3241 * condition regardless.
3242 *
3243 * XXX Ignore what the actual error is,
3244 * XXX just call it a resource shortage
3245 * XXX so that it doesn't get confused
3246 * XXX what uvm_map_protect() itself would
3247 * XXX normally return.
3248 */
3249
3250 error = ENOMEM;
3251 }
3252 }
3253 current = current->next;
3254 }
3255 pmap_update(map->pmap);
3256
3257 out:
3258 vm_map_unlock(map);
3259
3260 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3261 return error;
3262 }
3263
3264 #undef MASK
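
/*
 * Illustrative sketch (not part of the original file): how an
 * mprotect-style caller applies uvm_map_protect() to a process's map.
 * set_max is false, so only the active protection is changed; the
 * proc-based map lookup and the function name are assumptions of the
 * sketch.
 */
#if 0	/* illustrative sketch only */
static int
example_protect(struct proc *p, vaddr_t start, vaddr_t end)
{

	return uvm_map_protect(&p->p_vmspace->vm_map, start, end,
	    VM_PROT_READ | VM_PROT_WRITE, false);
}
#endif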
3265
3266 /*
3267 * uvm_map_inherit: set inheritance code for range of addrs in map.
3268 *
3269 * => map must be unlocked
3270 * => note that the inherit code is used during a "fork". see fork
3271 * code for details.
3272 */
3273
3274 int
3275 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3276 vm_inherit_t new_inheritance)
3277 {
3278 struct vm_map_entry *entry, *temp_entry;
3279 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3280 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3281 map, start, end, new_inheritance);
3282
3283 switch (new_inheritance) {
3284 case MAP_INHERIT_NONE:
3285 case MAP_INHERIT_COPY:
3286 case MAP_INHERIT_SHARE:
3287 break;
3288 default:
3289 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3290 return EINVAL;
3291 }
3292
3293 vm_map_lock(map);
3294 VM_MAP_RANGE_CHECK(map, start, end);
3295 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3296 entry = temp_entry;
3297 UVM_MAP_CLIP_START(map, entry, start, NULL);
3298 } else {
3299 entry = temp_entry->next;
3300 }
3301 while ((entry != &map->header) && (entry->start < end)) {
3302 UVM_MAP_CLIP_END(map, entry, end, NULL);
3303 entry->inheritance = new_inheritance;
3304 entry = entry->next;
3305 }
3306 vm_map_unlock(map);
3307 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3308 return 0;
3309 }
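
/*
 * Illustrative sketch (not part of the original file): marking a range
 * to be shared with children across fork(), as an minherit-style caller
 * would.  The proc-based map lookup and the function name are
 * assumptions of the sketch.
 */
#if 0	/* illustrative sketch only */
static int
example_inherit(struct proc *p, vaddr_t start, vaddr_t end)
{

	return uvm_map_inherit(&p->p_vmspace->vm_map, start, end,
	    MAP_INHERIT_SHARE);
}
#endif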
3310
3311 /*
3312 * uvm_map_advice: set advice code for range of addrs in map.
3313 *
3314 * => map must be unlocked
3315 */
3316
3317 int
3318 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3319 {
3320 struct vm_map_entry *entry, *temp_entry;
3321 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3322 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3323 map, start, end, new_advice);
3324
3325 vm_map_lock(map);
3326 VM_MAP_RANGE_CHECK(map, start, end);
3327 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3328 entry = temp_entry;
3329 UVM_MAP_CLIP_START(map, entry, start, NULL);
3330 } else {
3331 entry = temp_entry->next;
3332 }
3333
3334 /*
3335 * XXXJRT: disallow holes?
3336 */
3337
3338 while ((entry != &map->header) && (entry->start < end)) {
3339 UVM_MAP_CLIP_END(map, entry, end, NULL);
3340
3341 switch (new_advice) {
3342 case MADV_NORMAL:
3343 case MADV_RANDOM:
3344 case MADV_SEQUENTIAL:
3345 /* nothing special here */
3346 break;
3347
3348 default:
3349 vm_map_unlock(map);
3350 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3351 return EINVAL;
3352 }
3353 entry->advice = new_advice;
3354 entry = entry->next;
3355 }
3356
3357 vm_map_unlock(map);
3358 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3359 return 0;
3360 }
3361
3362 /*
3363 * uvm_map_willneed: apply MADV_WILLNEED
3364 */
3365
3366 int
3367 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3368 {
3369 struct vm_map_entry *entry;
3370 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3371 UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
3372 map, start, end, 0);
3373
3374 vm_map_lock_read(map);
3375 VM_MAP_RANGE_CHECK(map, start, end);
3376 if (!uvm_map_lookup_entry(map, start, &entry)) {
3377 entry = entry->next;
3378 }
3379 while (entry->start < end) {
3380 struct vm_amap * const amap = entry->aref.ar_amap;
3381 struct uvm_object * const uobj = entry->object.uvm_obj;
3382
3383 KASSERT(entry != &map->header);
3384 KASSERT(start < entry->end);
3385 /*
3386 * For now, we handle only the easy but commonly-requested case.
3387 * i.e. start prefetching of backing uobj pages.
3388 *
3389 * XXX It might be useful to pmap_enter() the already-in-core
3390 * pages by inventing a "weak" mode for uvm_fault() which would
3391 * only do the PGO_LOCKED pgo_get().
3392 */
3393 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3394 off_t offset;
3395 off_t size;
3396
3397 offset = entry->offset;
3398 if (start < entry->start) {
3399 offset += entry->start - start;
3400 }
3401 size = entry->offset + (entry->end - entry->start);
3402 if (entry->end < end) {
3403 size -= end - entry->end;
3404 }
3405 uvm_readahead(uobj, offset, size);
3406 }
3407 entry = entry->next;
3408 }
3409 vm_map_unlock_read(map);
3410 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3411 return 0;
3412 }
3413
3414 /*
3415 * uvm_map_pageable: sets the pageability of a range in a map.
3416 *
3417 * => wires map entries. should not be used for transient page locking.
3418 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3419 * => regions specified as not pageable require lock-down (wired) memory
3420 * and page tables.
3421 * => map must never be read-locked
3422 * => if islocked is true, map is already write-locked
3423 * => we always unlock the map, since we must downgrade to a read-lock
3424 * to call uvm_fault_wire()
3425 * => XXXCDC: check this and try and clean it up.
3426 */
3427
3428 int
3429 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3430 bool new_pageable, int lockflags)
3431 {
3432 struct vm_map_entry *entry, *start_entry, *failed_entry;
3433 int rv;
3434 #ifdef DIAGNOSTIC
3435 u_int timestamp_save;
3436 #endif
3437 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3438 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3439 map, start, end, new_pageable);
3440 KASSERT(map->flags & VM_MAP_PAGEABLE);
3441
3442 if ((lockflags & UVM_LK_ENTER) == 0)
3443 vm_map_lock(map);
3444 VM_MAP_RANGE_CHECK(map, start, end);
3445
3446 /*
3447 * only one pageability change may take place at one time, since
3448 * uvm_fault_wire assumes it will be called only once for each
3449 * wiring/unwiring. therefore, we have to make sure we're actually
3450 * changing the pageability for the entire region. we do so before
3451 * making any changes.
3452 */
3453
3454 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3455 if ((lockflags & UVM_LK_EXIT) == 0)
3456 vm_map_unlock(map);
3457
3458 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3459 return EFAULT;
3460 }
3461 entry = start_entry;
3462
3463 /*
3464 * handle wiring and unwiring separately.
3465 */
3466
3467 if (new_pageable) { /* unwire */
3468 UVM_MAP_CLIP_START(map, entry, start, NULL);
3469
3470 /*
3471 * unwiring. first ensure that the range to be unwired is
3472 * really wired down and that there are no holes.
3473 */
3474
3475 while ((entry != &map->header) && (entry->start < end)) {
3476 if (entry->wired_count == 0 ||
3477 (entry->end < end &&
3478 (entry->next == &map->header ||
3479 entry->next->start > entry->end))) {
3480 if ((lockflags & UVM_LK_EXIT) == 0)
3481 vm_map_unlock(map);
3482 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3483 return EINVAL;
3484 }
3485 entry = entry->next;
3486 }
3487
3488 /*
3489 * POSIX 1003.1b - a single munlock call unlocks a region,
3490 * regardless of the number of mlock calls made on that
3491 * region.
3492 */
3493
3494 entry = start_entry;
3495 while ((entry != &map->header) && (entry->start < end)) {
3496 UVM_MAP_CLIP_END(map, entry, end, NULL);
3497 if (VM_MAPENT_ISWIRED(entry))
3498 uvm_map_entry_unwire(map, entry);
3499 entry = entry->next;
3500 }
3501 if ((lockflags & UVM_LK_EXIT) == 0)
3502 vm_map_unlock(map);
3503 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3504 return 0;
3505 }
3506
3507 /*
3508 * wire case: in two passes [XXXCDC: ugly block of code here]
3509 *
3510 * 1: holding the write lock, we create any anonymous maps that need
3511 * to be created. then we clip each map entry to the region to
3512 * be wired and increment its wiring count.
3513 *
3514 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3515 * in the pages for any newly wired area (wired_count == 1).
3516 *
3517 * downgrading to a read lock for uvm_fault_wire avoids a possible
3518 * deadlock with another thread that may have faulted on one of
3519 * the pages to be wired (it would mark the page busy, blocking
3520 * us, then in turn block on the map lock that we hold). because
3521 * of problems in the recursive lock package, we cannot upgrade
3522 * to a write lock in vm_map_lookup. thus, any actions that
3523 * require the write lock must be done beforehand. because we
3524 * keep the read lock on the map, the copy-on-write status of the
3525 * entries we modify here cannot change.
3526 */
3527
3528 while ((entry != &map->header) && (entry->start < end)) {
3529 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3530
3531 /*
3532 * perform actions of vm_map_lookup that need the
3533 * write lock on the map: create an anonymous map
3534 * for a copy-on-write region, or an anonymous map
3535 * for a zero-fill region. (XXXCDC: submap case
3536 * ok?)
3537 */
3538
3539 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3540 if (UVM_ET_ISNEEDSCOPY(entry) &&
3541 ((entry->max_protection & VM_PROT_WRITE) ||
3542 (entry->object.uvm_obj == NULL))) {
3543 amap_copy(map, entry, 0, start, end);
3544 /* XXXCDC: wait OK? */
3545 }
3546 }
3547 }
3548 UVM_MAP_CLIP_START(map, entry, start, NULL);
3549 UVM_MAP_CLIP_END(map, entry, end, NULL);
3550 entry->wired_count++;
3551
3552 /*
3553 * Check for holes
3554 */
3555
3556 if (entry->protection == VM_PROT_NONE ||
3557 (entry->end < end &&
3558 (entry->next == &map->header ||
3559 entry->next->start > entry->end))) {
3560
3561 /*
3562 * found one. amap creation actions do not need to
3563 * be undone, but the wired counts need to be restored.
3564 */
3565
3566 while (entry != &map->header && entry->end > start) {
3567 entry->wired_count--;
3568 entry = entry->prev;
3569 }
3570 if ((lockflags & UVM_LK_EXIT) == 0)
3571 vm_map_unlock(map);
3572 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3573 return EINVAL;
3574 }
3575 entry = entry->next;
3576 }
3577
3578 /*
3579 * Pass 2.
3580 */
3581
3582 #ifdef DIAGNOSTIC
3583 timestamp_save = map->timestamp;
3584 #endif
3585 vm_map_busy(map);
3586 vm_map_unlock(map);
3587
3588 rv = 0;
3589 entry = start_entry;
3590 while (entry != &map->header && entry->start < end) {
3591 if (entry->wired_count == 1) {
3592 rv = uvm_fault_wire(map, entry->start, entry->end,
3593 entry->max_protection, 1);
3594 if (rv) {
3595
3596 /*
3597 * wiring failed. break out of the loop.
3598 * we'll clean up the map below, once we
3599 * have a write lock again.
3600 */
3601
3602 break;
3603 }
3604 }
3605 entry = entry->next;
3606 }
3607
3608 if (rv) { /* failed? */
3609
3610 /*
3611 * Get back to an exclusive (write) lock.
3612 */
3613
3614 vm_map_lock(map);
3615 vm_map_unbusy(map);
3616
3617 #ifdef DIAGNOSTIC
3618 if (timestamp_save + 1 != map->timestamp)
3619 panic("uvm_map_pageable: stale map");
3620 #endif
3621
3622 /*
3623 * first drop the wiring count on all the entries
3624 * which haven't actually been wired yet.
3625 */
3626
3627 failed_entry = entry;
3628 while (entry != &map->header && entry->start < end) {
3629 entry->wired_count--;
3630 entry = entry->next;
3631 }
3632
3633 /*
3634 * now, unwire all the entries that were successfully
3635 * wired above.
3636 */
3637
3638 entry = start_entry;
3639 while (entry != failed_entry) {
3640 entry->wired_count--;
3641 if (VM_MAPENT_ISWIRED(entry) == 0)
3642 uvm_map_entry_unwire(map, entry);
3643 entry = entry->next;
3644 }
3645 if ((lockflags & UVM_LK_EXIT) == 0)
3646 vm_map_unlock(map);
3647 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3648 return (rv);
3649 }
3650
3651 if ((lockflags & UVM_LK_EXIT) == 0) {
3652 vm_map_unbusy(map);
3653 } else {
3654
3655 /*
3656 * Get back to an exclusive (write) lock.
3657 */
3658
3659 vm_map_lock(map);
3660 vm_map_unbusy(map);
3661 }
3662
3663 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3664 return 0;
3665 }
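
/*
 * Illustrative sketch (not part of the original file): wiring and later
 * unwiring a range the way an mlock/munlock-style caller would;
 * new_pageable == false wires, true unwires.  Per the header comment
 * above, the map must not be read-locked by the caller.  The function
 * name is local to the sketch.
 */
#if 0	/* illustrative sketch only */
static int
example_wire(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	int error;

	error = uvm_map_pageable(map, start, end, false, 0);	/* wire */
	if (error)
		return error;

	return uvm_map_pageable(map, start, end, true, 0);	/* unwire */
}
#endif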
3666
3667 /*
3668 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3669 * all mapped regions.
3670 *
3671 * => map must not be locked.
3672 * => if no flags are specified, all regions are unwired.
3673 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3674 */
3675
3676 int
3677 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3678 {
3679 struct vm_map_entry *entry, *failed_entry;
3680 vsize_t size;
3681 int rv;
3682 #ifdef DIAGNOSTIC
3683 u_int timestamp_save;
3684 #endif
3685 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3686 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3687
3688 KASSERT(map->flags & VM_MAP_PAGEABLE);
3689
3690 vm_map_lock(map);
3691
3692 /*
3693 * handle wiring and unwiring separately.
3694 */
3695
3696 if (flags == 0) { /* unwire */
3697
3698 /*
3699 * POSIX 1003.1b -- munlockall unlocks all regions,
3700 * regardless of how many times mlockall has been called.
3701 */
3702
3703 for (entry = map->header.next; entry != &map->header;
3704 entry = entry->next) {
3705 if (VM_MAPENT_ISWIRED(entry))
3706 uvm_map_entry_unwire(map, entry);
3707 }
3708 map->flags &= ~VM_MAP_WIREFUTURE;
3709 vm_map_unlock(map);
3710 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3711 return 0;
3712 }
3713
3714 if (flags & MCL_FUTURE) {
3715
3716 /*
3717 * must wire all future mappings; remember this.
3718 */
3719
3720 map->flags |= VM_MAP_WIREFUTURE;
3721 }
3722
3723 if ((flags & MCL_CURRENT) == 0) {
3724
3725 /*
3726 * no more work to do!
3727 */
3728
3729 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3730 vm_map_unlock(map);
3731 return 0;
3732 }
3733
3734 /*
3735 * wire case: in three passes [XXXCDC: ugly block of code here]
3736 *
3737 * 1: holding the write lock, count all pages mapped by non-wired
3738 * entries. if this would cause us to go over our limit, we fail.
3739 *
3740 * 2: still holding the write lock, we create any anonymous maps that
3741 * need to be created. then we increment each entry's wiring count.
3742 *
3743 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3744 * in the pages for any newly wired area (wired_count == 1).
3745 *
3746 * downgrading to a read lock for uvm_fault_wire avoids a possible
3747 * deadlock with another thread that may have faulted on one of
3748 * the pages to be wired (it would mark the page busy, blocking
3749 * us, then in turn block on the map lock that we hold). because
3750 * of problems in the recursive lock package, we cannot upgrade
3751 * to a write lock in vm_map_lookup. thus, any actions that
3752 * require the write lock must be done beforehand. because we
3753 * keep the read lock on the map, the copy-on-write status of the
3754 * entries we modify here cannot change.
3755 */
3756
3757 for (size = 0, entry = map->header.next; entry != &map->header;
3758 entry = entry->next) {
3759 if (entry->protection != VM_PROT_NONE &&
3760 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3761 size += entry->end - entry->start;
3762 }
3763 }
3764
3765 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3766 vm_map_unlock(map);
3767 return ENOMEM;
3768 }
3769
3770 if (limit != 0 &&
3771 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3772 vm_map_unlock(map);
3773 return ENOMEM;
3774 }
3775
3776 /*
3777 * Pass 2.
3778 */
3779
3780 for (entry = map->header.next; entry != &map->header;
3781 entry = entry->next) {
3782 if (entry->protection == VM_PROT_NONE)
3783 continue;
3784 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3785
3786 /*
3787 * perform actions of vm_map_lookup that need the
3788 * write lock on the map: create an anonymous map
3789 * for a copy-on-write region, or an anonymous map
3790 * for a zero-fill region. (XXXCDC: submap case
3791 * ok?)
3792 */
3793
3794 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3795 if (UVM_ET_ISNEEDSCOPY(entry) &&
3796 ((entry->max_protection & VM_PROT_WRITE) ||
3797 (entry->object.uvm_obj == NULL))) {
3798 amap_copy(map, entry, 0, entry->start,
3799 entry->end);
3800 /* XXXCDC: wait OK? */
3801 }
3802 }
3803 }
3804 entry->wired_count++;
3805 }
3806
3807 /*
3808 * Pass 3.
3809 */
3810
3811 #ifdef DIAGNOSTIC
3812 timestamp_save = map->timestamp;
3813 #endif
3814 vm_map_busy(map);
3815 vm_map_unlock(map);
3816
3817 rv = 0;
3818 for (entry = map->header.next; entry != &map->header;
3819 entry = entry->next) {
3820 if (entry->wired_count == 1) {
3821 rv = uvm_fault_wire(map, entry->start, entry->end,
3822 entry->max_protection, 1);
3823 if (rv) {
3824
3825 /*
3826 * wiring failed. break out of the loop.
3827 * we'll clean up the map below, once we
3828 * have a write lock again.
3829 */
3830
3831 break;
3832 }
3833 }
3834 }
3835
3836 if (rv) {
3837
3838 /*
3839 * Get back an exclusive (write) lock.
3840 */
3841
3842 vm_map_lock(map);
3843 vm_map_unbusy(map);
3844
3845 #ifdef DIAGNOSTIC
3846 if (timestamp_save + 1 != map->timestamp)
3847 panic("uvm_map_pageable_all: stale map");
3848 #endif
3849
3850 /*
3851 * first drop the wiring count on all the entries
3852 * which haven't actually been wired yet.
3853 *
3854 * Skip VM_PROT_NONE entries like we did above.
3855 */
3856
3857 failed_entry = entry;
3858 for (/* nothing */; entry != &map->header;
3859 entry = entry->next) {
3860 if (entry->protection == VM_PROT_NONE)
3861 continue;
3862 entry->wired_count--;
3863 }
3864
3865 /*
3866 * now, unwire all the entries that were successfully
3867 * wired above.
3868 *
3869 * Skip VM_PROT_NONE entries like we did above.
3870 */
3871
3872 for (entry = map->header.next; entry != failed_entry;
3873 entry = entry->next) {
3874 if (entry->protection == VM_PROT_NONE)
3875 continue;
3876 entry->wired_count--;
3877 if (VM_MAPENT_ISWIRED(entry))
3878 uvm_map_entry_unwire(map, entry);
3879 }
3880 vm_map_unlock(map);
3881 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3882 return (rv);
3883 }
3884
3885 vm_map_unbusy(map);
3886
3887 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3888 return 0;
3889 }
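
/*
 * Illustrative sketch (not part of the original file): an mlockall-style
 * call wiring everything mapped now and everything mapped in the future.
 * A limit of 0 applies no per-map byte limit beyond the global
 * uvmexp.wiredmax check; the function name is local to the sketch.
 */
#if 0	/* illustrative sketch only */
static int
example_wire_all(struct vm_map *map, vsize_t limit)
{

	return uvm_map_pageable_all(map, MCL_CURRENT | MCL_FUTURE, limit);
}
#endif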
3890
3891 /*
3892 * uvm_map_clean: clean out a map range
3893 *
3894 * => valid flags:
3895 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3896 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3897 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3898 * if (flags & PGO_FREE): any cached pages are freed after clean
3899 * => returns an error if any part of the specified range isn't mapped
3900 * => never a need to flush amap layer since the anonymous memory has
3901 * no permanent home, but may deactivate pages there
3902 * => called from sys_msync() and sys_madvise()
3903 * => caller must not write-lock map (read OK).
3904 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3905 */
3906
3907 int
3908 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3909 {
3910 struct vm_map_entry *current, *entry;
3911 struct uvm_object *uobj;
3912 struct vm_amap *amap;
3913 struct vm_anon *anon, *anon_tofree;
3914 struct vm_page *pg;
3915 vaddr_t offset;
3916 vsize_t size;
3917 voff_t uoff;
3918 int error, refs;
3919 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3920
3921 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3922 map, start, end, flags);
3923 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3924 (PGO_FREE|PGO_DEACTIVATE));
3925
3926 vm_map_lock_read(map);
3927 VM_MAP_RANGE_CHECK(map, start, end);
3928 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3929 vm_map_unlock_read(map);
3930 return EFAULT;
3931 }
3932
3933 /*
3934 * Make a first pass to check for holes and wiring problems.
3935 */
3936
3937 for (current = entry; current->start < end; current = current->next) {
3938 if (UVM_ET_ISSUBMAP(current)) {
3939 vm_map_unlock_read(map);
3940 return EINVAL;
3941 }
3942 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3943 vm_map_unlock_read(map);
3944 return EBUSY;
3945 }
3946 if (end <= current->end) {
3947 break;
3948 }
3949 if (current->end != current->next->start) {
3950 vm_map_unlock_read(map);
3951 return EFAULT;
3952 }
3953 }
3954
3955 error = 0;
3956 for (current = entry; start < end; current = current->next) {
3957 amap = current->aref.ar_amap; /* upper layer */
3958 uobj = current->object.uvm_obj; /* lower layer */
3959 KASSERT(start >= current->start);
3960
3961 /*
3962 * No amap cleaning necessary if:
3963 *
3964 * (1) There's no amap.
3965 *
3966 * (2) We're not deactivating or freeing pages.
3967 */
3968
3969 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3970 goto flush_object;
3971
3972 offset = start - current->start;
3973 size = MIN(end, current->end) - start;
3974 anon_tofree = NULL;
3975
3976 amap_lock(amap);
3977 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3978 			anon = amap_lookup(&current->aref, offset);
3979 if (anon == NULL)
3980 continue;
3981
3982 KASSERT(anon->an_lock == amap->am_lock);
3983 pg = anon->an_page;
3984 if (pg == NULL) {
3985 continue;
3986 }
3987
3988 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3989
3990 /*
3991 * In these first 3 cases, we just deactivate the page.
3992 */
3993
3994 case PGO_CLEANIT|PGO_FREE:
3995 case PGO_CLEANIT|PGO_DEACTIVATE:
3996 case PGO_DEACTIVATE:
3997 deactivate_it:
3998 /*
3999 * skip the page if it's loaned or wired,
4000 * since it shouldn't be on a paging queue
4001 * at all in these cases.
4002 */
4003
4004 mutex_enter(&uvm_pageqlock);
4005 if (pg->loan_count != 0 ||
4006 pg->wire_count != 0) {
4007 mutex_exit(&uvm_pageqlock);
4008 continue;
4009 }
4010 KASSERT(pg->uanon == anon);
4011 uvm_pagedeactivate(pg);
4012 mutex_exit(&uvm_pageqlock);
4013 continue;
4014
4015 case PGO_FREE:
4016
4017 /*
4018 * If there are multiple references to
4019 * the amap, just deactivate the page.
4020 */
4021
4022 if (amap_refs(amap) > 1)
4023 goto deactivate_it;
4024
4025 /* skip the page if it's wired */
4026 if (pg->wire_count != 0) {
4027 continue;
4028 }
4029 				amap_unadd(&current->aref, offset);
4030 refs = --anon->an_ref;
4031 if (refs == 0) {
4032 anon->an_link = anon_tofree;
4033 anon_tofree = anon;
4034 }
4035 continue;
4036 }
4037 }
4038 uvm_anon_freelst(amap, anon_tofree);
4039
4040 flush_object:
4041 /*
4042 * flush pages if we've got a valid backing object.
4043 * note that we must always clean object pages before
4044 * freeing them since otherwise we could reveal stale
4045 * data from files.
4046 */
4047
4048 uoff = current->offset + (start - current->start);
4049 size = MIN(end, current->end) - start;
4050 if (uobj != NULL) {
4051 mutex_enter(uobj->vmobjlock);
4052 if (uobj->pgops->pgo_put != NULL)
4053 error = (uobj->pgops->pgo_put)(uobj, uoff,
4054 uoff + size, flags | PGO_CLEANIT);
4055 else
4056 error = 0;
4057 }
4058 start += size;
4059 }
4060 vm_map_unlock_read(map);
4061 return (error);
4062 }
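
#if 0
/*
 * Illustrative sketch only (not part of the original source): how a
 * caller such as sys_msync() might use uvm_map_clean() to flush a range
 * synchronously and then deactivate the cleaned pages.  The helper name
 * "example_msync_range" and its arguments are hypothetical.
 */
static int
example_msync_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{

	/* PGO_FREE and PGO_DEACTIVATE are mutually exclusive (see KASSERT) */
	return uvm_map_clean(map, start, end,
	    PGO_CLEANIT | PGO_SYNCIO | PGO_DEACTIVATE);
}
#endif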
4063
4064
4065 /*
4066 * uvm_map_checkprot: check protection in map
4067 *
4068 * => must allow specified protection in a fully allocated region.
4069 * => map must be read or write locked by caller.
4070 */
4071
4072 bool
4073 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4074 vm_prot_t protection)
4075 {
4076 struct vm_map_entry *entry;
4077 struct vm_map_entry *tmp_entry;
4078
4079 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4080 return (false);
4081 }
4082 entry = tmp_entry;
4083 while (start < end) {
4084 if (entry == &map->header) {
4085 return (false);
4086 }
4087
4088 /*
4089 * no holes allowed
4090 */
4091
4092 if (start < entry->start) {
4093 return (false);
4094 }
4095
4096 /*
4097 * check protection associated with entry
4098 */
4099
4100 if ((entry->protection & protection) != protection) {
4101 return (false);
4102 }
4103 start = entry->end;
4104 entry = entry->next;
4105 }
4106 return (true);
4107 }
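
#if 0
/*
 * Illustrative sketch only: uvm_map_checkprot() must be called with the
 * map read- or write-locked by the caller, e.g. to verify that a
 * user-supplied range is fully mapped read/write.  The helper name
 * "example_range_is_writable" is hypothetical.
 */
static bool
example_range_is_writable(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	bool ok;

	vm_map_lock_read(map);
	ok = uvm_map_checkprot(map, start, end, VM_PROT_READ | VM_PROT_WRITE);
	vm_map_unlock_read(map);
	return ok;
}
#endif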
4108
4109 /*
4110 * uvmspace_alloc: allocate a vmspace structure.
4111 *
4112 * - structure includes vm_map and pmap
4113 * - XXX: no locking on this structure
4114 * - refcnt set to 1, rest must be init'd by caller
4115 */
4116 struct vmspace *
4117 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
4118 {
4119 struct vmspace *vm;
4120 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
4121
4122 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
4123 uvmspace_init(vm, NULL, vmin, vmax);
4124 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
4125 return (vm);
4126 }
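
#if 0
/*
 * Illustrative sketch only: obtaining a fresh address space for a user
 * VA range, as uvmspace_exec() below does when a process has no vmspace
 * yet.  The helper name "example_new_vmspace" is hypothetical.
 */
static struct vmspace *
example_new_vmspace(vaddr_t start, vaddr_t end)
{

	/* refcnt starts at 1; uvmspace_init() creates the pmap for us */
	return uvmspace_alloc(start, end);
}
#endif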
4127
4128 /*
4129 * uvmspace_init: initialize a vmspace structure.
4130 *
4131 * - XXX: no locking on this structure
4132 * - refcnt set to 1, rest must be init'd by caller
4133 */
4134 void
4135 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
4136 {
4137 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
4138
4139 memset(vm, 0, sizeof(*vm));
4140 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4141 #ifdef __USING_TOPDOWN_VM
4142 | VM_MAP_TOPDOWN
4143 #endif
4144 );
4145 if (pmap)
4146 pmap_reference(pmap);
4147 else
4148 pmap = pmap_create();
4149 vm->vm_map.pmap = pmap;
4150 vm->vm_refcnt = 1;
4151 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4152 }
4153
4154 /*
4155 * uvmspace_share: share a vmspace between two processes
4156 *
4157 * - used for vfork, threads(?)
4158 */
4159
4160 void
4161 uvmspace_share(struct proc *p1, struct proc *p2)
4162 {
4163
4164 uvmspace_addref(p1->p_vmspace);
4165 p2->p_vmspace = p1->p_vmspace;
4166 }
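
#if 0
/*
 * Illustrative sketch only: the vfork-style pairing of uvmspace_share()
 * with a later uvmspace_free().  "parent" and "child" are hypothetical
 * processes; the helper name is made up for this example.
 */
static void
example_vfork_lifecycle(struct proc *parent, struct proc *child)
{

	/* child borrows the parent's vmspace; this takes an extra reference */
	uvmspace_share(parent, child);

	/* ... later, when the child execs or exits ... */
	uvmspace_free(child->p_vmspace);	/* drop that reference */
}
#endif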
4167
4168 #if 0
4169
4170 /*
4171 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4172 *
4173 * - XXX: no locking on vmspace
4174 */
4175
4176 void
4177 uvmspace_unshare(struct lwp *l)
4178 {
4179 struct proc *p = l->l_proc;
4180 struct vmspace *nvm, *ovm = p->p_vmspace;
4181
4182 if (ovm->vm_refcnt == 1)
4183 /* nothing to do: vmspace isn't shared in the first place */
4184 return;
4185
4186 /* make a new vmspace, still holding old one */
4187 nvm = uvmspace_fork(ovm);
4188
4189 kpreempt_disable();
4190 pmap_deactivate(l); /* unbind old vmspace */
4191 p->p_vmspace = nvm;
4192 pmap_activate(l); /* switch to new vmspace */
4193 kpreempt_enable();
4194
4195 uvmspace_free(ovm); /* drop reference to old vmspace */
4196 }
4197
4198 #endif
4199
4200 /*
4201 * uvmspace_exec: the process wants to exec a new program
4202 */
4203
4204 void
4205 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4206 {
4207 struct proc *p = l->l_proc;
4208 struct vmspace *nvm, *ovm = p->p_vmspace;
4209 struct vm_map *map;
4210
4211 #ifdef __HAVE_CPU_VMSPACE_EXEC
4212 cpu_vmspace_exec(l, start, end);
4213 #endif
4214
4215 /*
4216 * Special case: no vmspace yet (see posix_spawn) -
4217 * no races possible in this case.
4218 */
4219 if (ovm == NULL) {
4220 p->p_vmspace = uvmspace_alloc(start, end);
4221 pmap_activate(l);
4222 return;
4223 }
4224
4225 map = &ovm->vm_map;
4226 /*
4227 * see if more than one process is using this vmspace...
4228 */
4229
4230 if (ovm->vm_refcnt == 1) {
4231
4232 /*
4233 * if p is the only process using its vmspace then we can safely
4234 * recycle that vmspace for the program that is being exec'd.
4235 */
4236
4237 #ifdef SYSVSHM
4238 /*
4239 * SYSV SHM semantics require us to kill all segments on an exec
4240 */
4241
4242 if (ovm->vm_shm)
4243 shmexit(ovm);
4244 #endif
4245
4246 /*
4247 * POSIX 1003.1b -- "lock future mappings" is revoked
4248 * when a process execs another program image.
4249 */
4250
4251 map->flags &= ~VM_MAP_WIREFUTURE;
4252
4253 /*
4254 * now unmap the old program
4255 */
4256
4257 pmap_remove_all(map->pmap);
4258 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4259 KASSERT(map->header.prev == &map->header);
4260 KASSERT(map->nentries == 0);
4261
4262 /*
4263 * resize the map
4264 */
4265
4266 vm_map_setmin(map, start);
4267 vm_map_setmax(map, end);
4268 } else {
4269
4270 /*
4271 * p's vmspace is being shared, so we can't reuse it for p since
4272 * it is still being used for others. allocate a new vmspace
4273 * for p
4274 */
4275
4276 nvm = uvmspace_alloc(start, end);
4277
4278 /*
4279 * install new vmspace and drop our ref to the old one.
4280 */
4281
4282 kpreempt_disable();
4283 pmap_deactivate(l);
4284 p->p_vmspace = nvm;
4285 pmap_activate(l);
4286 kpreempt_enable();
4287
4288 uvmspace_free(ovm);
4289 }
4290 }
4291
4292 /*
4293  * uvmspace_addref: add a reference to a vmspace.
4294 */
4295
4296 void
4297 uvmspace_addref(struct vmspace *vm)
4298 {
4299 struct vm_map *map = &vm->vm_map;
4300
4301 KASSERT((map->flags & VM_MAP_DYING) == 0);
4302
4303 mutex_enter(&map->misc_lock);
4304 KASSERT(vm->vm_refcnt > 0);
4305 vm->vm_refcnt++;
4306 mutex_exit(&map->misc_lock);
4307 }
4308
4309 /*
4310 * uvmspace_free: free a vmspace data structure
4311 */
4312
4313 void
4314 uvmspace_free(struct vmspace *vm)
4315 {
4316 struct vm_map_entry *dead_entries;
4317 struct vm_map *map = &vm->vm_map;
4318 int n;
4319
4320 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4321
4322 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4323 mutex_enter(&map->misc_lock);
4324 n = --vm->vm_refcnt;
4325 mutex_exit(&map->misc_lock);
4326 if (n > 0)
4327 return;
4328
4329 /*
4330 * at this point, there should be no other references to the map.
4331 * delete all of the mappings, then destroy the pmap.
4332 */
4333
4334 map->flags |= VM_MAP_DYING;
4335 pmap_remove_all(map->pmap);
4336 #ifdef SYSVSHM
4337 /* Get rid of any SYSV shared memory segments. */
4338 if (vm->vm_shm != NULL)
4339 shmexit(vm);
4340 #endif
4341 if (map->nentries) {
4342 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4343 &dead_entries, NULL, 0);
4344 if (dead_entries != NULL)
4345 uvm_unmap_detach(dead_entries, 0);
4346 }
4347 KASSERT(map->nentries == 0);
4348 KASSERT(map->size == 0);
4349 mutex_destroy(&map->misc_lock);
4350 mutex_destroy(&map->mutex);
4351 rw_destroy(&map->lock);
4352 cv_destroy(&map->cv);
4353 pmap_destroy(map->pmap);
4354 pool_cache_put(&uvm_vmspace_cache, vm);
4355 }
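
#if 0
/*
 * Illustrative sketch only: holding a vmspace across a blocking
 * operation.  uvmspace_addref() keeps the vmspace (map and pmap) alive
 * until the matching uvmspace_free().  The helper name is hypothetical.
 */
static void
example_hold_vmspace(struct vmspace *vm)
{

	uvmspace_addref(vm);	/* vm_refcnt: n -> n + 1 */
	/* ... safe to use vm->vm_map here even if the owner exits ... */
	uvmspace_free(vm);	/* vm_refcnt: n + 1 -> n; destroyed at zero */
}
#endif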
4356
4357 /*
4358 * F O R K - m a i n e n t r y p o i n t
4359 */
4360 /*
4361 * uvmspace_fork: fork a process' main map
4362 *
4363 * => create a new vmspace for child process from parent.
4364 * => parent's map must not be locked.
4365 */
4366
4367 struct vmspace *
4368 uvmspace_fork(struct vmspace *vm1)
4369 {
4370 struct vmspace *vm2;
4371 struct vm_map *old_map = &vm1->vm_map;
4372 struct vm_map *new_map;
4373 struct vm_map_entry *old_entry;
4374 struct vm_map_entry *new_entry;
4375 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4376
4377 vm_map_lock(old_map);
4378
4379 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4380 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4381 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4382 new_map = &vm2->vm_map; /* XXX */
4383
4384 old_entry = old_map->header.next;
4385 new_map->size = old_map->size;
4386
4387 /*
4388 * go entry-by-entry
4389 */
4390
4391 while (old_entry != &old_map->header) {
4392
4393 /*
4394 * first, some sanity checks on the old entry
4395 */
4396
4397 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4398 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4399 !UVM_ET_ISNEEDSCOPY(old_entry));
4400
4401 switch (old_entry->inheritance) {
4402 case MAP_INHERIT_NONE:
4403
4404 /*
4405 * drop the mapping, modify size
4406 */
4407 new_map->size -= old_entry->end - old_entry->start;
4408 break;
4409
4410 case MAP_INHERIT_SHARE:
4411
4412 /*
4413 * share the mapping: this means we want the old and
4414 * new entries to share amaps and backing objects.
4415 */
4416 /*
4417 			 * if the old_entry needs a new amap (due to a previous fork)
4418 * then we need to allocate it now so that we have
4419 * something we own to share with the new_entry. [in
4420 * other words, we need to clear needs_copy]
4421 */
4422
4423 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4424 /* get our own amap, clears needs_copy */
4425 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4426 0, 0);
4427 /* XXXCDC: WAITOK??? */
4428 }
4429
4430 new_entry = uvm_mapent_alloc(new_map, 0);
4431 /* old_entry -> new_entry */
4432 uvm_mapent_copy(old_entry, new_entry);
4433
4434 /* new pmap has nothing wired in it */
4435 new_entry->wired_count = 0;
4436
4437 /*
4438 * gain reference to object backing the map (can't
4439 * be a submap, already checked this case).
4440 */
4441
4442 if (new_entry->aref.ar_amap)
4443 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4444
4445 if (new_entry->object.uvm_obj &&
4446 new_entry->object.uvm_obj->pgops->pgo_reference)
4447 new_entry->object.uvm_obj->
4448 pgops->pgo_reference(
4449 new_entry->object.uvm_obj);
4450
4451 /* insert entry at end of new_map's entry list */
4452 uvm_map_entry_link(new_map, new_map->header.prev,
4453 new_entry);
4454
4455 break;
4456
4457 case MAP_INHERIT_COPY:
4458
4459 /*
4460 * copy-on-write the mapping (using mmap's
4461 * MAP_PRIVATE semantics)
4462 *
4463 * allocate new_entry, adjust reference counts.
4464 * (note that new references are read-only).
4465 */
4466
4467 new_entry = uvm_mapent_alloc(new_map, 0);
4468 /* old_entry -> new_entry */
4469 uvm_mapent_copy(old_entry, new_entry);
4470
4471 if (new_entry->aref.ar_amap)
4472 uvm_map_reference_amap(new_entry, 0);
4473
4474 if (new_entry->object.uvm_obj &&
4475 new_entry->object.uvm_obj->pgops->pgo_reference)
4476 new_entry->object.uvm_obj->pgops->pgo_reference
4477 (new_entry->object.uvm_obj);
4478
4479 /* new pmap has nothing wired in it */
4480 new_entry->wired_count = 0;
4481
4482 new_entry->etype |=
4483 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4484 uvm_map_entry_link(new_map, new_map->header.prev,
4485 new_entry);
4486
4487 /*
4488 * the new entry will need an amap. it will either
4489 * need to be copied from the old entry or created
4490 * from scratch (if the old entry does not have an
4491 * amap). can we defer this process until later
4492 * (by setting "needs_copy") or do we need to copy
4493 * the amap now?
4494 *
4495 * we must copy the amap now if any of the following
4496 * conditions hold:
4497 * 1. the old entry has an amap and that amap is
4498 * being shared. this means that the old (parent)
4499 * process is sharing the amap with another
4500 * process. if we do not clear needs_copy here
4501 * we will end up in a situation where both the
4502 			 * parent and child processes are referring to the
4503 * same amap with "needs_copy" set. if the
4504 * parent write-faults, the fault routine will
4505 * clear "needs_copy" in the parent by allocating
4506 * a new amap. this is wrong because the
4507 * parent is supposed to be sharing the old amap
4508 * and the new amap will break that.
4509 *
4510 * 2. if the old entry has an amap and a non-zero
4511 * wire count then we are going to have to call
4512 * amap_cow_now to avoid page faults in the
4513 * parent process. since amap_cow_now requires
4514 * "needs_copy" to be clear we might as well
4515 * clear it here as well.
4516 *
4517 */
4518
4519 if (old_entry->aref.ar_amap != NULL) {
4520 if ((amap_flags(old_entry->aref.ar_amap) &
4521 AMAP_SHARED) != 0 ||
4522 VM_MAPENT_ISWIRED(old_entry)) {
4523
4524 amap_copy(new_map, new_entry,
4525 AMAP_COPY_NOCHUNK, 0, 0);
4526 /* XXXCDC: M_WAITOK ... ok? */
4527 }
4528 }
4529
4530 /*
4531 * if the parent's entry is wired down, then the
4532 * parent process does not want page faults on
4533 * access to that memory. this means that we
4534 * cannot do copy-on-write because we can't write
4535 * protect the old entry. in this case we
4536 * resolve all copy-on-write faults now, using
4537 * amap_cow_now. note that we have already
4538 * allocated any needed amap (above).
4539 */
4540
4541 if (VM_MAPENT_ISWIRED(old_entry)) {
4542
4543 /*
4544 * resolve all copy-on-write faults now
4545 * (note that there is nothing to do if
4546 * the old mapping does not have an amap).
4547 */
4548 if (old_entry->aref.ar_amap)
4549 amap_cow_now(new_map, new_entry);
4550
4551 } else {
4552
4553 /*
4554 				 * set up mappings to trigger copy-on-write faults:
4555 * we must write-protect the parent if it has
4556 * an amap and it is not already "needs_copy"...
4557 * if it is already "needs_copy" then the parent
4558 * has already been write-protected by a previous
4559 * fork operation.
4560 */
4561
4562 if (old_entry->aref.ar_amap &&
4563 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4564 if (old_entry->max_protection & VM_PROT_WRITE) {
4565 pmap_protect(old_map->pmap,
4566 old_entry->start,
4567 old_entry->end,
4568 old_entry->protection &
4569 ~VM_PROT_WRITE);
4570 }
4571 old_entry->etype |= UVM_ET_NEEDSCOPY;
4572 }
4573 }
4574 break;
4575 } /* end of switch statement */
4576 old_entry = old_entry->next;
4577 }
4578
4579 pmap_update(old_map->pmap);
4580 vm_map_unlock(old_map);
4581
4582 #ifdef SYSVSHM
4583 if (vm1->vm_shm)
4584 shmfork(vm1, vm2);
4585 #endif
4586
4587 #ifdef PMAP_FORK
4588 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4589 #endif
4590
4591 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4592 return (vm2);
4593 }
4594
4595
4596 /*
4597 * in-kernel map entry allocation.
4598 */
4599
4600 struct uvm_kmapent_hdr {
4601 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4602 int ukh_nused;
4603 struct vm_map_entry *ukh_freelist;
4604 struct vm_map *ukh_map;
4605 struct vm_map_entry ukh_entries[0];
4606 };
4607
4608 #define UVM_KMAPENT_CHUNK \
4609 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4610 / sizeof(struct vm_map_entry))
4611
4612 #define UVM_KHDR_FIND(entry) \
4613 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
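
#if 0
/*
 * Illustrative sketch only: a chunk of in-kernel map entries occupies a
 * single page with the uvm_kmapent_hdr at its start, so the header can be
 * recovered from any entry pointer by masking off the page offset.  That
 * is all UVM_KHDR_FIND() does; the helper name below is hypothetical.
 */
static struct uvm_kmapent_hdr *
example_entry_to_header(struct vm_map_entry *entry)
{

	/* equivalent to UVM_KHDR_FIND(entry) */
	return (struct uvm_kmapent_hdr *)((vaddr_t)entry & ~PAGE_MASK);
}
#endif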
4614
4615
4616 #ifdef DIAGNOSTIC
4617 static struct vm_map *
4618 uvm_kmapent_map(struct vm_map_entry *entry)
4619 {
4620 const struct uvm_kmapent_hdr *ukh;
4621
4622 ukh = UVM_KHDR_FIND(entry);
4623 return ukh->ukh_map;
4624 }
4625 #endif
4626
4627 static inline struct vm_map_entry *
4628 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4629 {
4630 struct vm_map_entry *entry;
4631
4632 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4633 KASSERT(ukh->ukh_nused >= 0);
4634
4635 entry = ukh->ukh_freelist;
4636 if (entry) {
4637 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4638 == UVM_MAP_KERNEL);
4639 ukh->ukh_freelist = entry->next;
4640 ukh->ukh_nused++;
4641 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4642 } else {
4643 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4644 }
4645
4646 return entry;
4647 }
4648
4649 static inline void
4650 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4651 {
4652
4653 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4654 == UVM_MAP_KERNEL);
4655 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4656 KASSERT(ukh->ukh_nused > 0);
4657 KASSERT(ukh->ukh_freelist != NULL ||
4658 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4659 KASSERT(ukh->ukh_freelist == NULL ||
4660 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4661
4662 ukh->ukh_nused--;
4663 entry->next = ukh->ukh_freelist;
4664 ukh->ukh_freelist = entry;
4665 }
4666
4667 /*
4668 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4669 */
4670
4671 static struct vm_map_entry *
4672 uvm_kmapent_alloc(struct vm_map *map, int flags)
4673 {
4674 struct vm_page *pg;
4675 struct uvm_kmapent_hdr *ukh;
4676 struct vm_map_entry *entry;
4677 #ifndef PMAP_MAP_POOLPAGE
4678 struct uvm_map_args args;
4679 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4680 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4681 int error;
4682 #endif
4683 vaddr_t va;
4684 int i;
4685
4686 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4687 KDASSERT(kernel_map != NULL);
4688 KASSERT(vm_map_pmap(map) == pmap_kernel());
4689
4690 UVMMAP_EVCNT_INCR(uke_alloc);
4691 entry = NULL;
4692 again:
4693 /*
4694 * try to grab an entry from freelist.
4695 */
4696 mutex_spin_enter(&uvm_kentry_lock);
4697 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4698 if (ukh) {
4699 entry = uvm_kmapent_get(ukh);
4700 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4701 LIST_REMOVE(ukh, ukh_listq);
4702 }
4703 mutex_spin_exit(&uvm_kentry_lock);
4704
4705 if (entry)
4706 return entry;
4707
4708 /*
4709 * there's no free entry for this vm_map.
4710 	 * now we need to allocate some vm_map_entry structures;
4711 	 * for simplicity, always allocate a one-page chunk of them at once.
4712 */
4713
4714 pg = uvm_pagealloc(NULL, 0, NULL,
4715 (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
4716 if (__predict_false(pg == NULL)) {
4717 if (flags & UVM_FLAG_NOWAIT)
4718 return NULL;
4719 uvm_wait("kme_alloc");
4720 goto again;
4721 }
4722
4723 #ifdef PMAP_MAP_POOLPAGE
4724 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
4725 KASSERT(va != 0);
4726 #else
4727 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4728 VM_PGCOLOR_BUCKET(pg), mapflags | UVM_FLAG_COLORMATCH, &args);
4729 if (error) {
4730 uvm_pagefree(pg);
4731 return NULL;
4732 }
4733
4734 va = args.uma_start;
4735
4736 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
4737 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
4738 pmap_update(vm_map_pmap(map));
4739
4740 #endif
4741 ukh = (void *)va;
4742
4743 /*
4744 	 * use the last entry for the ukh itself.
4745 */
4746
4747 i = UVM_KMAPENT_CHUNK - 1;
4748 #ifndef PMAP_MAP_POOLPAGE
4749 entry = &ukh->ukh_entries[i--];
4750 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4751 error = uvm_map_enter(map, &args, entry);
4752 KASSERT(error == 0);
4753 #endif
4754
4755 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4756 ukh->ukh_map = map;
4757 ukh->ukh_freelist = NULL;
4758 for (; i >= 1; i--) {
4759 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4760
4761 xentry->flags = UVM_MAP_KERNEL;
4762 uvm_kmapent_put(ukh, xentry);
4763 }
4764 #ifdef PMAP_MAP_POOLPAGE
4765 KASSERT(ukh->ukh_nused == 1);
4766 #else
4767 KASSERT(ukh->ukh_nused == 2);
4768 #endif
4769
4770 mutex_spin_enter(&uvm_kentry_lock);
4771 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4772 ukh, ukh_listq);
4773 mutex_spin_exit(&uvm_kentry_lock);
4774
4775 /*
4776 * return first entry.
4777 */
4778
4779 entry = &ukh->ukh_entries[0];
4780 entry->flags = UVM_MAP_KERNEL;
4781 UVMMAP_EVCNT_INCR(ukh_alloc);
4782
4783 return entry;
4784 }
4785
4786 /*
4787  * uvm_kmapent_free: free a map entry for an in-kernel map
4788 */
4789
4790 static void
4791 uvm_kmapent_free(struct vm_map_entry *entry)
4792 {
4793 struct uvm_kmapent_hdr *ukh;
4794 struct vm_page *pg;
4795 struct vm_map *map;
4796 #ifndef PMAP_UNMAP_POOLPAGE
4797 struct pmap *pmap;
4798 struct vm_map_entry *deadentry;
4799 #endif
4800 vaddr_t va;
4801 paddr_t pa;
4802
4803 UVMMAP_EVCNT_INCR(uke_free);
4804 ukh = UVM_KHDR_FIND(entry);
4805 map = ukh->ukh_map;
4806
4807 mutex_spin_enter(&uvm_kentry_lock);
4808 uvm_kmapent_put(ukh, entry);
4809 #ifdef PMAP_UNMAP_POOLPAGE
4810 if (ukh->ukh_nused > 0) {
4811 #else
4812 if (ukh->ukh_nused > 1) {
4813 #endif
4814 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4815 LIST_INSERT_HEAD(
4816 &vm_map_to_kernel(map)->vmk_kentry_free,
4817 ukh, ukh_listq);
4818 mutex_spin_exit(&uvm_kentry_lock);
4819 return;
4820 }
4821
4822 /*
4823 * now we can free this ukh.
4824 *
4825 * however, keep an empty ukh to avoid ping-pong.
4826 */
4827
4828 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4829 LIST_NEXT(ukh, ukh_listq) == NULL) {
4830 mutex_spin_exit(&uvm_kentry_lock);
4831 return;
4832 }
4833 LIST_REMOVE(ukh, ukh_listq);
4834 mutex_spin_exit(&uvm_kentry_lock);
4835
4836 va = (vaddr_t)ukh;
4837
4838 #ifdef PMAP_UNMAP_POOLPAGE
4839 KASSERT(ukh->ukh_nused == 0);
4840 pa = PMAP_UNMAP_POOLPAGE(va);
4841 KASSERT(pa != 0);
4842 #else
4843 KASSERT(ukh->ukh_nused == 1);
4844
4845 /*
4846 	 * remove the map entry for the ukh itself.
4847 */
4848
4849 KASSERT((va & PAGE_MASK) == 0);
4850 vm_map_lock(map);
4851 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4852 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4853 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4854 KASSERT(deadentry->next == NULL);
4855 KASSERT(deadentry == &ukh->ukh_entries[UVM_KMAPENT_CHUNK - 1]);
4856
4857 /*
4858 * unmap the page from pmap and free it.
4859 */
4860
4861 pmap = vm_map_pmap(map);
4862 KASSERT(pmap == pmap_kernel());
4863 if (!pmap_extract(pmap, va, &pa))
4864 panic("%s: no mapping", __func__);
4865 pmap_kremove(va, PAGE_SIZE);
4866 pmap_update(vm_map_pmap(map));
4867 vm_map_unlock(map);
4868 #endif /* !PMAP_UNMAP_POOLPAGE */
4869 pg = PHYS_TO_VM_PAGE(pa);
4870 uvm_pagefree(pg);
4871 UVMMAP_EVCNT_INCR(ukh_free);
4872 }
4873
4874 static vsize_t
4875 uvm_kmapent_overhead(vsize_t size)
4876 {
4877
4878 /*
4879 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4880 * as the min allocation unit is PAGE_SIZE.
4881 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4882 	 *   one of them is used to map the page itself.
4883 */
4884
4885 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4886 PAGE_SIZE;
4887 }
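
/*
 * Worked example (illustrative only; the real numbers are machine
 * dependent): with a 4 KiB PAGE_SIZE and, say, UVM_KMAPENT_CHUNK == 50,
 * mapping 1 MiB can need up to howmany(1 MiB, 4 KiB) == 256 unmerged
 * entries, which fit in howmany(256, 50 - 1) == 6 pages of overhead.
 */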
4888
4889 /*
4890 * map entry reservation
4891 */
4892
4893 /*
4894 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4895 *
4896 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4897 * => caller shouldn't hold map locked.
4898 */
4899 int
4900 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4901 int nentries, int flags)
4902 {
4903
4904 umr->umr_nentries = 0;
4905
4906 if ((flags & UVM_FLAG_QUANTUM) != 0)
4907 return 0;
4908
4909 if (!VM_MAP_USE_KMAPENT(map))
4910 return 0;
4911
4912 while (nentries--) {
4913 struct vm_map_entry *ent;
4914 ent = uvm_kmapent_alloc(map, flags);
4915 if (!ent) {
4916 uvm_mapent_unreserve(map, umr);
4917 return ENOMEM;
4918 }
4919 UMR_PUTENTRY(umr, ent);
4920 }
4921
4922 return 0;
4923 }
4924
4925 /*
4926 * uvm_mapent_unreserve:
4927 *
4928 * => caller shouldn't hold map locked.
4929  * => never fails or sleeps.
4930 */
4931 void
4932 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4933 {
4934
4935 while (!UMR_EMPTY(umr))
4936 uvm_kmapent_free(UMR_GETENTRY(umr));
4937 }
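
#if 0
/*
 * Illustrative sketch only: the reserve/lock/unreserve pattern around an
 * unmap, mirroring what uvm_unmap1() below does.  Reservation happens
 * before the map is locked so uvm_kmapent_alloc() may sleep safely; the
 * helper name is hypothetical.
 */
static void
example_reserved_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *dead_entries;
	struct uvm_mapent_reservation umr;

	uvm_mapent_reserve(map, &umr, 2, 0);	/* up to two clip entries */
	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead_entries, &umr, 0);
	vm_map_unlock(map);
	uvm_mapent_unreserve(map, &umr);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}
#endif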
4938
4939 /*
4940 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4941 *
4942 * => called with map locked.
4943  * => returns non-zero if successfully merged.
4944 */
4945
4946 int
4947 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4948 {
4949 struct uvm_object *uobj;
4950 struct vm_map_entry *next;
4951 struct vm_map_entry *prev;
4952 vsize_t size;
4953 int merged = 0;
4954 bool copying;
4955 int newetype;
4956
4957 if (VM_MAP_USE_KMAPENT(map)) {
4958 return 0;
4959 }
4960 if (entry->aref.ar_amap != NULL) {
4961 return 0;
4962 }
4963 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4964 return 0;
4965 }
4966
4967 uobj = entry->object.uvm_obj;
4968 size = entry->end - entry->start;
4969 copying = (flags & UVM_MERGE_COPYING) != 0;
4970 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4971
4972 next = entry->next;
4973 if (next != &map->header &&
4974 next->start == entry->end &&
4975 ((copying && next->aref.ar_amap != NULL &&
4976 amap_refs(next->aref.ar_amap) == 1) ||
4977 (!copying && next->aref.ar_amap == NULL)) &&
4978 UVM_ET_ISCOMPATIBLE(next, newetype,
4979 uobj, entry->flags, entry->protection,
4980 entry->max_protection, entry->inheritance, entry->advice,
4981 entry->wired_count) &&
4982 (uobj == NULL || entry->offset + size == next->offset)) {
4983 int error;
4984
4985 if (copying) {
4986 error = amap_extend(next, size,
4987 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4988 } else {
4989 error = 0;
4990 }
4991 if (error == 0) {
4992 if (uobj) {
4993 if (uobj->pgops->pgo_detach) {
4994 uobj->pgops->pgo_detach(uobj);
4995 }
4996 }
4997
4998 entry->end = next->end;
4999 clear_hints(map, next);
5000 uvm_map_entry_unlink(map, next);
5001 if (copying) {
5002 entry->aref = next->aref;
5003 entry->etype &= ~UVM_ET_NEEDSCOPY;
5004 }
5005 uvm_map_check(map, "trymerge forwardmerge");
5006 uvm_mapent_free_merged(map, next);
5007 merged++;
5008 }
5009 }
5010
5011 prev = entry->prev;
5012 if (prev != &map->header &&
5013 prev->end == entry->start &&
5014 ((copying && !merged && prev->aref.ar_amap != NULL &&
5015 amap_refs(prev->aref.ar_amap) == 1) ||
5016 (!copying && prev->aref.ar_amap == NULL)) &&
5017 UVM_ET_ISCOMPATIBLE(prev, newetype,
5018 uobj, entry->flags, entry->protection,
5019 entry->max_protection, entry->inheritance, entry->advice,
5020 entry->wired_count) &&
5021 (uobj == NULL ||
5022 prev->offset + prev->end - prev->start == entry->offset)) {
5023 int error;
5024
5025 if (copying) {
5026 error = amap_extend(prev, size,
5027 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
5028 } else {
5029 error = 0;
5030 }
5031 if (error == 0) {
5032 if (uobj) {
5033 if (uobj->pgops->pgo_detach) {
5034 uobj->pgops->pgo_detach(uobj);
5035 }
5036 entry->offset = prev->offset;
5037 }
5038
5039 entry->start = prev->start;
5040 clear_hints(map, prev);
5041 uvm_map_entry_unlink(map, prev);
5042 if (copying) {
5043 entry->aref = prev->aref;
5044 entry->etype &= ~UVM_ET_NEEDSCOPY;
5045 }
5046 uvm_map_check(map, "trymerge backmerge");
5047 uvm_mapent_free_merged(map, prev);
5048 merged++;
5049 }
5050 }
5051
5052 return merged;
5053 }
5054
5055 /*
5056 * uvm_map_create: create map
5057 */
5058
5059 struct vm_map *
5060 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5061 {
5062 struct vm_map *result;
5063
5064 result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
5065 uvm_map_setup(result, vmin, vmax, flags);
5066 result->pmap = pmap;
5067 return(result);
5068 }
5069
5070 /*
5071 * uvm_map_setup: init map
5072 *
5073 * => map must not be in service yet.
5074 */
5075
5076 void
5077 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5078 {
5079 int ipl;
5080
5081 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
5082 map->header.next = map->header.prev = &map->header;
5083 map->nentries = 0;
5084 map->size = 0;
5085 map->ref_count = 1;
5086 vm_map_setmin(map, vmin);
5087 vm_map_setmax(map, vmax);
5088 map->flags = flags;
5089 map->first_free = &map->header;
5090 map->hint = &map->header;
5091 map->timestamp = 0;
5092 map->busy = NULL;
5093
5094 if ((flags & VM_MAP_INTRSAFE) != 0) {
5095 ipl = IPL_VM;
5096 } else {
5097 ipl = IPL_NONE;
5098 }
5099
5100 rw_init(&map->lock);
5101 cv_init(&map->cv, "vm_map");
5102 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5103 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
5104 }
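
#if 0
/*
 * Illustrative sketch only: creating a stand-alone pageable kernel map
 * over a hypothetical VA range.  uvm_map_create() malloc()s the map and
 * runs uvm_map_setup() on it; the caller supplies the pmap.  The helper
 * name is made up for this example.
 */
static struct vm_map *
example_create_kernel_map(vaddr_t vmin, vaddr_t vmax)
{

	return uvm_map_create(pmap_kernel(), vmin, vmax, VM_MAP_PAGEABLE);
}
#endif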
5105
5106
5107 /*
5108 * U N M A P - m a i n e n t r y p o i n t
5109 */
5110
5111 /*
5112  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
5113 *
5114 * => caller must check alignment and size
5115 * => map must be unlocked (we will lock it)
5116 * => flags is UVM_FLAG_QUANTUM or 0.
5117 */
5118
5119 void
5120 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5121 {
5122 struct vm_map_entry *dead_entries;
5123 struct uvm_mapent_reservation umr;
5124 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5125
5126 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5127 map, start, end, 0);
5128 if (map == kernel_map) {
5129 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5130 }
5131 /*
5132 	 * the work is now done by helper functions: wipe the pmap mappings
5133 	 * and then detach from the dead entries...
5134 */
5135 uvm_mapent_reserve(map, &umr, 2, flags);
5136 vm_map_lock(map);
5137 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5138 vm_map_unlock(map);
5139 uvm_mapent_unreserve(map, &umr);
5140
5141 if (dead_entries != NULL)
5142 uvm_unmap_detach(dead_entries, 0);
5143
5144 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5145 }
5146
5147
5148 /*
5149 * uvm_map_reference: add reference to a map
5150 *
5151 * => map need not be locked (we use misc_lock).
5152 */
5153
5154 void
5155 uvm_map_reference(struct vm_map *map)
5156 {
5157 mutex_enter(&map->misc_lock);
5158 map->ref_count++;
5159 mutex_exit(&map->misc_lock);
5160 }
5161
5162 struct vm_map_kernel *
5163 vm_map_to_kernel(struct vm_map *map)
5164 {
5165
5166 KASSERT(VM_MAP_IS_KERNEL(map));
5167
5168 return (struct vm_map_kernel *)map;
5169 }
5170
5171 bool
5172 vm_map_starved_p(struct vm_map *map)
5173 {
5174
5175 if ((map->flags & VM_MAP_WANTVA) != 0) {
5176 return true;
5177 }
5178 	/* XXX: rough heuristic: starved if more than 15/16 of the VA range is in use */
5179 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5180 return true;
5181 }
5182 return false;
5183 }
5184
5185 void
5186 uvm_map_lock_entry(struct vm_map_entry *entry)
5187 {
5188
5189 if (entry->aref.ar_amap != NULL) {
5190 amap_lock(entry->aref.ar_amap);
5191 }
5192 if (UVM_ET_ISOBJ(entry)) {
5193 mutex_enter(entry->object.uvm_obj->vmobjlock);
5194 }
5195 }
5196
5197 void
5198 uvm_map_unlock_entry(struct vm_map_entry *entry)
5199 {
5200
5201 if (UVM_ET_ISOBJ(entry)) {
5202 mutex_exit(entry->object.uvm_obj->vmobjlock);
5203 }
5204 if (entry->aref.ar_amap != NULL) {
5205 amap_unlock(entry->aref.ar_amap);
5206 }
5207 }
5208
5209 #if defined(DDB) || defined(DEBUGPRINT)
5210
5211 /*
5212 * uvm_map_printit: actually prints the map
5213 */
5214
5215 void
5216 uvm_map_printit(struct vm_map *map, bool full,
5217 void (*pr)(const char *, ...))
5218 {
5219 struct vm_map_entry *entry;
5220
5221 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
5222 vm_map_max(map));
5223 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
5224 map->nentries, map->size, map->ref_count, map->timestamp,
5225 map->flags);
5226 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5227 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5228 if (!full)
5229 return;
5230 for (entry = map->header.next; entry != &map->header;
5231 entry = entry->next) {
5232 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
5233 entry, entry->start, entry->end, entry->object.uvm_obj,
5234 (long long)entry->offset, entry->aref.ar_amap,
5235 entry->aref.ar_pageoff);
5236 (*pr)(
5237 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5238 "wc=%d, adv=%d\n",
5239 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5240 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5241 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5242 entry->protection, entry->max_protection,
5243 entry->inheritance, entry->wired_count, entry->advice);
5244 }
5245 }
5246
5247 void
5248 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5249 {
5250 struct vm_map *map;
5251
5252 for (map = kernel_map;;) {
5253 struct vm_map_entry *entry;
5254
5255 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5256 break;
5257 }
5258 (*pr)("%p is %p+%zu from VMMAP %p\n",
5259 (void *)addr, (void *)entry->start,
5260 (size_t)(addr - (uintptr_t)entry->start), map);
5261 if (!UVM_ET_ISSUBMAP(entry)) {
5262 break;
5263 }
5264 map = entry->object.sub_map;
5265 }
5266 }
5267
5268 #endif /* DDB || DEBUGPRINT */
5269
5270 #ifndef __USER_VA0_IS_SAFE
5271 static int
5272 sysctl_user_va0_disable(SYSCTLFN_ARGS)
5273 {
5274 struct sysctlnode node;
5275 int t, error;
5276
5277 node = *rnode;
5278 node.sysctl_data = &t;
5279 t = user_va0_disable;
5280 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5281 if (error || newp == NULL)
5282 return (error);
5283
5284 /* lower only at securelevel < 1 */
5285 if (!t && user_va0_disable &&
5286 kauth_authorize_system(l->l_cred,
5287 KAUTH_SYSTEM_CHSYSFLAGS /* XXX */, 0,
5288 NULL, NULL, NULL))
5289 return EPERM;
5290
5291 user_va0_disable = !!t;
5292 return 0;
5293 }
5294
5295 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
5296 {
5297
5298 sysctl_createv(clog, 0, NULL, NULL,
5299 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5300 CTLTYPE_INT, "user_va0_disable",
5301 SYSCTL_DESCR("Disable VA 0"),
5302 sysctl_user_va0_disable, 0, &user_va0_disable, 0,
5303 CTL_VM, CTL_CREATE, CTL_EOL);
5304 }
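
/*
 * Usage note (illustrative): the node created above shows up as
 * "vm.user_va0_disable", e.g. "sysctl -w vm.user_va0_disable=1".
 * Lowering it again is refused with EPERM unless the caller passes the
 * kauth check in sysctl_user_va0_disable() above.
 */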
5305 #endif
5306