/*	$NetBSD: uvm_map.c,v 1.298 2011/06/12 03:36:03 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.298 2011/06/12 03:36:03 rmind Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>
#ifndef __USER_VA0_IS_SAFE
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include "opt_user_va0_disable_default.h"
#endif

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_list)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)
UVMMAP_EVCNT_DEFINE(mlk_listloop)

UVMMAP_EVCNT_DEFINE(uke_alloc)
UVMMAP_EVCNT_DEFINE(uke_free)
UVMMAP_EVCNT_DEFINE(ukh_alloc)
UVMMAP_EVCNT_DEFINE(ukh_free)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

#ifndef __USER_VA0_IS_SAFE
#ifndef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT 1
#endif
#ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
#undef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
#endif
static int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
#endif

/*
 * macros
 */

/*
 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
 * for the vm_map.
 */
extern struct vm_map *pager_map; /* XXX */
#define	VM_MAP_USE_KMAPENT_FLAGS(flags) \
	(((flags) & VM_MAP_INTRSAFE) != 0)
#define	VM_MAP_USE_KMAPENT(map) \
	(VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
	== 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define	SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define	VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static struct vm_map_entry *
		uvm_mapent_alloc_split(struct vm_map *,
		    const struct vm_map_entry *, int,
		    struct uvm_mapent_reservation *);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, const char *,
		    int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __FILE__, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */
static struct vm_map_entry *
		uvm_kmapent_alloc(struct vm_map *, int);
static void	uvm_kmapent_free(struct vm_map_entry *);
static vsize_t	uvm_kmapent_overhead(vsize_t);

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

static int
uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
{
	const struct vm_map_entry *eparent = nparent;
	const struct vm_map_entry *ekey = nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (eparent->start < ekey->start)
		return -1;
	if (eparent->end >= ekey->start)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (eparent->start < va)
		return -1;
	if (eparent->end >= va)
		return 1;
	return 0;
}

static const rb_tree_ops_t uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
	.rbto_context = NULL
};
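
/*
 * Illustrative sketch (not compiled): how the comparators above are
 * consumed by rbtree(9).  A map's tree is initialized with these ops,
 * after which an entry can be located by base address.  Variable
 * names here are hypothetical.
 */
#if 0
	struct vm_map *map;	/* some map being set up */
	vaddr_t va;		/* address of interest */

	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
	/* ... entries are inserted via uvm_rb_insert() ... */
	struct vm_map_entry *e = rb_tree_find_node(&map->rb_tree, &va);
#endif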

/*
 * uvm_rb_gap: return the gap size between our entry and next entry.
 */
static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{

	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendants.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendants, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
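
/*
 * For reference, an uncached recursive version of the computation above,
 * which the cached one must agree with.  Sketch only (not compiled);
 * "uvm_rb_maxgap_slow" is a hypothetical name used for illustration.
 */
#if 0
static vsize_t
uvm_rb_maxgap_slow(const struct vm_map_entry *entry)
{
	vsize_t maxgap = uvm_rb_gap(entry);
	const struct vm_map_entry *child;

	if ((child = LEFT_ENTRY(entry)) != NULL)
		maxgap = MAX(maxgap, uvm_rb_maxgap_slow(child));
	if ((child = RIGHT_ENTRY(entry)) != NULL)
		maxgap = MAX(maxgap, uvm_rb_maxgap_slow(child));
	return maxgap;
}
#endif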

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;
		unsigned int which;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work towards the root, we know entry's maxgap
		 * value is OK, but its brothers may now be out-of-date due
		 * to rebalancing.  So refresh it.
		 */
		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *ret;

	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	ret = rb_tree_insert_node(&map->rb_tree, entry);
	KASSERTMSG(ret == entry,
	    ("uvm_rb_insert: map %p: duplicate entry %p", map, ret)
	);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, entry);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %lx != %lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, tmp->gap will be unsigned
		 * and will likely exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %lx != %lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
595 printf("corrupt: 0x%"PRIxVADDR"x >= 0x%"PRIxVADDR"x\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

#ifdef DIAGNOSTIC
static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
#endif

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => Note that "intrsafe" maps use only exclusive, spin locks.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		mutex_spin_enter(&map->mutex);
		return;
	}

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL)
			break;
		if (map->busy == curlwp)
			break;
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL)
			cv_wait(&map->cv, &map->misc_lock);
		mutex_exit(&map->misc_lock);
	}

	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		return mutex_tryenter(&map->mutex);
	if (!rw_tryenter(&map->lock, RW_WRITER))
		return false;
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}

	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		mutex_spin_exit(&map->mutex);
	else {
		KASSERT(rw_write_held(&map->lock));
		KASSERT(map->busy == NULL || map->busy == curlwp);
		rw_exit(&map->lock);
	}
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 *	want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}
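
/*
 * Illustrative sketch (not compiled): the usual "busy" dance.  A thread
 * that must drop the map lock while keeping other updaters out marks the
 * map busy under the write lock, unlocks while it works (possibly
 * sleeping), and then clears the busy state, waking any waiters.
 */
#if 0
	vm_map_lock(map);
	vm_map_busy(map);
	vm_map_unlock(map);
	/* ... potentially sleep, e.g. to do I/O or wire pages ... */
	vm_map_lock(map);
	vm_map_unbusy(map);
	vm_map_unlock(map);
#endif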

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		return mutex_owned(&map->mutex);
	} else {
		return rw_write_held(&map->lock);
	}
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (VM_MAP_USE_KMAPENT(map)) {
		me = uvm_kmapent_alloc(map, flags);
	} else {
		me = pool_cache_get(&uvm_map_entry_cache, pflags);
		if (__predict_false(me == NULL))
			return NULL;
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return (me);
}

/*
 * uvm_mapent_alloc_split: allocate a map entry for clipping.
 *
 * => map must be locked by caller if UVM_MAP_QUANTUM is set.
 */

static struct vm_map_entry *
uvm_mapent_alloc_split(struct vm_map *map,
    const struct vm_map_entry *old_entry, int flags,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *me;

	KASSERT(!VM_MAP_USE_KMAPENT(map) ||
	    (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));

	if (old_entry->flags & UVM_MAP_QUANTUM) {
		struct vm_map_kernel *vmk = vm_map_to_kernel(map);

		KASSERT(vm_map_locked_p(map));
		me = vmk->vmk_merged_entries;
		KASSERT(me);
		vmk->vmk_merged_entries = me->next;
		KASSERT(me->flags & UVM_MAP_QUANTUM);
	} else {
		me = uvm_mapent_alloc(map, flags);
	}

	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_KERNEL) {
		uvm_kmapent_free(me);
	} else {
		pool_cache_put(&uvm_map_entry_cache, me);
	}
}

/*
 * uvm_mapent_free_merged: free merged map entry
 *
 * => keep the entry if needed.
 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
 * => map should be locked if UVM_MAP_QUANTUM is set.
 */

static void
uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
{

	KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);

	if (me->flags & UVM_MAP_QUANTUM) {
		/*
		 * keep this entry for later splitting.
		 */
		struct vm_map_kernel *vmk;

		KASSERT(vm_map_locked_p(map));
		KASSERT(VM_MAP_IS_KERNEL(map));
		KASSERT(!VM_MAP_USE_KMAPENT(map) ||
		    (me->flags & UVM_MAP_KERNEL));

		vmk = vm_map_to_kernel(map);
		me->next = vmk->vmk_merged_entries;
		vmk->vmk_merged_entries = me;
	} else {
		uvm_mapent_free(me);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}
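
/*
 * Illustrative sketch (hypothetical struct, not the real vm_map_entry):
 * the uvm_map_entry_stop_copy marker in uvm_map.h names the first field
 * that must NOT be copied, so the memcpy above copies only the prefix of
 * the structure that precedes the marker.
 */
#if 0
struct example_entry {
	int	copied_a;		/* copied */
	int	copied_b;		/* copied */
	int	example_stop_copy;	/* marker: this and later not copied */
	int	private_state;		/* preserved in "dst" */
};
/* copies copied_a and copied_b only: */
memcpy(dst, src, offsetof(struct example_entry, example_stop_copy));
#endif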

/*
 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
 * map entries.
 *
 * => size and flags are the same as uvm_km_suballoc's ones.
 */

vsize_t
uvm_mapent_overhead(vsize_t size, int flags)
{

	if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
		return uvm_kmapent_overhead(size);
	}
	return 0;
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
#if defined(UVMHIST)
	static struct kern_history_ent maphistbuf[100];
	static struct kern_history_ent pdhistbuf[100];
#endif

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);

	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}
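
/*
 * Worked example: splitting [0x1000, 0x5000) at 0x3000 gives
 * adj = 0x2000; entry1 becomes [0x1000, 0x3000), entry2 becomes
 * [0x3000, 0x5000), entry2->offset grows by 0x2000 for object-backed
 * entries, and any amap reference is split at the same displacement.
 */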

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start, struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}
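
/*
 * Illustrative sketch (not compiled): a caller operating on [start, end)
 * typically clips the first and last overlapping entries so that the
 * range boundaries coincide with entry boundaries.  "umr" stands in for
 * a caller-provided map entry reservation.
 */
#if 0
	vm_map_lock(map);
	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start, umr);
		/* ... walk the entries in the range; for the last one: */
		UVM_MAP_CLIP_END(map, entry, end, umr);
	}
	vm_map_unlock(map);
#endif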

static void
vm_map_drain(struct vm_map *map, uvm_flag_t flags)
{

	if (!VM_MAP_IS_KERNEL(map)) {
		return;
	}

	uvm_km_va_drain(map, flags);
}

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.  note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
	KASSERT((size & PAGE_MASK) == 0);

#ifndef __USER_VA0_IS_SAFE
	if ((flags & UVM_FLAG_FIXED) && *startp == 0 &&
	    !VM_MAP_IS_KERNEL(map) && user_va0_disable)
		return EACCES;
#endif

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 *
	 * Also, because we allocate entries for in-kernel maps
	 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
	 * allocate them before locking the map.
	 */

	new_entry = NULL;
	if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
	    map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
		if (flags & UVM_FLAG_QUANTUM)
			new_entry->flags |= UVM_MAP_QUANTUM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map)) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}
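
/*
 * Illustrative sketch (not compiled): a minimal anonymous, copy-on-write
 * user mapping established with uvm_map().  The values are examples only.
 */
#if 0
	vaddr_t va = 0;		/* let uvm_map() choose, starting from 0 */
	int error;

	error = uvm_map(&p->p_vmspace->vm_map, &va, PAGE_SIZE, NULL,
	    UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW));
	if (error)
		return error;
#endif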

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC("uvm_map_prepare");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL ||
	    (map->flags & VM_MAP_INTRSAFE));

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */

retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
		    (map->flags & VM_MAP_INTRSAFE) == 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags);
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		vm_map_drain(map, flags);

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.  in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).  this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).  the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
	return 0;
}
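
/*
 * Illustrative sketch (not compiled): the two-phase interface used by
 * callers that must allocate resources between the address-space decision
 * and the actual insertion.  uvm_map() above is the common wrapper around
 * the same pair.
 */
#if 0
	struct uvm_map_args args;
	int error;

	error = uvm_map_prepare(map, start, size, uobj, uoffset, align,
	    flags, &args);
	if (error == 0) {
		/* map is still locked; args describe the chosen range */
		error = uvm_map_enter(map, &args, new_entry);
	}
#endif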

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);
	const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
	    UVM_MAP_QUANTUM : 0;

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC("uvm_map_enter");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */

	if (flags & UVM_FLAG_QUANTUM) {
		KASSERT(new_entry);
		KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
	}

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.  might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay there
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk.  Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		     (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 * (if merged, we've already detached)
		 */
		if (uobj && uobj->pgops->pgo_detach && !merged)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;
done:
	if ((flags & UVM_FLAG_QUANTUM) == 0) {
		/*
		 * vmk_merged_entries is locked by the map's lock.
		 */
		vm_map_unlock(map);
	}
	if (new_entry && error == 0) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, new_entry);
		new_entry = NULL;
	}
	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, dead);
	}
	if ((flags & UVM_FLAG_QUANTUM) != 0) {
		vm_map_unlock(map);
	}
	if (new_entry != NULL) {
		uvm_mapent_free(new_entry);
	}
	return error;
}

/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	while (cur) {
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			cur = RIGHT_ENTRY(cur);
		} else
			cur = LEFT_ENTRY(cur);
	}
	*entry = prev;
	return false;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	bool use_tree = false;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	UVMMAP_EVCNT_INCR(mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		if (cur != &map->header && cur->end > address) {
			UVMMAP_EVCNT_INCR(mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			uvm_mapent_check(*entry);
			return (true);
		}

		if (map->nentries > 15)
			use_tree = true;
	} else {

		/*
		 * invalid hint.  use tree.
		 */
		use_tree = true;
	}

	uvm_map_check(map, __func__);

	if (use_tree) {
		/*
		 * Simple lookup in the tree.  Happens when the hint is
		 * invalid, or nentries reach a threshold.
		 */
		UVMMAP_EVCNT_INCR(mlk_tree);
		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
			goto got;
		} else {
			goto failed;
		}
	}

	/*
	 * search linearly
	 */

	UVMMAP_EVCNT_INCR(mlk_list);
	while (cur != &map->header) {
		UVMMAP_EVCNT_INCR(mlk_listloop);
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
got:
				SAVE_HINT(map, map->hint, *entry);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				KDASSERT((*entry)->start <= address);
				KDASSERT(address < (*entry)->end);
				uvm_mapent_check(*entry);
				return (true);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
failed:
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}
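
/*
 * Illustrative sketch (not compiled): typical use of the lookup
 * primitive under a read lock, following the contract above.
 */
#if 0
	struct vm_map_entry *entry;

	vm_map_lock_read(map);
	if (uvm_map_lookup_entry(map, va, &entry)) {
		/* va lies inside [entry->start, entry->end) */
	} else {
		/* entry is the nearest entry before va, or &map->header */
	}
	vm_map_unlock_read(map);
#endif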

/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits,
 * 0 if it doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if (align != 0) {
		if ((*start & (align - 1)) != 0) {
			if (topdown)
				*start &= ~(align - 1);
			else
				*start = roundup(*start, align);
		}
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}
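
/*
 * Worked example for the alignment step above, with align = 0x10000:
 * bottom-up, *start = 0x12345 is rounded up to 0x20000; top-down it is
 * truncated down to 0x10000.  Either way the adjusted address is then
 * tested against the gap, or the caller moves on to another gap.
 */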
1919
1920 /*
1921 * uvm_map_findspace: find "length" sized space in "map".
1922 *
1923 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1924 * set in "flags" (in which case we insist on using "hint").
1925 * => "result" is VA returned
1926 * => uobj/uoffset are to be used to handle VAC alignment, if required
1927 * => if "align" is non-zero, we attempt to align to that value.
1928 * => caller must at least have read-locked map
1929 * => returns NULL on failure, or pointer to prev. map entry if success
1930 * => note this is a cross between the old vm_map_findspace and vm_map_find
1931 */
1932
1933 struct vm_map_entry *
1934 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1935 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1936 vsize_t align, int flags)
1937 {
1938 struct vm_map_entry *entry;
1939 struct vm_map_entry *child, *prev, *tmp;
1940 vaddr_t orig_hint;
1941 const int topdown = map->flags & VM_MAP_TOPDOWN;
1942 UVMHIST_FUNC("uvm_map_findspace");
1943 UVMHIST_CALLED(maphist);
1944
1945 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1946 map, hint, length, flags);
1947 KASSERT((align & (align - 1)) == 0);
1948 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1949
1950 uvm_map_check(map, "map_findspace entry");
1951
1952 /*
1953 * remember the original hint. if we are aligning, then we
1954 * may have to try again with no alignment constraint if
1955 * we fail the first time.
1956 */
1957
1958 orig_hint = hint;
1959 if (hint < vm_map_min(map)) { /* check ranges ... */
1960 if (flags & UVM_FLAG_FIXED) {
1961 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1962 return (NULL);
1963 }
1964 hint = vm_map_min(map);
1965 }
1966 if (hint > vm_map_max(map)) {
1967 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1968 hint, vm_map_min(map), vm_map_max(map), 0);
1969 return (NULL);
1970 }
1971
1972 /*
1973 * Look for the first possible address; if there's already
1974 * something at this address, we have to start after it.
1975 */
1976
1977 /*
1978 * @@@: there are four, no, eight cases to consider.
1979 *
1980 * 0: found, fixed, bottom up -> fail
1981 * 1: found, fixed, top down -> fail
1982 * 2: found, not fixed, bottom up -> start after entry->end,
1983 * loop up
1984 * 3: found, not fixed, top down -> start before entry->start,
1985 * loop down
1986 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1987 * 5: not found, fixed, top down -> check entry->next->start, fail
1988 * 6: not found, not fixed, bottom up -> check entry->next->start,
1989 * loop up
1990 * 7: not found, not fixed, top down -> check entry->next->start,
1991 * loop down
1992 *
1993 * as you can see, it reduces to roughly five cases, and
1994 * adding top down mapping only adds one unique case (without
1995 * it, there would be four cases).
1996 */
1997
1998 if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1999 entry = map->first_free;
2000 } else {
2001 if (uvm_map_lookup_entry(map, hint, &entry)) {
2002 /* "hint" address already in use ... */
2003 if (flags & UVM_FLAG_FIXED) {
2004 UVMHIST_LOG(maphist, "<- fixed & VA in use",
2005 0, 0, 0, 0);
2006 return (NULL);
2007 }
2008 if (topdown)
2009 /* Start from lower gap. */
2010 entry = entry->prev;
2011 } else if (flags & UVM_FLAG_FIXED) {
2012 if (entry->next->start >= hint + length &&
2013 hint + length > hint)
2014 goto found;
2015
2016 /* "hint" address is in a gap, but the gap is too small */
2017 UVMHIST_LOG(maphist, "<- fixed mapping failed",
2018 0, 0, 0, 0);
2019 return (NULL); /* only one shot at it ... */
2020 } else {
2021 /*
2022 * See if given hint fits in this gap.
2023 */
2024 switch (uvm_map_space_avail(&hint, length,
2025 uoffset, align, topdown, entry)) {
2026 case 1:
2027 goto found;
2028 case -1:
2029 goto wraparound;
2030 }
2031
2032 if (topdown) {
2033 /*
2034 * There is still a chance to fit
2035 * if hint > entry->end.
2036 */
2037 } else {
2038 /* Start from higher gap. */
2039 entry = entry->next;
2040 if (entry == &map->header)
2041 goto notfound;
2042 goto nextgap;
2043 }
2044 }
2045 }
2046
2047 /*
2048 * Note that the UVM_FLAG_FIXED case has already been handled.
2049 */
2050 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2051
2052 /* Try to find the space in the red-black tree */
2053
2054 /* Check slot before any entry */
2055 hint = topdown ? entry->next->start - length : entry->end;
2056 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2057 topdown, entry)) {
2058 case 1:
2059 goto found;
2060 case -1:
2061 goto wraparound;
2062 }
2063
2064 nextgap:
2065 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2066 /* If there is not enough space in the whole tree, we fail */
2067 tmp = ROOT_ENTRY(map);
2068 if (tmp == NULL || tmp->maxgap < length)
2069 goto notfound;
2070
2071 prev = NULL; /* previous candidate */
2072
2073 /* Find an entry close to hint that has enough space */
2074 for (; tmp;) {
2075 KASSERT(tmp->next->start == tmp->end + tmp->gap);
2076 if (topdown) {
2077 if (tmp->next->start < hint + length &&
2078 (prev == NULL || tmp->end > prev->end)) {
2079 if (tmp->gap >= length)
2080 prev = tmp;
2081 else if ((child = LEFT_ENTRY(tmp)) != NULL
2082 && child->maxgap >= length)
2083 prev = tmp;
2084 }
2085 } else {
2086 if (tmp->end >= hint &&
2087 (prev == NULL || tmp->end < prev->end)) {
2088 if (tmp->gap >= length)
2089 prev = tmp;
2090 else if ((child = RIGHT_ENTRY(tmp)) != NULL
2091 && child->maxgap >= length)
2092 prev = tmp;
2093 }
2094 }
2095 if (tmp->next->start < hint + length)
2096 child = RIGHT_ENTRY(tmp);
2097 else if (tmp->end > hint)
2098 child = LEFT_ENTRY(tmp);
2099 else {
2100 if (tmp->gap >= length)
2101 break;
2102 if (topdown)
2103 child = LEFT_ENTRY(tmp);
2104 else
2105 child = RIGHT_ENTRY(tmp);
2106 }
2107 if (child == NULL || child->maxgap < length)
2108 break;
2109 tmp = child;
2110 }
2111
2112 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2113 /*
2114 * Check if the entry that we found satisfies the
2115 * space requirement
2116 */
2117 if (topdown) {
2118 if (hint > tmp->next->start - length)
2119 hint = tmp->next->start - length;
2120 } else {
2121 if (hint < tmp->end)
2122 hint = tmp->end;
2123 }
2124 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2125 topdown, tmp)) {
2126 case 1:
2127 entry = tmp;
2128 goto found;
2129 case -1:
2130 goto wraparound;
2131 }
2132 if (tmp->gap >= length)
2133 goto listsearch;
2134 }
2135 if (prev == NULL)
2136 goto notfound;
2137
2138 if (topdown) {
2139 KASSERT(orig_hint >= prev->next->start - length ||
2140 prev->next->start - length > prev->next->start);
2141 hint = prev->next->start - length;
2142 } else {
2143 KASSERT(orig_hint <= prev->end);
2144 hint = prev->end;
2145 }
2146 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2147 topdown, prev)) {
2148 case 1:
2149 entry = prev;
2150 goto found;
2151 case -1:
2152 goto wraparound;
2153 }
2154 if (prev->gap >= length)
2155 goto listsearch;
2156
2157 if (topdown)
2158 tmp = LEFT_ENTRY(prev);
2159 else
2160 tmp = RIGHT_ENTRY(prev);
2161 for (;;) {
2162 KASSERT(tmp && tmp->maxgap >= length);
2163 if (topdown)
2164 child = RIGHT_ENTRY(tmp);
2165 else
2166 child = LEFT_ENTRY(tmp);
2167 if (child && child->maxgap >= length) {
2168 tmp = child;
2169 continue;
2170 }
2171 if (tmp->gap >= length)
2172 break;
2173 if (topdown)
2174 tmp = LEFT_ENTRY(tmp);
2175 else
2176 tmp = RIGHT_ENTRY(tmp);
2177 }
2178
2179 if (topdown) {
2180 KASSERT(orig_hint >= tmp->next->start - length ||
2181 tmp->next->start - length > tmp->next->start);
2182 hint = tmp->next->start - length;
2183 } else {
2184 KASSERT(orig_hint <= tmp->end);
2185 hint = tmp->end;
2186 }
2187 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2188 topdown, tmp)) {
2189 case 1:
2190 entry = tmp;
2191 goto found;
2192 case -1:
2193 goto wraparound;
2194 }
2195
2196 /*
2197 * The tree fails to find an entry because of offset or alignment
2198 * restrictions. Search the list instead.
2199 */
2200 listsearch:
2201 /*
2202 * Look through the rest of the map, trying to fit a new region in
2203 * the gap between existing regions, or after the very last region.
2204 * note: entry->end = base VA of current gap,
2205 * entry->next->start = VA of end of current gap
2206 */
2207
2208 for (;;) {
2209 /* Update hint for current gap. */
2210 hint = topdown ? entry->next->start - length : entry->end;
2211
2212 /* See if it fits. */
2213 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2214 topdown, entry)) {
2215 case 1:
2216 goto found;
2217 case -1:
2218 goto wraparound;
2219 }
2220
2221 /* Advance to next/previous gap */
2222 if (topdown) {
2223 if (entry == &map->header) {
2224 UVMHIST_LOG(maphist, "<- failed (off start)",
2225 0,0,0,0);
2226 goto notfound;
2227 }
2228 entry = entry->prev;
2229 } else {
2230 entry = entry->next;
2231 if (entry == &map->header) {
2232 UVMHIST_LOG(maphist, "<- failed (off end)",
2233 0,0,0,0);
2234 goto notfound;
2235 }
2236 }
2237 }
2238
2239 found:
2240 SAVE_HINT(map, map->hint, entry);
2241 *result = hint;
2242 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2243 KASSERT( topdown || hint >= orig_hint);
2244 KASSERT(!topdown || hint <= orig_hint);
2245 KASSERT(entry->end <= hint);
2246 KASSERT(hint + length <= entry->next->start);
2247 return (entry);
2248
2249 wraparound:
2250 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2251
2252 return (NULL);
2253
2254 notfound:
2255 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2256
2257 return (NULL);
2258 }
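
/*
 * Example (illustrative sketch): a minimal findspace call of the
 * kind uvm_map() makes; "map", "size", "uobj" and "uoffset" are
 * assumed to be set up by the caller.
 *
 *	struct vm_map_entry *prev;
 *	vaddr_t va;
 *
 *	vm_map_lock(map);			(at least read-locked)
 *	prev = uvm_map_findspace(map, vm_map_min(map), size, &va,
 *	    uobj, uoffset, 0, 0);
 *	if (prev == NULL) {
 *		vm_map_unlock(map);
 *		return ENOMEM;		(no space, or address wrapped)
 *	}
 *	(the new entry is then linked in after "prev", covering
 *	 [va, va + size))
 */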
2259
2260 /*
2261 * U N M A P - m a i n h e l p e r f u n c t i o n s
2262 */
2263
2264 /*
2265 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2266 *
2267 * => caller must check alignment and size
2268 * => map must be locked by caller
2269 * => we return a list of map entries that we've removed from the map
2270 * in "entry_list"
2271 */
2272
2273 void
2274 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2275 struct vm_map_entry **entry_list /* OUT */,
2276 struct uvm_mapent_reservation *umr, int flags)
2277 {
2278 struct vm_map_entry *entry, *first_entry, *next;
2279 vaddr_t len;
2280 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2281
2282 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2283 map, start, end, 0);
2284 VM_MAP_RANGE_CHECK(map, start, end);
2285
2286 uvm_map_check(map, "unmap_remove entry");
2287
2288 /*
2289 * find first entry
2290 */
2291
2292 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2293 /* clip and go... */
2294 entry = first_entry;
2295 UVM_MAP_CLIP_START(map, entry, start, umr);
2296 /* critical! prevents stale hint */
2297 SAVE_HINT(map, entry, entry->prev);
2298 } else {
2299 entry = first_entry->next;
2300 }
2301
2302 /*
2303 * Save the free space hint
2304 */
2305
2306 if (map->first_free != &map->header && map->first_free->start >= start)
2307 map->first_free = entry->prev;
2308
2309 /*
2310 * note: we now re-use first_entry for a different task. we remove
2311 * a number of map entries from the map and save them in a linked
2312 * list headed by "first_entry". once we remove them from the map
2313 * the caller should unlock the map and drop the references to the
2314 * backing objects [c.f. uvm_unmap_detach]. the object is to
2315 * separate unmapping from reference dropping. why?
2316 * [1] the map has to be locked for unmapping
2317 * [2] the map need not be locked for reference dropping
2318 * [3] dropping references may trigger pager I/O, and if we hit
2319 * a pager that does synchronous I/O we may have to wait for it.
2320 * [4] we would like all waiting for I/O to occur with maps unlocked
2321 * so that we don't block other threads.
2322 */
2323
2324 first_entry = NULL;
2325 *entry_list = NULL;
2326
2327 /*
2328 * break up the area into map entry sized regions and unmap. note
2329 * that all mappings have to be removed before we can even consider
2330 * dropping references to amaps or VM objects (otherwise we could end
2331 * up with a mapping to a page on the free list which would be very bad)
2332 */
2333
2334 while ((entry != &map->header) && (entry->start < end)) {
2335 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2336
2337 UVM_MAP_CLIP_END(map, entry, end, umr);
2338 next = entry->next;
2339 len = entry->end - entry->start;
2340
2341 /*
2342 * unwire before removing addresses from the pmap; otherwise
2343 * unwiring will put the entries back into the pmap (XXX).
2344 */
2345
2346 if (VM_MAPENT_ISWIRED(entry)) {
2347 uvm_map_entry_unwire(map, entry);
2348 }
2349 if (flags & UVM_FLAG_VAONLY) {
2350
2351 /* nothing */
2352
2353 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2354
2355 /*
2356 * if the map is non-pageable, any pages mapped there
2357 * must be wired and entered with pmap_kenter_pa(),
2358 * and we should free any such pages immediately.
2359 * this is mostly used for kmem_map.
2360 */
2361 KASSERT(vm_map_pmap(map) == pmap_kernel());
2362
2363 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2364 uvm_km_pgremove_intrsafe(map, entry->start,
2365 entry->end);
2366 pmap_kremove(entry->start, len);
2367 }
2368 } else if (UVM_ET_ISOBJ(entry) &&
2369 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2370 KASSERT(vm_map_pmap(map) == pmap_kernel());
2371
2372 /*
2373 * note: kernel object mappings are currently used in
2374 * two ways:
2375 * [1] "normal" mappings of pages in the kernel object
2376 * [2] uvm_km_valloc'd allocations in which we
2377 * pmap_enter in some non-kernel-object page
2378 * (e.g. vmapbuf).
2379 *
2380 * for case [1], we need to remove the mapping from
2381 * the pmap and then remove the page from the kernel
2382 * object (because, once pages in a kernel object are
2383 * unmapped they are no longer needed, unlike, say,
2384 * a vnode where you might want the data to persist
2385 * until flushed out of a queue).
2386 *
2387 * for case [2], we need to remove the mapping from
2388 * the pmap. there shouldn't be any pages at the
2389 * specified offset in the kernel object [but it
2390 * doesn't hurt to call uvm_km_pgremove just to be
2391 * safe?]
2392 *
2393 * uvm_km_pgremove currently does the following:
2394 * for pages in the kernel object in range:
2395 * - drops the swap slot
2396 * - uvm_pagefree the page
2397 */
2398
2399 /*
2400 * remove mappings from pmap and drop the pages
2401 * from the object. offsets are always relative
2402 * to vm_map_min(kernel_map).
2403 *
2404 * don't need to lock object as the kernel is
2405 * always self-consistent.
2406 */
2407
2408 pmap_remove(pmap_kernel(), entry->start,
2409 entry->start + len);
2410 uvm_km_pgremove(entry->start, entry->end);
2411
2412 /*
2413 * null out kernel_object reference, we've just
2414 * dropped it
2415 */
2416
2417 entry->etype &= ~UVM_ET_OBJ;
2418 entry->object.uvm_obj = NULL;
2419 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2420 /*
2421 * remove mappings the standard way. lock object
2422 * and/or amap to ensure vm_page state does not
2423 * change while in pmap_remove().
2424 */
2425
2426 uvm_map_lock_entry(entry);
2427 pmap_remove(map->pmap, entry->start, entry->end);
2428 uvm_map_unlock_entry(entry);
2429 }
2430
2431 #if defined(DEBUG)
2432 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2433
2434 /*
2435 * check if there are any remaining mappings,
2436 * which would be a bug in the caller.
2437 */
2438
2439 vaddr_t va;
2440 for (va = entry->start; va < entry->end;
2441 va += PAGE_SIZE) {
2442 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2443 panic("uvm_unmap_remove: has mapping");
2444 }
2445 }
2446
2447 if (VM_MAP_IS_KERNEL(map)) {
2448 uvm_km_check_empty(map, entry->start,
2449 entry->end);
2450 }
2451 }
2452 #endif /* defined(DEBUG) */
2453
2454 /*
2455 * remove entry from map and put it on our list of entries
2456 * that we've nuked. then go to next entry.
2457 */
2458
2459 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2460
2461 /* critical! prevents stale hint */
2462 SAVE_HINT(map, entry, entry->prev);
2463
2464 uvm_map_entry_unlink(map, entry);
2465 KASSERT(map->size >= len);
2466 map->size -= len;
2467 entry->prev = NULL;
2468 entry->next = first_entry;
2469 first_entry = entry;
2470 entry = next;
2471 }
2472
2473 /*
2474 * Note: if map is dying, leave pmap_update() for pmap_destroy(),
2475 * which will be called later.
2476 */
2477 if ((map->flags & VM_MAP_DYING) == 0) {
2478 pmap_update(vm_map_pmap(map));
2479 } else {
2480 KASSERT(vm_map_pmap(map) != pmap_kernel());
2481 }
2482
2483 uvm_map_check(map, "unmap_remove leave");
2484
2485 /*
2486 * now we've cleaned up the map and are ready for the caller to drop
2487 * references to the mapped objects.
2488 */
2489
2490 *entry_list = first_entry;
2491 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2492
2493 if (map->flags & VM_MAP_WANTVA) {
2494 mutex_enter(&map->misc_lock);
2495 map->flags &= ~VM_MAP_WANTVA;
2496 cv_broadcast(&map->cv);
2497 mutex_exit(&map->misc_lock);
2498 }
2499 }
2500
2501 /*
2502 * uvm_unmap_detach: drop references in a chain of map entries
2503 *
2504 * => we will free the map entries as we traverse the list.
2505 */
2506
2507 void
2508 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2509 {
2510 struct vm_map_entry *next_entry;
2511 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2512
2513 while (first_entry) {
2514 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2515 UVMHIST_LOG(maphist,
2516 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2517 first_entry, first_entry->aref.ar_amap,
2518 first_entry->object.uvm_obj,
2519 UVM_ET_ISSUBMAP(first_entry));
2520
2521 /*
2522 * drop reference to amap, if we've got one
2523 */
2524
2525 if (first_entry->aref.ar_amap)
2526 uvm_map_unreference_amap(first_entry, flags);
2527
2528 /*
2529 * drop reference to our backing object, if we've got one
2530 */
2531
2532 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2533 if (UVM_ET_ISOBJ(first_entry) &&
2534 first_entry->object.uvm_obj->pgops->pgo_detach) {
2535 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2536 (first_entry->object.uvm_obj);
2537 }
2538 next_entry = first_entry->next;
2539 uvm_mapent_free(first_entry);
2540 first_entry = next_entry;
2541 }
2542 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2543 }
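
/*
 * Example (illustrative sketch): the two-phase unmap pattern the
 * comments above describe -- essentially what uvm_unmap() does.
 * entries are removed with the map locked, then references are
 * dropped with the map unlocked, so that pager I/O cannot block
 * other threads on the map lock.
 *
 *	struct vm_map_entry *dead;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead != NULL)
 *		uvm_unmap_detach(dead, 0);
 */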
2544
2545 /*
2546 * E X T R A C T I O N F U N C T I O N S
2547 */
2548
2549 /*
2550 * uvm_map_reserve: reserve space in a vm_map for future use.
2551 *
2552 * => we reserve space in a map by putting a dummy map entry in the
2553 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2554 * => map should be unlocked (we will write lock it)
2555 * => we return true if we were able to reserve space
2556 * => XXXCDC: should be inline?
2557 */
2558
2559 int
2560 uvm_map_reserve(struct vm_map *map, vsize_t size,
2561 vaddr_t offset /* hint for pmap_prefer */,
2562 vsize_t align /* alignment */,
2563 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2564 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2565 {
2566 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2567
2568 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2569 map,size,offset,raddr);
2570
2571 size = round_page(size);
2572
2573 /*
2574 * reserve some virtual space.
2575 */
2576
2577 if (uvm_map(map, raddr, size, NULL, offset, align,
2578 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2579 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2580 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2581 return (false);
2582 }
2583
2584 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2585 return (true);
2586 }
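
/*
 * Example (illustrative sketch): reserving VA now and filling it in
 * later, the way uvm_map_extract() below uses this helper.
 *
 *	vaddr_t va = vm_map_min(map);		(hint)
 *
 *	if (!uvm_map_reserve(map, size, 0, 0, &va, 0))
 *		return ENOMEM;
 *	(va now names a blank VM_PROT_NONE entry of "size" bytes;
 *	 real mappings are installed later via uvm_map_replace())
 */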
2587
2588 /*
2589 * uvm_map_replace: replace a reserved (blank) area of memory with
2590 * real mappings.
2591 *
2592 * => caller must WRITE-LOCK the map
2593 * => we return true if replacement was a success
2594 * => we expect the newents chain to have nnewents entries on it and
2595 * we expect newents->prev to point to the last entry on the list
2596 * => note newents is allowed to be NULL
2597 */
2598
2599 static int
2600 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2601 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2602 struct vm_map_entry **oldentryp)
2603 {
2604 struct vm_map_entry *oldent, *last;
2605
2606 uvm_map_check(map, "map_replace entry");
2607
2608 /*
2609 * first find the blank map entry at the specified address
2610 */
2611
2612 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2613 return (false);
2614 }
2615
2616 /*
2617 * check to make sure we have a proper blank entry
2618 */
2619
2620 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2621 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2622 }
2623 if (oldent->start != start || oldent->end != end ||
2624 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2625 return (false);
2626 }
2627
2628 #ifdef DIAGNOSTIC
2629
2630 /*
2631 * sanity check the newents chain
2632 */
2633
2634 {
2635 struct vm_map_entry *tmpent = newents;
2636 int nent = 0;
2637 vsize_t sz = 0;
2638 vaddr_t cur = start;
2639
2640 while (tmpent) {
2641 nent++;
2642 sz += tmpent->end - tmpent->start;
2643 if (tmpent->start < cur)
2644 panic("uvm_map_replace1");
2645 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2646 panic("uvm_map_replace2: "
2647 "tmpent->start=0x%"PRIxVADDR
2648 ", tmpent->end=0x%"PRIxVADDR
2649 ", end=0x%"PRIxVADDR,
2650 tmpent->start, tmpent->end, end);
2651 }
2652 cur = tmpent->end;
2653 if (tmpent->next) {
2654 if (tmpent->next->prev != tmpent)
2655 panic("uvm_map_replace3");
2656 } else {
2657 if (newents->prev != tmpent)
2658 panic("uvm_map_replace4");
2659 }
2660 tmpent = tmpent->next;
2661 }
2662 if (nent != nnewents)
2663 panic("uvm_map_replace5");
2664 if (sz != nsize)
2665 panic("uvm_map_replace6");
2666 }
2667 #endif
2668
2669 /*
2670 * map entry is a valid blank! replace it. (this does all the
2671 * work of map entry link/unlink...).
2672 */
2673
2674 if (newents) {
2675 last = newents->prev;
2676
2677 /* critical: flush stale hints out of map */
2678 SAVE_HINT(map, map->hint, newents);
2679 if (map->first_free == oldent)
2680 map->first_free = last;
2681
2682 last->next = oldent->next;
2683 last->next->prev = last;
2684
2685 /* Fix RB tree */
2686 uvm_rb_remove(map, oldent);
2687
2688 newents->prev = oldent->prev;
2689 newents->prev->next = newents;
2690 map->nentries = map->nentries + (nnewents - 1);
2691
2692 /* Fixup the RB tree */
2693 {
2694 int i;
2695 struct vm_map_entry *tmp;
2696
2697 tmp = newents;
2698 for (i = 0; i < nnewents && tmp; i++) {
2699 uvm_rb_insert(map, tmp);
2700 tmp = tmp->next;
2701 }
2702 }
2703 } else {
2704 /* NULL list of new entries: just remove the old one */
2705 clear_hints(map, oldent);
2706 uvm_map_entry_unlink(map, oldent);
2707 }
2708 map->size -= end - start - nsize;
2709
2710 uvm_map_check(map, "map_replace leave");
2711
2712 /*
2713 * now we can free the old blank entry and return.
2714 */
2715
2716 *oldentryp = oldent;
2717 return (true);
2718 }
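
/*
 * Example (illustrative sketch): the chain format uvm_map_replace()
 * expects, as built in step 4 of uvm_map_extract() below -- a
 * NULL-terminated forward list whose head's "prev" points at the
 * tail.  For hypothetical entries e1, e2, e3:
 *
 *	e1->next = e2;  e2->next = e3;  e3->next = NULL;
 *	e1->prev = e3;				(head->prev == tail)
 *	uvm_map_replace(map, start, end, e1, 3, nsize, &oldent);
 */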
2719
2720 /*
2721 * uvm_map_extract: extract a mapping from a map and put it somewhere
2722 * (maybe removing the old mapping)
2723 *
2724 * => maps should be unlocked (we will write lock them)
2725 * => returns 0 on success, error code otherwise
2726 * => start must be page aligned
2727 * => len must be page sized
2728 * => flags:
2729 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2730 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2731 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2732 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2733 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2734 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2735 * be used from within the kernel in a kernel level map <<<
2736 */
2737
2738 int
2739 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2740 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2741 {
2742 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2743 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2744 *deadentry, *oldentry;
2745 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2746 vsize_t elen;
2747 int nchain, error, copy_ok;
2748 vsize_t nsize;
2749 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2750
2751 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2752 len,0);
2753 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2754
2755 /*
2756 * step 0: sanity check: start must be on a page boundary, length
2757 * must be page sized. can't ask for CONTIG/QREF if you asked for
2758 * REMOVE.
2759 */
2760
2761 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2762 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2763 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2764
2765 /*
2766 * step 1: reserve space in the target map for the extracted area
2767 */
2768
2769 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2770 dstaddr = vm_map_min(dstmap);
2771 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2772 return (ENOMEM);
2773 *dstaddrp = dstaddr; /* pass address back to caller */
2774 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2775 } else {
2776 dstaddr = *dstaddrp;
2777 }
2778
2779 /*
2780 * step 2: setup for the extraction process loop by init'ing the
2781 * map entry chain, locking src map, and looking up the first useful
2782 * entry in the map.
2783 */
2784
2785 end = start + len;
2786 newend = dstaddr + len;
2787 chain = endchain = NULL;
2788 nchain = 0;
2789 nsize = 0;
2790 vm_map_lock(srcmap);
2791
2792 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2793
2794 /* "start" is within an entry */
2795 if (flags & UVM_EXTRACT_QREF) {
2796
2797 /*
2798 * for quick references we don't clip the entry, so
2799 * the entry may map space "before" the starting
2800 * virtual address... this is the "fudge" factor
2801 * (which can be non-zero only the first time
2802 * through the "while" loop in step 3).
2803 */
2804
2805 fudge = start - entry->start;
2806 } else {
2807
2808 /*
2809 * normal reference: we clip the map to fit (thus
2810 * fudge is zero)
2811 */
2812
2813 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2814 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2815 fudge = 0;
2816 }
2817 } else {
2818
2819 /* "start" is not within an entry ... skip to next entry */
2820 if (flags & UVM_EXTRACT_CONTIG) {
2821 error = EINVAL;
2822 goto bad; /* definite hole here ... */
2823 }
2824
2825 entry = entry->next;
2826 fudge = 0;
2827 }
2828
2829 /* save values from srcmap for step 6 */
2830 orig_entry = entry;
2831 orig_fudge = fudge;
2832
2833 /*
2834 * step 3: now start looping through the map entries, extracting
2835 * as we go.
2836 */
2837
2838 while (entry->start < end && entry != &srcmap->header) {
2839
2840 /* if we are not doing a quick reference, clip it */
2841 if ((flags & UVM_EXTRACT_QREF) == 0)
2842 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2843
2844 /* clear needs_copy (allow chunking) */
2845 if (UVM_ET_ISNEEDSCOPY(entry)) {
2846 amap_copy(srcmap, entry,
2847 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2848 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2849 error = ENOMEM;
2850 goto bad;
2851 }
2852
2853 /* amap_copy could clip (during chunk)! update fudge */
2854 if (fudge) {
2855 fudge = start - entry->start;
2856 orig_fudge = fudge;
2857 }
2858 }
2859
2860 /* calculate the offset of this from "start" */
2861 oldoffset = (entry->start + fudge) - start;
2862
2863 /* allocate a new map entry */
2864 newentry = uvm_mapent_alloc(dstmap, 0);
2865 if (newentry == NULL) {
2866 error = ENOMEM;
2867 goto bad;
2868 }
2869
2870 /* set up new map entry */
2871 newentry->next = NULL;
2872 newentry->prev = endchain;
2873 newentry->start = dstaddr + oldoffset;
2874 newentry->end =
2875 newentry->start + (entry->end - (entry->start + fudge));
2876 if (newentry->end > newend || newentry->end < newentry->start)
2877 newentry->end = newend;
2878 newentry->object.uvm_obj = entry->object.uvm_obj;
2879 if (newentry->object.uvm_obj) {
2880 if (newentry->object.uvm_obj->pgops->pgo_reference)
2881 newentry->object.uvm_obj->pgops->
2882 pgo_reference(newentry->object.uvm_obj);
2883 newentry->offset = entry->offset + fudge;
2884 } else {
2885 newentry->offset = 0;
2886 }
2887 newentry->etype = entry->etype;
2888 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2889 entry->max_protection : entry->protection;
2890 newentry->max_protection = entry->max_protection;
2891 newentry->inheritance = entry->inheritance;
2892 newentry->wired_count = 0;
2893 newentry->aref.ar_amap = entry->aref.ar_amap;
2894 if (newentry->aref.ar_amap) {
2895 newentry->aref.ar_pageoff =
2896 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2897 uvm_map_reference_amap(newentry, AMAP_SHARED |
2898 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2899 } else {
2900 newentry->aref.ar_pageoff = 0;
2901 }
2902 newentry->advice = entry->advice;
2903 if ((flags & UVM_EXTRACT_QREF) != 0) {
2904 newentry->flags |= UVM_MAP_NOMERGE;
2905 }
2906
2907 /* now link it on the chain */
2908 nchain++;
2909 nsize += newentry->end - newentry->start;
2910 if (endchain == NULL) {
2911 chain = endchain = newentry;
2912 } else {
2913 endchain->next = newentry;
2914 endchain = newentry;
2915 }
2916
2917 /* end of 'while' loop! */
2918 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2919 (entry->next == &srcmap->header ||
2920 entry->next->start != entry->end)) {
2921 error = EINVAL;
2922 goto bad;
2923 }
2924 entry = entry->next;
2925 fudge = 0;
2926 }
2927
2928 /*
2929 * step 4: close off chain (in format expected by uvm_map_replace)
2930 */
2931
2932 if (chain)
2933 chain->prev = endchain;
2934
2935 /*
2936 * step 5: attempt to lock the dest map so we can pmap_copy.
2937 * note usage of copy_ok:
2938 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2939 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2940 */
2941
2942 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2943 copy_ok = 1;
2944 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2945 nchain, nsize, &resentry)) {
2946 if (srcmap != dstmap)
2947 vm_map_unlock(dstmap);
2948 error = EIO;
2949 goto bad;
2950 }
2951 } else {
2952 copy_ok = 0;
2953 /* replace deferred until step 7 */
2954 }
2955
2956 /*
2957 * step 6: traverse the srcmap a second time to do the following:
2958 * - if we got a lock on the dstmap do pmap_copy
2959 * - if UVM_EXTRACT_REMOVE remove the entries
2960 * we make use of orig_entry and orig_fudge (saved in step 2)
2961 */
2962
2963 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2964
2965 /* purge possible stale hints from srcmap */
2966 if (flags & UVM_EXTRACT_REMOVE) {
2967 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2968 if (srcmap->first_free != &srcmap->header &&
2969 srcmap->first_free->start >= start)
2970 srcmap->first_free = orig_entry->prev;
2971 }
2972
2973 entry = orig_entry;
2974 fudge = orig_fudge;
2975 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2976
2977 while (entry->start < end && entry != &srcmap->header) {
2978 if (copy_ok) {
2979 oldoffset = (entry->start + fudge) - start;
2980 elen = MIN(end, entry->end) -
2981 (entry->start + fudge);
2982 pmap_copy(dstmap->pmap, srcmap->pmap,
2983 dstaddr + oldoffset, elen,
2984 entry->start + fudge);
2985 }
2986
2987 /* we advance "entry" in the following if statement */
2988 if (flags & UVM_EXTRACT_REMOVE) {
2989 uvm_map_lock_entry(entry);
2990 pmap_remove(srcmap->pmap, entry->start,
2991 entry->end);
2992 uvm_map_unlock_entry(entry);
2993 oldentry = entry; /* save entry */
2994 entry = entry->next; /* advance */
2995 uvm_map_entry_unlink(srcmap, oldentry);
2996 /* add to dead list */
2997 oldentry->next = deadentry;
2998 deadentry = oldentry;
2999 } else {
3000 entry = entry->next; /* advance */
3001 }
3002
3003 /* end of 'while' loop */
3004 fudge = 0;
3005 }
3006 pmap_update(srcmap->pmap);
3007
3008 /*
3009 * unlock dstmap. we will dispose of deadentry in
3010 * step 7 if needed
3011 */
3012
3013 if (copy_ok && srcmap != dstmap)
3014 vm_map_unlock(dstmap);
3015
3016 } else {
3017 deadentry = NULL;
3018 }
3019
3020 /*
3021 * step 7: we are done with the source map, unlock. if copy_ok
3022 * is 0 then we have not replaced the dummy mapping in dstmap yet
3023 * and we need to do so now.
3024 */
3025
3026 vm_map_unlock(srcmap);
3027 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
3028 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
3029
3030 /* now do the replacement if we didn't do it in step 5 */
3031 if (copy_ok == 0) {
3032 vm_map_lock(dstmap);
3033 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
3034 nchain, nsize, &resentry);
3035 vm_map_unlock(dstmap);
3036
3037 if (error == false) {
3038 error = EIO;
3039 goto bad2;
3040 }
3041 }
3042
3043 if (resentry != NULL)
3044 uvm_mapent_free(resentry);
3045
3046 return (0);
3047
3048 /*
3049 * bad: failure recovery
3050 */
3051 bad:
3052 vm_map_unlock(srcmap);
3053 bad2: /* src already unlocked */
3054 if (chain)
3055 uvm_unmap_detach(chain,
3056 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3057
3058 if (resentry != NULL)
3059 uvm_mapent_free(resentry);
3060
3061 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3062 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
3063 }
3064 return (error);
3065 }
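
/*
 * Example (illustrative sketch): a QREF-style temporary extraction
 * of the kind the NOTEs above restrict to kernel-level maps; this is
 * roughly the pattern uvm_io() uses.
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, start, len, kernel_map, &kva,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
 *	if (error)
 *		return error;
 *	(... access the extracted range at kva ...)
 *	uvm_unmap(kernel_map, kva, kva + len);	(the QREF unmap path)
 */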
3066
3067 /* end of extraction functions */
3068
3069 /*
3070 * uvm_map_submap: punch down part of a map into a submap
3071 *
3072 * => only the kernel_map is allowed to be submapped
3073 * => the purpose of submapping is to break up the locking granularity
3074 * of a larger map
3075 * => the range specified must have been mapped previously with a uvm_map()
3076 * call [with uobj==NULL] to create a blank map entry in the main map.
3077 * [And it had better still be blank!]
3078 * => maps which contain submaps should never be copied or forked.
3079 * => to remove a submap, use uvm_unmap() on the main map
3080 * and then uvm_map_deallocate() the submap.
3081 * => main map must be unlocked.
3082 * => submap must have been init'd and have a zero reference count.
3083 * [need not be locked as we don't actually reference it]
3084 */
3085
3086 int
3087 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3088 struct vm_map *submap)
3089 {
3090 struct vm_map_entry *entry;
3091 struct uvm_mapent_reservation umr;
3092 int error;
3093
3094 uvm_mapent_reserve(map, &umr, 2, 0);
3095
3096 vm_map_lock(map);
3097 VM_MAP_RANGE_CHECK(map, start, end);
3098
3099 if (uvm_map_lookup_entry(map, start, &entry)) {
3100 UVM_MAP_CLIP_START(map, entry, start, &umr);
3101 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
3102 } else {
3103 entry = NULL;
3104 }
3105
3106 if (entry != NULL &&
3107 entry->start == start && entry->end == end &&
3108 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3109 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3110 entry->etype |= UVM_ET_SUBMAP;
3111 entry->object.sub_map = submap;
3112 entry->offset = 0;
3113 uvm_map_reference(submap);
3114 error = 0;
3115 } else {
3116 error = EINVAL;
3117 }
3118 vm_map_unlock(map);
3119
3120 uvm_mapent_unreserve(map, &umr);
3121
3122 return error;
3123 }
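
/*
 * Example (illustrative sketch): the setup sequence the comment
 * above prescribes, roughly what uvm_km_suballoc() does.
 *
 *	(1) uvm_map() a blank entry in kernel_map with uobj == NULL
 *	    covering [va, va + size)
 *	(2) create and initialize the submap for that same range
 *	(3) uvm_map_submap(kernel_map, va, va + size, submap);
 *
 * to tear it down: uvm_unmap() the range in kernel_map, then
 * uvm_map_deallocate() the submap.
 */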
3124
3125 /*
3126 * uvm_map_setup_kernel: init in-kernel map
3127 *
3128 * => map must not be in service yet.
3129 */
3130
3131 void
3132 uvm_map_setup_kernel(struct vm_map_kernel *map,
3133 vaddr_t vmin, vaddr_t vmax, int flags)
3134 {
3135
3136 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
3137 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
3138 LIST_INIT(&map->vmk_kentry_free);
3139 map->vmk_merged_entries = NULL;
3140 }
3141
3142
3143 /*
3144 * uvm_map_protect: change map protection
3145 *
3146 * => set_max means set max_protection.
3147 * => map must be unlocked.
3148 */
3149
3150 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3151 ~VM_PROT_WRITE : VM_PROT_ALL)
3152
3153 int
3154 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3155 vm_prot_t new_prot, bool set_max)
3156 {
3157 struct vm_map_entry *current, *entry;
3158 int error = 0;
3159 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
3160 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
3161 map, start, end, new_prot);
3162
3163 vm_map_lock(map);
3164 VM_MAP_RANGE_CHECK(map, start, end);
3165 if (uvm_map_lookup_entry(map, start, &entry)) {
3166 UVM_MAP_CLIP_START(map, entry, start, NULL);
3167 } else {
3168 entry = entry->next;
3169 }
3170
3171 /*
3172 * make a first pass to check for protection violations.
3173 */
3174
3175 current = entry;
3176 while ((current != &map->header) && (current->start < end)) {
3177 if (UVM_ET_ISSUBMAP(current)) {
3178 error = EINVAL;
3179 goto out;
3180 }
3181 if ((new_prot & current->max_protection) != new_prot) {
3182 error = EACCES;
3183 goto out;
3184 }
3185 /*
3186 * Don't allow VM_PROT_EXECUTE to be set on entries that
3187 * point to vnodes that are associated with a NOEXEC file
3188 * system.
3189 */
3190 if (UVM_ET_ISOBJ(current) &&
3191 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3192 struct vnode *vp =
3193 (struct vnode *) current->object.uvm_obj;
3194
3195 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3196 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3197 error = EACCES;
3198 goto out;
3199 }
3200 }
3201
3202 current = current->next;
3203 }
3204
3205 /* go back and fix up protections (no need to clip this time). */
3206
3207 current = entry;
3208 while ((current != &map->header) && (current->start < end)) {
3209 vm_prot_t old_prot;
3210
3211 UVM_MAP_CLIP_END(map, current, end, NULL);
3212 old_prot = current->protection;
3213 if (set_max)
3214 current->protection =
3215 (current->max_protection = new_prot) & old_prot;
3216 else
3217 current->protection = new_prot;
3218
3219 /*
3220 * update physical map if necessary. worry about copy-on-write
3221 * here -- CHECK THIS XXX
3222 */
3223
3224 if (current->protection != old_prot) {
3225 /* update pmap! */
3226 uvm_map_lock_entry(current);
3227 pmap_protect(map->pmap, current->start, current->end,
3228 current->protection & MASK(current));
3229 uvm_map_unlock_entry(current);
3230
3231 /*
3232 * If this entry points at a vnode, and the
3233 * protection includes VM_PROT_EXECUTE, mark
3234 * the vnode as VEXECMAP.
3235 */
3236 if (UVM_ET_ISOBJ(current)) {
3237 struct uvm_object *uobj =
3238 current->object.uvm_obj;
3239
3240 if (UVM_OBJ_IS_VNODE(uobj) &&
3241 (current->protection & VM_PROT_EXECUTE)) {
3242 vn_markexec((struct vnode *) uobj);
3243 }
3244 }
3245 }
3246
3247 /*
3248 * If the map is configured to lock any future mappings,
3249 * wire this entry now if the old protection was VM_PROT_NONE
3250 * and the new protection is not VM_PROT_NONE.
3251 */
3252
3253 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3254 VM_MAPENT_ISWIRED(current) == 0 &&
3255 old_prot == VM_PROT_NONE &&
3256 new_prot != VM_PROT_NONE) {
3257 if (uvm_map_pageable(map, current->start,
3258 current->end, false,
3259 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3260
3261 /*
3262 * If locking the entry fails, remember the
3263 * error if it's the first one. Note we
3264 * still continue setting the protection in
3265 * the map, but will return the error
3266 * condition regardless.
3267 *
3268 * XXX Ignore what the actual error is,
3269 * XXX just call it a resource shortage
3270 * XXX so that it doesn't get confused
3271 * XXX what uvm_map_protect() itself would
3272 * XXX normally return.
3273 */
3274
3275 error = ENOMEM;
3276 }
3277 }
3278 current = current->next;
3279 }
3280 pmap_update(map->pmap);
3281
3282 out:
3283 vm_map_unlock(map);
3284
3285 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3286 return error;
3287 }
3288
3289 #undef MASK
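
/*
 * Example (illustrative sketch): what the MASK() trick above buys
 * us.  For a copy-on-write entry the write bit is masked out of the
 * pmap protection, so the first write still faults and triggers the
 * copy:
 *
 *	(entry is COW, new_prot == VM_PROT_READ|VM_PROT_WRITE)
 *	pmap_protect(map->pmap, start, end,
 *	    (VM_PROT_READ|VM_PROT_WRITE) & ~VM_PROT_WRITE);
 *	(i.e. the pmap sees VM_PROT_READ only)
 */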
3290
3291 /*
3292 * uvm_map_inherit: set inheritance code for range of addrs in map.
3293 *
3294 * => map must be unlocked
3295 * => note that the inherit code is used during a "fork". see fork
3296 * code for details.
3297 */
3298
3299 int
3300 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3301 vm_inherit_t new_inheritance)
3302 {
3303 struct vm_map_entry *entry, *temp_entry;
3304 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3305 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3306 map, start, end, new_inheritance);
3307
3308 switch (new_inheritance) {
3309 case MAP_INHERIT_NONE:
3310 case MAP_INHERIT_COPY:
3311 case MAP_INHERIT_SHARE:
3312 break;
3313 default:
3314 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3315 return EINVAL;
3316 }
3317
3318 vm_map_lock(map);
3319 VM_MAP_RANGE_CHECK(map, start, end);
3320 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3321 entry = temp_entry;
3322 UVM_MAP_CLIP_START(map, entry, start, NULL);
3323 } else {
3324 entry = temp_entry->next;
3325 }
3326 while ((entry != &map->header) && (entry->start < end)) {
3327 UVM_MAP_CLIP_END(map, entry, end, NULL);
3328 entry->inheritance = new_inheritance;
3329 entry = entry->next;
3330 }
3331 vm_map_unlock(map);
3332 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3333 return 0;
3334 }
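
/*
 * Example (illustrative sketch): how the inherit codes set here play
 * out at fork() time (see uvmspace_fork()):
 *
 *	MAP_INHERIT_NONE  - the child gets no mapping for the range
 *	MAP_INHERIT_SHARE - parent and child share the same backing
 *	MAP_INHERIT_COPY  - the child gets a copy-on-write snapshot
 *
 *	uvm_map_inherit(map, start, end, MAP_INHERIT_SHARE);
 */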
3335
3336 /*
3337 * uvm_map_advice: set advice code for range of addrs in map.
3338 *
3339 * => map must be unlocked
3340 */
3341
3342 int
3343 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3344 {
3345 struct vm_map_entry *entry, *temp_entry;
3346 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3347 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3348 map, start, end, new_advice);
3349
3350 vm_map_lock(map);
3351 VM_MAP_RANGE_CHECK(map, start, end);
3352 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3353 entry = temp_entry;
3354 UVM_MAP_CLIP_START(map, entry, start, NULL);
3355 } else {
3356 entry = temp_entry->next;
3357 }
3358
3359 /*
3360 * XXXJRT: disallow holes?
3361 */
3362
3363 while ((entry != &map->header) && (entry->start < end)) {
3364 UVM_MAP_CLIP_END(map, entry, end, NULL);
3365
3366 switch (new_advice) {
3367 case MADV_NORMAL:
3368 case MADV_RANDOM:
3369 case MADV_SEQUENTIAL:
3370 /* nothing special here */
3371 break;
3372
3373 default:
3374 vm_map_unlock(map);
3375 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3376 return EINVAL;
3377 }
3378 entry->advice = new_advice;
3379 entry = entry->next;
3380 }
3381
3382 vm_map_unlock(map);
3383 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3384 return 0;
3385 }
3386
3387 /*
3388 * uvm_map_willneed: apply MADV_WILLNEED
3389 */
3390
3391 int
3392 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3393 {
3394 struct vm_map_entry *entry;
3395 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3396 UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
3397 map, start, end, 0);
3398
3399 vm_map_lock_read(map);
3400 VM_MAP_RANGE_CHECK(map, start, end);
3401 if (!uvm_map_lookup_entry(map, start, &entry)) {
3402 entry = entry->next;
3403 }
3404 while (entry->start < end) {
3405 struct vm_amap * const amap = entry->aref.ar_amap;
3406 struct uvm_object * const uobj = entry->object.uvm_obj;
3407
3408 KASSERT(entry != &map->header);
3409 KASSERT(start < entry->end);
3410 /*
3411 * For now, we handle only the easy but commonly-requested case.
3412 * i.e. start prefetching of backing uobj pages.
3413 *
3414 * XXX It might be useful to pmap_enter() the already-in-core
3415 * pages by inventing a "weak" mode for uvm_fault() which would
3416 * only do the PGO_LOCKED pgo_get().
3417 */
3418 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3419 off_t offset;
3420 off_t size;
3421
3422 offset = entry->offset;
3423 if (entry->start < start) {
3424 offset += start - entry->start;
3425 }
3426 size = entry->offset + (entry->end - entry->start);
3427 if (end < entry->end) {
3428 size -= entry->end - end;
3429 }
3430 uvm_readahead(uobj, offset, size);
3431 }
3432 entry = entry->next;
3433 }
3434 vm_map_unlock_read(map);
3435 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3436 return 0;
3437 }
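
/*
 * Example (illustrative sketch): how sys_madvise() dispatches into
 * the two functions above and into uvm_map_clean() below,
 * approximately:
 *
 *	MADV_NORMAL/RANDOM/SEQUENTIAL -> uvm_map_advice(map, s, e, adv)
 *	MADV_WILLNEED                 -> uvm_map_willneed(map, s, e)
 *	MADV_DONTNEED                 -> uvm_map_clean(map, s, e,
 *	                                     PGO_DEACTIVATE)
 *	MADV_FREE                     -> uvm_map_clean(map, s, e,
 *	                                     PGO_FREE)
 */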
3438
3439 /*
3440 * uvm_map_pageable: sets the pageability of a range in a map.
3441 *
3442 * => wires map entries. should not be used for transient page locking.
3443 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3444 * => regions specified as not pageable require lock-down (wired) memory
3445 * and page tables.
3446 * => map must never be read-locked
3447 * => if islocked is true, map is already write-locked
3448 * => we always unlock the map, since we must downgrade to a read-lock
3449 * to call uvm_fault_wire()
3450 * => XXXCDC: check this and try and clean it up.
3451 */
3452
3453 int
3454 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3455 bool new_pageable, int lockflags)
3456 {
3457 struct vm_map_entry *entry, *start_entry, *failed_entry;
3458 int rv;
3459 #ifdef DIAGNOSTIC
3460 u_int timestamp_save;
3461 #endif
3462 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3463 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3464 map, start, end, new_pageable);
3465 KASSERT(map->flags & VM_MAP_PAGEABLE);
3466
3467 if ((lockflags & UVM_LK_ENTER) == 0)
3468 vm_map_lock(map);
3469 VM_MAP_RANGE_CHECK(map, start, end);
3470
3471 /*
3472 * only one pageability change may take place at one time, since
3473 * uvm_fault_wire assumes it will be called only once for each
3474 * wiring/unwiring. therefore, we have to make sure we're actually
3475 * changing the pageability for the entire region. we do so before
3476 * making any changes.
3477 */
3478
3479 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3480 if ((lockflags & UVM_LK_EXIT) == 0)
3481 vm_map_unlock(map);
3482
3483 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3484 return EFAULT;
3485 }
3486 entry = start_entry;
3487
3488 /*
3489 * handle wiring and unwiring separately.
3490 */
3491
3492 if (new_pageable) { /* unwire */
3493 UVM_MAP_CLIP_START(map, entry, start, NULL);
3494
3495 /*
3496 * unwiring. first ensure that the range to be unwired is
3497 * really wired down and that there are no holes.
3498 */
3499
3500 while ((entry != &map->header) && (entry->start < end)) {
3501 if (entry->wired_count == 0 ||
3502 (entry->end < end &&
3503 (entry->next == &map->header ||
3504 entry->next->start > entry->end))) {
3505 if ((lockflags & UVM_LK_EXIT) == 0)
3506 vm_map_unlock(map);
3507 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3508 return EINVAL;
3509 }
3510 entry = entry->next;
3511 }
3512
3513 /*
3514 * POSIX 1003.1b - a single munlock call unlocks a region,
3515 * regardless of the number of mlock calls made on that
3516 * region.
3517 */
3518
3519 entry = start_entry;
3520 while ((entry != &map->header) && (entry->start < end)) {
3521 UVM_MAP_CLIP_END(map, entry, end, NULL);
3522 if (VM_MAPENT_ISWIRED(entry))
3523 uvm_map_entry_unwire(map, entry);
3524 entry = entry->next;
3525 }
3526 if ((lockflags & UVM_LK_EXIT) == 0)
3527 vm_map_unlock(map);
3528 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3529 return 0;
3530 }
3531
3532 /*
3533 * wire case: in two passes [XXXCDC: ugly block of code here]
3534 *
3535 * 1: holding the write lock, we create any anonymous maps that need
3536 * to be created. then we clip each map entry to the region to
3537 * be wired and increment its wiring count.
3538 *
3539 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3540 * in the pages for any newly wired area (wired_count == 1).
3541 *
3542 * downgrading to a read lock for uvm_fault_wire avoids a possible
3543 * deadlock with another thread that may have faulted on one of
3544 * the pages to be wired (it would mark the page busy, blocking
3545 * us, then in turn block on the map lock that we hold). because
3546 * of problems in the recursive lock package, we cannot upgrade
3547 * to a write lock in vm_map_lookup. thus, any actions that
3548 * require the write lock must be done beforehand. because we
3549 * keep the read lock on the map, the copy-on-write status of the
3550 * entries we modify here cannot change.
3551 */
3552
3553 while ((entry != &map->header) && (entry->start < end)) {
3554 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3555
3556 /*
3557 * perform actions of vm_map_lookup that need the
3558 * write lock on the map: create an anonymous map
3559 * for a copy-on-write region, or an anonymous map
3560 * for a zero-fill region. (XXXCDC: submap case
3561 * ok?)
3562 */
3563
3564 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3565 if (UVM_ET_ISNEEDSCOPY(entry) &&
3566 ((entry->max_protection & VM_PROT_WRITE) ||
3567 (entry->object.uvm_obj == NULL))) {
3568 amap_copy(map, entry, 0, start, end);
3569 /* XXXCDC: wait OK? */
3570 }
3571 }
3572 }
3573 UVM_MAP_CLIP_START(map, entry, start, NULL);
3574 UVM_MAP_CLIP_END(map, entry, end, NULL);
3575 entry->wired_count++;
3576
3577 /*
3578 * Check for holes
3579 */
3580
3581 if (entry->protection == VM_PROT_NONE ||
3582 (entry->end < end &&
3583 (entry->next == &map->header ||
3584 entry->next->start > entry->end))) {
3585
3586 /*
3587 * found one. amap creation actions do not need to
3588 * be undone, but the wired counts need to be restored.
3589 */
3590
3591 while (entry != &map->header && entry->end > start) {
3592 entry->wired_count--;
3593 entry = entry->prev;
3594 }
3595 if ((lockflags & UVM_LK_EXIT) == 0)
3596 vm_map_unlock(map);
3597 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3598 return EINVAL;
3599 }
3600 entry = entry->next;
3601 }
3602
3603 /*
3604 * Pass 2.
3605 */
3606
3607 #ifdef DIAGNOSTIC
3608 timestamp_save = map->timestamp;
3609 #endif
3610 vm_map_busy(map);
3611 vm_map_unlock(map);
3612
3613 rv = 0;
3614 entry = start_entry;
3615 while (entry != &map->header && entry->start < end) {
3616 if (entry->wired_count == 1) {
3617 rv = uvm_fault_wire(map, entry->start, entry->end,
3618 entry->max_protection, 1);
3619 if (rv) {
3620
3621 /*
3622 * wiring failed. break out of the loop.
3623 * we'll clean up the map below, once we
3624 * have a write lock again.
3625 */
3626
3627 break;
3628 }
3629 }
3630 entry = entry->next;
3631 }
3632
3633 if (rv) { /* failed? */
3634
3635 /*
3636 * Get back to an exclusive (write) lock.
3637 */
3638
3639 vm_map_lock(map);
3640 vm_map_unbusy(map);
3641
3642 #ifdef DIAGNOSTIC
3643 if (timestamp_save + 1 != map->timestamp)
3644 panic("uvm_map_pageable: stale map");
3645 #endif
3646
3647 /*
3648 * first drop the wiring count on all the entries
3649 * which haven't actually been wired yet.
3650 */
3651
3652 failed_entry = entry;
3653 while (entry != &map->header && entry->start < end) {
3654 entry->wired_count--;
3655 entry = entry->next;
3656 }
3657
3658 /*
3659 * now, unwire all the entries that were successfully
3660 * wired above.
3661 */
3662
3663 entry = start_entry;
3664 while (entry != failed_entry) {
3665 entry->wired_count--;
3666 if (VM_MAPENT_ISWIRED(entry) == 0)
3667 uvm_map_entry_unwire(map, entry);
3668 entry = entry->next;
3669 }
3670 if ((lockflags & UVM_LK_EXIT) == 0)
3671 vm_map_unlock(map);
3672 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3673 return (rv);
3674 }
3675
3676 if ((lockflags & UVM_LK_EXIT) == 0) {
3677 vm_map_unbusy(map);
3678 } else {
3679
3680 /*
3681 * Get back to an exclusive (write) lock.
3682 */
3683
3684 vm_map_lock(map);
3685 vm_map_unbusy(map);
3686 }
3687
3688 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3689 return 0;
3690 }
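
/*
 * Example (illustrative sketch): the mlock()/munlock() call pattern
 * for the function above; sys_mlock() does roughly this after
 * validating the range and resource limits.
 *
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start,
 *	    start + size, false, 0);		(false: wire the range)
 *	...
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start,
 *	    start + size, true, 0);		(true: unwire it again)
 */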
3691
3692 /*
3693 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3694 * all mapped regions.
3695 *
3696 * => map must not be locked.
3697 * => if no flags are specified, all regions are unwired.
3698 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3699 */
3700
3701 int
3702 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3703 {
3704 struct vm_map_entry *entry, *failed_entry;
3705 vsize_t size;
3706 int rv;
3707 #ifdef DIAGNOSTIC
3708 u_int timestamp_save;
3709 #endif
3710 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3711 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3712
3713 KASSERT(map->flags & VM_MAP_PAGEABLE);
3714
3715 vm_map_lock(map);
3716
3717 /*
3718 * handle wiring and unwiring separately.
3719 */
3720
3721 if (flags == 0) { /* unwire */
3722
3723 /*
3724 * POSIX 1003.1b -- munlockall unlocks all regions,
3725 * regardless of how many times mlockall has been called.
3726 */
3727
3728 for (entry = map->header.next; entry != &map->header;
3729 entry = entry->next) {
3730 if (VM_MAPENT_ISWIRED(entry))
3731 uvm_map_entry_unwire(map, entry);
3732 }
3733 map->flags &= ~VM_MAP_WIREFUTURE;
3734 vm_map_unlock(map);
3735 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3736 return 0;
3737 }
3738
3739 if (flags & MCL_FUTURE) {
3740
3741 /*
3742 * must wire all future mappings; remember this.
3743 */
3744
3745 map->flags |= VM_MAP_WIREFUTURE;
3746 }
3747
3748 if ((flags & MCL_CURRENT) == 0) {
3749
3750 /*
3751 * no more work to do!
3752 */
3753
3754 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3755 vm_map_unlock(map);
3756 return 0;
3757 }
3758
3759 /*
3760 * wire case: in three passes [XXXCDC: ugly block of code here]
3761 *
3762 * 1: holding the write lock, count all pages mapped by non-wired
3763 * entries. if this would cause us to go over our limit, we fail.
3764 *
3765 * 2: still holding the write lock, we create any anonymous maps that
3766 * need to be created. then we increment each entry's wiring count.
3767 *
3768 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3769 * in the pages for any newly wired area (wired_count == 1).
3770 *
3771 * downgrading to a read lock for uvm_fault_wire avoids a possible
3772 * deadlock with another thread that may have faulted on one of
3773 * the pages to be wired (it would mark the page busy, blocking
3774 * us, then in turn block on the map lock that we hold). because
3775 * of problems in the recursive lock package, we cannot upgrade
3776 * to a write lock in vm_map_lookup. thus, any actions that
3777 * require the write lock must be done beforehand. because we
3778 * keep the read lock on the map, the copy-on-write status of the
3779 * entries we modify here cannot change.
3780 */
3781
3782 for (size = 0, entry = map->header.next; entry != &map->header;
3783 entry = entry->next) {
3784 if (entry->protection != VM_PROT_NONE &&
3785 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3786 size += entry->end - entry->start;
3787 }
3788 }
3789
3790 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3791 vm_map_unlock(map);
3792 return ENOMEM;
3793 }
3794
3795 if (limit != 0 &&
3796 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3797 vm_map_unlock(map);
3798 return ENOMEM;
3799 }
3800
3801 /*
3802 * Pass 2.
3803 */
3804
3805 for (entry = map->header.next; entry != &map->header;
3806 entry = entry->next) {
3807 if (entry->protection == VM_PROT_NONE)
3808 continue;
3809 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3810
3811 /*
3812 * perform actions of vm_map_lookup that need the
3813 * write lock on the map: create an anonymous map
3814 * for a copy-on-write region, or an anonymous map
3815 * for a zero-fill region. (XXXCDC: submap case
3816 * ok?)
3817 */
3818
3819 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3820 if (UVM_ET_ISNEEDSCOPY(entry) &&
3821 ((entry->max_protection & VM_PROT_WRITE) ||
3822 (entry->object.uvm_obj == NULL))) {
3823 amap_copy(map, entry, 0, entry->start,
3824 entry->end);
3825 /* XXXCDC: wait OK? */
3826 }
3827 }
3828 }
3829 entry->wired_count++;
3830 }
3831
3832 /*
3833 * Pass 3.
3834 */
3835
3836 #ifdef DIAGNOSTIC
3837 timestamp_save = map->timestamp;
3838 #endif
3839 vm_map_busy(map);
3840 vm_map_unlock(map);
3841
3842 rv = 0;
3843 for (entry = map->header.next; entry != &map->header;
3844 entry = entry->next) {
3845 if (entry->wired_count == 1) {
3846 rv = uvm_fault_wire(map, entry->start, entry->end,
3847 entry->max_protection, 1);
3848 if (rv) {
3849
3850 /*
3851 * wiring failed. break out of the loop.
3852 * we'll clean up the map below, once we
3853 * have a write lock again.
3854 */
3855
3856 break;
3857 }
3858 }
3859 }
3860
3861 if (rv) {
3862
3863 /*
3864 * Get back an exclusive (write) lock.
3865 */
3866
3867 vm_map_lock(map);
3868 vm_map_unbusy(map);
3869
3870 #ifdef DIAGNOSTIC
3871 if (timestamp_save + 1 != map->timestamp)
3872 panic("uvm_map_pageable_all: stale map");
3873 #endif
3874
3875 /*
3876 * first drop the wiring count on all the entries
3877 * which haven't actually been wired yet.
3878 *
3879 * Skip VM_PROT_NONE entries like we did above.
3880 */
3881
3882 failed_entry = entry;
3883 for (/* nothing */; entry != &map->header;
3884 entry = entry->next) {
3885 if (entry->protection == VM_PROT_NONE)
3886 continue;
3887 entry->wired_count--;
3888 }
3889
3890 /*
3891 * now, unwire all the entries that were successfully
3892 * wired above.
3893 *
3894 * Skip VM_PROT_NONE entries like we did above.
3895 */
3896
3897 for (entry = map->header.next; entry != failed_entry;
3898 entry = entry->next) {
3899 if (entry->protection == VM_PROT_NONE)
3900 continue;
3901 entry->wired_count--;
3902 if (VM_MAPENT_ISWIRED(entry))
3903 uvm_map_entry_unwire(map, entry);
3904 }
3905 vm_map_unlock(map);
3906 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3907 return (rv);
3908 }
3909
3910 vm_map_unbusy(map);
3911
3912 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3913 return 0;
3914 }
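
/*
 * Example (illustrative sketch): the mlockall()/munlockall() call
 * pattern for the function above, roughly as sys_mlockall() makes it.
 *
 *	flags == 0    -> unwire everything (munlockall)
 *	MCL_CURRENT   -> wire everything mapped now
 *	MCL_FUTURE    -> set VM_MAP_WIREFUTURE for mappings made later
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 */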
3915
3916 /*
3917 * uvm_map_clean: clean out a map range
3918 *
3919 * => valid flags:
3920 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3921 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3922 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3923 * if (flags & PGO_FREE): any cached pages are freed after clean
3924 * => returns an error if any part of the specified range isn't mapped
3925 * => never a need to flush amap layer since the anonymous memory has
3926 * no permanent home, but may deactivate pages there
3927 * => called from sys_msync() and sys_madvise()
3928 * => caller must not write-lock map (read OK).
3929 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3930 */
3931
3932 int
3933 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3934 {
3935 struct vm_map_entry *current, *entry;
3936 struct uvm_object *uobj;
3937 struct vm_amap *amap;
3938 struct vm_anon *anon, *anon_tofree;
3939 struct vm_page *pg;
3940 vaddr_t offset;
3941 vsize_t size;
3942 voff_t uoff;
3943 int error, refs;
3944 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3945
3946 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3947 map, start, end, flags);
3948 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3949 (PGO_FREE|PGO_DEACTIVATE));
3950
3951 vm_map_lock_read(map);
3952 VM_MAP_RANGE_CHECK(map, start, end);
3953 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3954 vm_map_unlock_read(map);
3955 return EFAULT;
3956 }
3957
3958 /*
3959 * Make a first pass to check for holes and wiring problems.
3960 */
3961
3962 for (current = entry; current->start < end; current = current->next) {
3963 if (UVM_ET_ISSUBMAP(current)) {
3964 vm_map_unlock_read(map);
3965 return EINVAL;
3966 }
3967 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(current)) {
3968 vm_map_unlock_read(map);
3969 return EBUSY;
3970 }
3971 if (end <= current->end) {
3972 break;
3973 }
3974 if (current->end != current->next->start) {
3975 vm_map_unlock_read(map);
3976 return EFAULT;
3977 }
3978 }
3979
3980 error = 0;
3981 for (current = entry; start < end; current = current->next) {
3982 amap = current->aref.ar_amap; /* upper layer */
3983 uobj = current->object.uvm_obj; /* lower layer */
3984 KASSERT(start >= current->start);
3985
3986 /*
3987 * No amap cleaning necessary if:
3988 *
3989 * (1) There's no amap.
3990 *
3991 * (2) We're not deactivating or freeing pages.
3992 */
3993
3994 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3995 goto flush_object;
3996
3997 amap_lock(amap);
3998 anon_tofree = NULL;
3999 offset = start - current->start;
4000 size = MIN(end, current->end) - start;
4001 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
4002 anon = amap_lookup(&current->aref, offset);
4003 if (anon == NULL)
4004 continue;
4005
4006 KASSERT(anon->an_lock == amap->am_lock);
4007 pg = anon->an_page;
4008 if (pg == NULL) {
4009 continue;
4010 }
4011
4012 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
4013
4014 /*
4015 * In these first 3 cases, we just deactivate the page.
4016 */
4017
4018 case PGO_CLEANIT|PGO_FREE:
4019 case PGO_CLEANIT|PGO_DEACTIVATE:
4020 case PGO_DEACTIVATE:
4021 deactivate_it:
4022 /*
4023 * skip the page if it's loaned or wired,
4024 * since it shouldn't be on a paging queue
4025 * at all in these cases.
4026 */
4027
4028 mutex_enter(&uvm_pageqlock);
4029 if (pg->loan_count != 0 ||
4030 pg->wire_count != 0) {
4031 mutex_exit(&uvm_pageqlock);
4032 continue;
4033 }
4034 KASSERT(pg->uanon == anon);
4035 uvm_pagedeactivate(pg);
4036 mutex_exit(&uvm_pageqlock);
4037 continue;
4038
4039 case PGO_FREE:
4040
4041 /*
4042 * If there are multiple references to
4043 * the amap, just deactivate the page.
4044 */
4045
4046 if (amap_refs(amap) > 1)
4047 goto deactivate_it;
4048
4049 /* skip the page if it's wired */
4050 if (pg->wire_count != 0) {
4051 continue;
4052 }
4053 amap_unadd(&current->aref, offset);
4054 refs = --anon->an_ref;
4055 if (refs == 0) {
4056 anon->an_link = anon_tofree;
4057 anon_tofree = anon;
4058 }
4059 continue;
4060 }
4061 }
4062 uvm_anfree(anon_tofree);
4063 amap_unlock(amap);
4064
4065 flush_object:
4066 /*
4067 * flush pages if we've got a valid backing object.
4068 * note that we must always clean object pages before
4069 * freeing them since otherwise we could reveal stale
4070 * data from files.
4071 */
4072
4073 uoff = current->offset + (start - current->start);
4074 size = MIN(end, current->end) - start;
4075 if (uobj != NULL) {
4076 mutex_enter(uobj->vmobjlock);
4077 if (uobj->pgops->pgo_put != NULL)
4078 error = (uobj->pgops->pgo_put)(uobj, uoff,
4079 uoff + size, flags | PGO_CLEANIT);
4080 else
4081 error = 0;
4082 }
4083 start += size;
4084 }
4085 vm_map_unlock_read(map);
4086 return (error);
4087 }
4088
4089
4090 /*
4091 * uvm_map_checkprot: check protection in map
4092 *
4093 * => must allow specified protection in a fully allocated region.
4094 * => map must be read or write locked by caller.
4095 */
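/*
 * Illustrative sketch (hypothetical caller): to verify that a range is
 * fully mapped with at least read/write access before operating on it:
 *
 *	if (!uvm_map_checkprot(map, start, end,
 *	    VM_PROT_READ | VM_PROT_WRITE))
 *		return EFAULT;
 */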
4096
4097 bool
4098 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4099 vm_prot_t protection)
4100 {
4101 struct vm_map_entry *entry;
4102 struct vm_map_entry *tmp_entry;
4103
4104 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4105 return (false);
4106 }
4107 entry = tmp_entry;
4108 while (start < end) {
4109 if (entry == &map->header) {
4110 return (false);
4111 }
4112
4113 /*
4114 * no holes allowed
4115 */
4116
4117 if (start < entry->start) {
4118 return (false);
4119 }
4120
4121 /*
4122 * check protection associated with entry
4123 */
4124
4125 if ((entry->protection & protection) != protection) {
4126 return (false);
4127 }
4128 start = entry->end;
4129 entry = entry->next;
4130 }
4131 return (true);
4132 }
4133
4134 /*
4135 * uvmspace_alloc: allocate a vmspace structure.
4136 *
4137 * - structure includes vm_map and pmap
4138 * - XXX: no locking on this structure
4139 * - refcnt set to 1, rest must be init'd by caller
4140 */
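/*
 * Illustrative sketch: a vmspace covering the port's user VA range
 * (macro names come from the port's vmparam.h) might be obtained with:
 *
 *	struct vmspace *vm;
 *
 *	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 */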
4141 struct vmspace *
4142 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
4143 {
4144 struct vmspace *vm;
4145 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
4146
4147 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
4148 uvmspace_init(vm, NULL, vmin, vmax);
4149 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
4150 return (vm);
4151 }
4152
4153 /*
4154 * uvmspace_init: initialize a vmspace structure.
4155 *
4156 * - XXX: no locking on this structure
4157 * - refcnt set to 1, rest must be init'd by caller
4158 */
4159 void
4160 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
4161 {
4162 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
4163
4164 memset(vm, 0, sizeof(*vm));
4165 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4166 #ifdef __USING_TOPDOWN_VM
4167 | VM_MAP_TOPDOWN
4168 #endif
4169 );
4170 if (pmap)
4171 pmap_reference(pmap);
4172 else
4173 pmap = pmap_create();
4174 vm->vm_map.pmap = pmap;
4175 vm->vm_refcnt = 1;
4176 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4177 }
4178
4179 /*
4180 * uvmspace_share: share a vmspace between two processes
4181 *
4182 * - used for vfork, threads(?)
4183 */
4184
4185 void
4186 uvmspace_share(struct proc *p1, struct proc *p2)
4187 {
4188
4189 uvmspace_addref(p1->p_vmspace);
4190 p2->p_vmspace = p1->p_vmspace;
4191 }
4192
4193 #if 0
4194
4195 /*
4196 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4197 *
4198 * - XXX: no locking on vmspace
4199 */
4200
4201 void
4202 uvmspace_unshare(struct lwp *l)
4203 {
4204 struct proc *p = l->l_proc;
4205 struct vmspace *nvm, *ovm = p->p_vmspace;
4206
4207 if (ovm->vm_refcnt == 1)
4208 /* nothing to do: vmspace isn't shared in the first place */
4209 return;
4210
4211 /* make a new vmspace, still holding old one */
4212 nvm = uvmspace_fork(ovm);
4213
4214 kpreempt_disable();
4215 pmap_deactivate(l); /* unbind old vmspace */
4216 p->p_vmspace = nvm;
4217 pmap_activate(l); /* switch to new vmspace */
4218 kpreempt_enable();
4219
4220 uvmspace_free(ovm); /* drop reference to old vmspace */
4221 }
4222
4223 #endif
4224
4225 /*
4226 * uvmspace_exec: the process wants to exec a new program
4227 */
4228
4229 void
4230 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4231 {
4232 struct proc *p = l->l_proc;
4233 struct vmspace *nvm, *ovm = p->p_vmspace;
4234 struct vm_map *map = &ovm->vm_map;
4235
4236 #ifdef __sparc__
4237 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
4238 kill_user_windows(l); /* before stack addresses go away */
4239 #endif
4240 #ifdef __HAVE_CPU_VMSPACE_EXEC
4241 cpu_vmspace_exec(l, start, end);
4242 #endif
4243
4244 /*
4245 * see if more than one process is using this vmspace...
4246 */
4247
4248 if (ovm->vm_refcnt == 1) {
4249
4250 /*
4251 * if p is the only process using its vmspace then we can safely
4252 * recycle that vmspace for the program that is being exec'd.
4253 */
4254
4255 #ifdef SYSVSHM
4256 /*
4257 * SYSV SHM semantics require us to kill all segments on an exec
4258 */
4259
4260 if (ovm->vm_shm)
4261 shmexit(ovm);
4262 #endif
4263
4264 /*
4265 * POSIX 1003.1b -- "lock future mappings" is revoked
4266 * when a process execs another program image.
4267 */
4268
4269 map->flags &= ~VM_MAP_WIREFUTURE;
4270
4271 /*
4272 * now unmap the old program
4273 */
4274
4275 pmap_remove_all(map->pmap);
4276 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4277 KASSERT(map->header.prev == &map->header);
4278 KASSERT(map->nentries == 0);
4279
4280 /*
4281 * resize the map
4282 */
4283
4284 vm_map_setmin(map, start);
4285 vm_map_setmax(map, end);
4286 } else {
4287
4288 /*
4289 * p's vmspace is being shared, so we can't reuse it for p since
4290 * it is still being used for others. allocate a new vmspace
4291 * for p
4292 */
4293
4294 nvm = uvmspace_alloc(start, end);
4295
4296 /*
4297 * install new vmspace and drop our ref to the old one.
4298 */
4299
4300 kpreempt_disable();
4301 pmap_deactivate(l);
4302 p->p_vmspace = nvm;
4303 pmap_activate(l);
4304 kpreempt_enable();
4305
4306 uvmspace_free(ovm);
4307 }
4308 }
4309
4310 /*
4311 * uvmspace_addref: add a reference to a vmspace.
4312 */
4313
4314 void
4315 uvmspace_addref(struct vmspace *vm)
4316 {
4317 struct vm_map *map = &vm->vm_map;
4318
4319 KASSERT((map->flags & VM_MAP_DYING) == 0);
4320
4321 mutex_enter(&map->misc_lock);
4322 KASSERT(vm->vm_refcnt > 0);
4323 vm->vm_refcnt++;
4324 mutex_exit(&map->misc_lock);
4325 }
4326
4327 /*
4328 * uvmspace_free: free a vmspace data structure
4329 */
4330
4331 void
4332 uvmspace_free(struct vmspace *vm)
4333 {
4334 struct vm_map_entry *dead_entries;
4335 struct vm_map *map = &vm->vm_map;
4336 int n;
4337
4338 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4339
4340 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4341 mutex_enter(&map->misc_lock);
4342 n = --vm->vm_refcnt;
4343 mutex_exit(&map->misc_lock);
4344 if (n > 0)
4345 return;
4346
4347 /*
4348 * at this point, there should be no other references to the map.
4349 * delete all of the mappings, then destroy the pmap.
4350 */
4351
4352 map->flags |= VM_MAP_DYING;
4353 pmap_remove_all(map->pmap);
4354 #ifdef SYSVSHM
4355 /* Get rid of any SYSV shared memory segments. */
4356 if (vm->vm_shm != NULL)
4357 shmexit(vm);
4358 #endif
4359 if (map->nentries) {
4360 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4361 &dead_entries, NULL, 0);
4362 if (dead_entries != NULL)
4363 uvm_unmap_detach(dead_entries, 0);
4364 }
4365 KASSERT(map->nentries == 0);
4366 KASSERT(map->size == 0);
4367 mutex_destroy(&map->misc_lock);
4368 mutex_destroy(&map->mutex);
4369 rw_destroy(&map->lock);
4370 cv_destroy(&map->cv);
4371 pmap_destroy(map->pmap);
4372 pool_cache_put(&uvm_vmspace_cache, vm);
4373 }
4374
4375 /*
4376 * F O R K - m a i n e n t r y p o i n t
4377 */
4378 /*
4379 * uvmspace_fork: fork a process' main map
4380 *
4381 * => create a new vmspace for child process from parent.
4382 * => parent's map must not be locked.
4383 */
4384
4385 struct vmspace *
4386 uvmspace_fork(struct vmspace *vm1)
4387 {
4388 struct vmspace *vm2;
4389 struct vm_map *old_map = &vm1->vm_map;
4390 struct vm_map *new_map;
4391 struct vm_map_entry *old_entry;
4392 struct vm_map_entry *new_entry;
4393 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4394
4395 vm_map_lock(old_map);
4396
4397 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4398 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4399 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4400 new_map = &vm2->vm_map; /* XXX */
4401
4402 old_entry = old_map->header.next;
4403 new_map->size = old_map->size;
4404
4405 /*
4406 * go entry-by-entry
4407 */
4408
4409 while (old_entry != &old_map->header) {
4410
4411 /*
4412 * first, some sanity checks on the old entry
4413 */
4414
4415 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4416 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4417 !UVM_ET_ISNEEDSCOPY(old_entry));
4418
4419 switch (old_entry->inheritance) {
4420 case MAP_INHERIT_NONE:
4421
4422 /*
4423 * drop the mapping, modify size
4424 */
4425 new_map->size -= old_entry->end - old_entry->start;
4426 break;
4427
4428 case MAP_INHERIT_SHARE:
4429
4430 /*
4431 * share the mapping: this means we want the old and
4432 * new entries to share amaps and backing objects.
4433 */
4434 /*
4435 * if the old_entry needs a new amap (due to prev fork)
4436 * then we need to allocate it now so that we have
4437 * something we own to share with the new_entry. [in
4438 * other words, we need to clear needs_copy]
4439 */
4440
4441 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4442 /* get our own amap, clears needs_copy */
4443 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4444 0, 0);
4445 /* XXXCDC: WAITOK??? */
4446 }
4447
4448 new_entry = uvm_mapent_alloc(new_map, 0);
4449 /* old_entry -> new_entry */
4450 uvm_mapent_copy(old_entry, new_entry);
4451
4452 /* new pmap has nothing wired in it */
4453 new_entry->wired_count = 0;
4454
4455 /*
4456 * gain reference to object backing the map (can't
4457 * be a submap, already checked this case).
4458 */
4459
4460 if (new_entry->aref.ar_amap)
4461 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4462
4463 if (new_entry->object.uvm_obj &&
4464 new_entry->object.uvm_obj->pgops->pgo_reference)
4465 new_entry->object.uvm_obj->
4466 pgops->pgo_reference(
4467 new_entry->object.uvm_obj);
4468
4469 /* insert entry at end of new_map's entry list */
4470 uvm_map_entry_link(new_map, new_map->header.prev,
4471 new_entry);
4472
4473 break;
4474
4475 case MAP_INHERIT_COPY:
4476
4477 /*
4478 * copy-on-write the mapping (using mmap's
4479 * MAP_PRIVATE semantics)
4480 *
4481 * allocate new_entry, adjust reference counts.
4482 * (note that new references are read-only).
4483 */
4484
4485 new_entry = uvm_mapent_alloc(new_map, 0);
4486 /* old_entry -> new_entry */
4487 uvm_mapent_copy(old_entry, new_entry);
4488
4489 if (new_entry->aref.ar_amap)
4490 uvm_map_reference_amap(new_entry, 0);
4491
4492 if (new_entry->object.uvm_obj &&
4493 new_entry->object.uvm_obj->pgops->pgo_reference)
4494 new_entry->object.uvm_obj->pgops->pgo_reference
4495 (new_entry->object.uvm_obj);
4496
4497 /* new pmap has nothing wired in it */
4498 new_entry->wired_count = 0;
4499
4500 new_entry->etype |=
4501 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4502 uvm_map_entry_link(new_map, new_map->header.prev,
4503 new_entry);
4504
4505 /*
4506 * the new entry will need an amap. it will either
4507 * need to be copied from the old entry or created
4508 * from scratch (if the old entry does not have an
4509 * amap). can we defer this process until later
4510 * (by setting "needs_copy") or do we need to copy
4511 * the amap now?
4512 *
4513 * we must copy the amap now if any of the following
4514 * conditions hold:
4515 * 1. the old entry has an amap and that amap is
4516 * being shared. this means that the old (parent)
4517 * process is sharing the amap with another
4518 * process. if we do not clear needs_copy here
4519 * we will end up in a situation where both the
4520 * parent and child process are referring to the
4521 * same amap with "needs_copy" set. if the
4522 * parent write-faults, the fault routine will
4523 * clear "needs_copy" in the parent by allocating
4524 * a new amap. this is wrong because the
4525 * parent is supposed to be sharing the old amap
4526 * and the new amap will break that.
4527 *
4528 * 2. if the old entry has an amap and a non-zero
4529 * wire count then we are going to have to call
4530 * amap_cow_now to avoid page faults in the
4531 * parent process. since amap_cow_now requires
4532 * "needs_copy" to be clear we might as well
4533 * clear it here as well.
4534 *
4535 */
4536
4537 if (old_entry->aref.ar_amap != NULL) {
4538 if ((amap_flags(old_entry->aref.ar_amap) &
4539 AMAP_SHARED) != 0 ||
4540 VM_MAPENT_ISWIRED(old_entry)) {
4541
4542 amap_copy(new_map, new_entry,
4543 AMAP_COPY_NOCHUNK, 0, 0);
4544 /* XXXCDC: M_WAITOK ... ok? */
4545 }
4546 }
4547
4548 /*
4549 * if the parent's entry is wired down, then the
4550 * parent process does not want page faults on
4551 * access to that memory. this means that we
4552 * cannot do copy-on-write because we can't write
4553 * protect the old entry. in this case we
4554 * resolve all copy-on-write faults now, using
4555 * amap_cow_now. note that we have already
4556 * allocated any needed amap (above).
4557 */
4558
4559 if (VM_MAPENT_ISWIRED(old_entry)) {
4560
4561 /*
4562 * resolve all copy-on-write faults now
4563 * (note that there is nothing to do if
4564 * the old mapping does not have an amap).
4565 */
4566 if (old_entry->aref.ar_amap)
4567 amap_cow_now(new_map, new_entry);
4568
4569 } else {
4570
4571 /*
4572 * set up mappings to trigger copy-on-write faults:
4573 * we must write-protect the parent if it has
4574 * an amap and it is not already "needs_copy"...
4575 * if it is already "needs_copy" then the parent
4576 * has already been write-protected by a previous
4577 * fork operation.
4578 */
4579
4580 if (old_entry->aref.ar_amap &&
4581 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4582 if (old_entry->max_protection & VM_PROT_WRITE) {
4583 pmap_protect(old_map->pmap,
4584 old_entry->start,
4585 old_entry->end,
4586 old_entry->protection &
4587 ~VM_PROT_WRITE);
4588 }
4589 old_entry->etype |= UVM_ET_NEEDSCOPY;
4590 }
4591 }
4592 break;
4593 } /* end of switch statement */
4594 old_entry = old_entry->next;
4595 }
4596
4597 pmap_update(old_map->pmap);
4598 vm_map_unlock(old_map);
4599
4600 #ifdef SYSVSHM
4601 if (vm1->vm_shm)
4602 shmfork(vm1, vm2);
4603 #endif
4604
4605 #ifdef PMAP_FORK
4606 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4607 #endif
4608
4609 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4610 return (vm2);
4611 }
4612
4613
4614 /*
4615 * in-kernel map entry allocation.
4616 */
4617
4618 struct uvm_kmapent_hdr {
4619 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4620 int ukh_nused;
4621 struct vm_map_entry *ukh_freelist;
4622 struct vm_map *ukh_map;
4623 struct vm_map_entry ukh_entries[0];
4624 };
4625
4626 #define UVM_KMAPENT_CHUNK \
4627 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4628 / sizeof(struct vm_map_entry))
4629
4630 #define UVM_KHDR_FIND(entry) \
4631 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
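/*
 * For illustration (numbers assume a 4096-byte PAGE_SIZE and a
 * vm_map_entry of roughly 130 bytes; both vary by port): each chunk
 * page then yields UVM_KMAPENT_CHUNK of about 30 entries after the
 * header. UVM_KHDR_FIND simply masks an entry's address down to the
 * start of its page, where the uvm_kmapent_hdr lives.
 */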
4632
4633
4634 #ifdef DIAGNOSTIC
4635 static struct vm_map *
4636 uvm_kmapent_map(struct vm_map_entry *entry)
4637 {
4638 const struct uvm_kmapent_hdr *ukh;
4639
4640 ukh = UVM_KHDR_FIND(entry);
4641 return ukh->ukh_map;
4642 }
4643 #endif
4644
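/*
 * uvm_kmapent_get: take an entry from the header's freelist.
 *
 * => returns NULL if all of the chunk's entries are in use.
 */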
4645 static inline struct vm_map_entry *
4646 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4647 {
4648 struct vm_map_entry *entry;
4649
4650 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4651 KASSERT(ukh->ukh_nused >= 0);
4652
4653 entry = ukh->ukh_freelist;
4654 if (entry) {
4655 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4656 == UVM_MAP_KERNEL);
4657 ukh->ukh_freelist = entry->next;
4658 ukh->ukh_nused++;
4659 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4660 } else {
4661 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4662 }
4663
4664 return entry;
4665 }
4666
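/*
 * uvm_kmapent_put: return an entry to its header's freelist.
 */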
4667 static inline void
4668 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4669 {
4670
4671 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4672 == UVM_MAP_KERNEL);
4673 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4674 KASSERT(ukh->ukh_nused > 0);
4675 KASSERT(ukh->ukh_freelist != NULL ||
4676 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4677 KASSERT(ukh->ukh_freelist == NULL ||
4678 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4679
4680 ukh->ukh_nused--;
4681 entry->next = ukh->ukh_freelist;
4682 ukh->ukh_freelist = entry;
4683 }
4684
4685 /*
4686 * uvm_kmapent_alloc: allocate a map entry for an in-kernel map
4687 */
4688
4689 static struct vm_map_entry *
4690 uvm_kmapent_alloc(struct vm_map *map, int flags)
4691 {
4692 struct vm_page *pg;
4693 struct uvm_kmapent_hdr *ukh;
4694 struct vm_map_entry *entry;
4695 #ifndef PMAP_MAP_POOLPAGE
4696 struct uvm_map_args args;
4697 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4698 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4699 int error;
4700 #endif
4701 vaddr_t va;
4702 int i;
4703
4704 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4705 KDASSERT(kernel_map != NULL);
4706 KASSERT(vm_map_pmap(map) == pmap_kernel());
4707
4708 UVMMAP_EVCNT_INCR(uke_alloc);
4709 entry = NULL;
4710 again:
4711 /*
4712 * try to grab an entry from freelist.
4713 */
4714 mutex_spin_enter(&uvm_kentry_lock);
4715 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4716 if (ukh) {
4717 entry = uvm_kmapent_get(ukh);
4718 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4719 LIST_REMOVE(ukh, ukh_listq);
4720 }
4721 mutex_spin_exit(&uvm_kentry_lock);
4722
4723 if (entry)
4724 return entry;
4725
4726 /*
4727 * there's no free entry for this vm_map.
4728 * now we need to allocate some vm_map_entry.
4729 * for simplicity, always allocate one page chunk of them at once.
4730 */
4731
4732 pg = uvm_pagealloc(NULL, 0, NULL,
4733 (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
4734 if (__predict_false(pg == NULL)) {
4735 if (flags & UVM_FLAG_NOWAIT)
4736 return NULL;
4737 uvm_wait("kme_alloc");
4738 goto again;
4739 }
4740
4741 #ifdef PMAP_MAP_POOLPAGE
4742 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
4743 KASSERT(va != 0);
4744 #else
4745 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4746 0, mapflags, &args);
4747 if (error) {
4748 uvm_pagefree(pg);
4749 return NULL;
4750 }
4751
4752 va = args.uma_start;
4753
4754 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
4755 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
4756 pmap_update(vm_map_pmap(map));
4757
4758 #endif
4759 ukh = (void *)va;
4760
4761 /*
4762 * use the last entry for the ukh itself.
4763 */
4764
4765 i = UVM_KMAPENT_CHUNK - 1;
4766 #ifndef PMAP_MAP_POOLPAGE
4767 entry = &ukh->ukh_entries[i--];
4768 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4769 error = uvm_map_enter(map, &args, entry);
4770 KASSERT(error == 0);
4771 #endif
4772
4773 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4774 ukh->ukh_map = map;
4775 ukh->ukh_freelist = NULL;
4776 for (; i >= 1; i--) {
4777 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4778
4779 xentry->flags = UVM_MAP_KERNEL;
4780 uvm_kmapent_put(ukh, xentry);
4781 }
4782 #ifdef PMAP_MAP_POOLPAGE
4783 KASSERT(ukh->ukh_nused == 1);
4784 #else
4785 KASSERT(ukh->ukh_nused == 2);
4786 #endif
4787
4788 mutex_spin_enter(&uvm_kentry_lock);
4789 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4790 ukh, ukh_listq);
4791 mutex_spin_exit(&uvm_kentry_lock);
4792
4793 /*
4794 * return first entry.
4795 */
4796
4797 entry = &ukh->ukh_entries[0];
4798 entry->flags = UVM_MAP_KERNEL;
4799 UVMMAP_EVCNT_INCR(ukh_alloc);
4800
4801 return entry;
4802 }
4803
4804 /*
4805 * uvm_kmapent_free: free a map entry for an in-kernel map
4806 */
4807
4808 static void
4809 uvm_kmapent_free(struct vm_map_entry *entry)
4810 {
4811 struct uvm_kmapent_hdr *ukh;
4812 struct vm_page *pg;
4813 struct vm_map *map;
4814 #ifndef PMAP_UNMAP_POOLPAGE
4815 struct pmap *pmap;
4816 struct vm_map_entry *deadentry;
4817 #endif
4818 vaddr_t va;
4819 paddr_t pa;
4820
4821 UVMMAP_EVCNT_INCR(uke_free);
4822 ukh = UVM_KHDR_FIND(entry);
4823 map = ukh->ukh_map;
4824
4825 mutex_spin_enter(&uvm_kentry_lock);
4826 uvm_kmapent_put(ukh, entry);
4827 #ifdef PMAP_UNMAP_POOLPAGE
4828 if (ukh->ukh_nused > 0) {
4829 #else
4830 if (ukh->ukh_nused > 1) {
4831 #endif
4832 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4833 LIST_INSERT_HEAD(
4834 &vm_map_to_kernel(map)->vmk_kentry_free,
4835 ukh, ukh_listq);
4836 mutex_spin_exit(&uvm_kentry_lock);
4837 return;
4838 }
4839
4840 /*
4841 * now we can free this ukh.
4842 *
4843 * however, keep an empty ukh to avoid ping-pong.
4844 */
4845
4846 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4847 LIST_NEXT(ukh, ukh_listq) == NULL) {
4848 mutex_spin_exit(&uvm_kentry_lock);
4849 return;
4850 }
4851 LIST_REMOVE(ukh, ukh_listq);
4852 mutex_spin_exit(&uvm_kentry_lock);
4853
4854 va = (vaddr_t)ukh;
4855
4856 #ifdef PMAP_UNMAP_POOLPAGE
4857 KASSERT(ukh->ukh_nused == 0);
4858 pa = PMAP_UNMAP_POOLPAGE(va);
4859 KASSERT(pa != 0);
4860 #else
4861 KASSERT(ukh->ukh_nused == 1);
4862
4863 /*
4864 * remove the map entry for the ukh itself.
4865 */
4866
4867 KASSERT((va & PAGE_MASK) == 0);
4868 vm_map_lock(map);
4869 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4870 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4871 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4872 KASSERT(deadentry->next == NULL);
4873 KASSERT(deadentry == &ukh->ukh_entries[UVM_KMAPENT_CHUNK - 1]);
4874
4875 /*
4876 * unmap the page from pmap and free it.
4877 */
4878
4879 pmap = vm_map_pmap(map);
4880 KASSERT(pmap == pmap_kernel());
4881 if (!pmap_extract(pmap, va, &pa))
4882 panic("%s: no mapping", __func__);
4883 pmap_kremove(va, PAGE_SIZE);
4884 pmap_update(vm_map_pmap(map));
4885 vm_map_unlock(map);
4886 #endif /* !PMAP_UNMAP_POOLPAGE */
4887 pg = PHYS_TO_VM_PAGE(pa);
4888 uvm_pagefree(pg);
4889 UVMMAP_EVCNT_INCR(ukh_free);
4890 }
4891
4892 static vsize_t
4893 uvm_kmapent_overhead(vsize_t size)
4894 {
4895
4896 /*
4897 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4898 * as the min allocation unit is PAGE_SIZE.
4899 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4900 * one of them is used to map the page itself.
4901 */
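/*
 * Worked example (illustrative; assumes a 4 KB PAGE_SIZE and
 * UVM_KMAPENT_CHUNK == 30, both of which vary by port): a 1 MB
 * map may need up to howmany(1 MB, 4 KB) == 256 entries, each
 * chunk page yields 29 usable entries, so the overhead is
 * howmany(256, 29) == 9 pages, i.e. 36 KB.
 */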
4902
4903 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4904 PAGE_SIZE;
4905 }
4906
4907 /*
4908 * map entry reservation
4909 */
4910
4911 /*
4912 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4913 *
4914 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4915 * => caller shouldn't hold map locked.
4916 */
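/*
 * Typical pattern (cf. uvm_unmap1() below): reserve entries before
 * taking the map lock, consume them under the lock, then release
 * whatever is left over:
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	if (uvm_mapent_reserve(map, &umr, 2, 0) == 0) {
 *		vm_map_lock(map);
 *		... clip or unmap using &umr ...
 *		vm_map_unlock(map);
 *		uvm_mapent_unreserve(map, &umr);
 *	}
 */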
4917 int
4918 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4919 int nentries, int flags)
4920 {
4921
4922 umr->umr_nentries = 0;
4923
4924 if ((flags & UVM_FLAG_QUANTUM) != 0)
4925 return 0;
4926
4927 if (!VM_MAP_USE_KMAPENT(map))
4928 return 0;
4929
4930 while (nentries--) {
4931 struct vm_map_entry *ent;
4932 ent = uvm_kmapent_alloc(map, flags);
4933 if (!ent) {
4934 uvm_mapent_unreserve(map, umr);
4935 return ENOMEM;
4936 }
4937 UMR_PUTENTRY(umr, ent);
4938 }
4939
4940 return 0;
4941 }
4942
4943 /*
4944 * uvm_mapent_unreserve:
4945 *
4946 * => caller shouldn't hold map locked.
4947 * => never fails or sleeps.
4948 */
4949 void
4950 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4951 {
4952
4953 while (!UMR_EMPTY(umr))
4954 uvm_kmapent_free(UMR_GETENTRY(umr));
4955 }
4956
4957 /*
4958 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4959 *
4960 * => called with map locked.
4961 * => returns non-zero if successfully merged.
4962 */
4963
4964 int
4965 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4966 {
4967 struct uvm_object *uobj;
4968 struct vm_map_entry *next;
4969 struct vm_map_entry *prev;
4970 vsize_t size;
4971 int merged = 0;
4972 bool copying;
4973 int newetype;
4974
4975 if (VM_MAP_USE_KMAPENT(map)) {
4976 return 0;
4977 }
4978 if (entry->aref.ar_amap != NULL) {
4979 return 0;
4980 }
4981 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4982 return 0;
4983 }
4984
4985 uobj = entry->object.uvm_obj;
4986 size = entry->end - entry->start;
4987 copying = (flags & UVM_MERGE_COPYING) != 0;
4988 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4989
4990 next = entry->next;
4991 if (next != &map->header &&
4992 next->start == entry->end &&
4993 ((copying && next->aref.ar_amap != NULL &&
4994 amap_refs(next->aref.ar_amap) == 1) ||
4995 (!copying && next->aref.ar_amap == NULL)) &&
4996 UVM_ET_ISCOMPATIBLE(next, newetype,
4997 uobj, entry->flags, entry->protection,
4998 entry->max_protection, entry->inheritance, entry->advice,
4999 entry->wired_count) &&
5000 (uobj == NULL || entry->offset + size == next->offset)) {
5001 int error;
5002
5003 if (copying) {
5004 error = amap_extend(next, size,
5005 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
5006 } else {
5007 error = 0;
5008 }
5009 if (error == 0) {
5010 if (uobj) {
5011 if (uobj->pgops->pgo_detach) {
5012 uobj->pgops->pgo_detach(uobj);
5013 }
5014 }
5015
5016 entry->end = next->end;
5017 clear_hints(map, next);
5018 uvm_map_entry_unlink(map, next);
5019 if (copying) {
5020 entry->aref = next->aref;
5021 entry->etype &= ~UVM_ET_NEEDSCOPY;
5022 }
5023 uvm_map_check(map, "trymerge forwardmerge");
5024 uvm_mapent_free_merged(map, next);
5025 merged++;
5026 }
5027 }
5028
5029 prev = entry->prev;
5030 if (prev != &map->header &&
5031 prev->end == entry->start &&
5032 ((copying && !merged && prev->aref.ar_amap != NULL &&
5033 amap_refs(prev->aref.ar_amap) == 1) ||
5034 (!copying && prev->aref.ar_amap == NULL)) &&
5035 UVM_ET_ISCOMPATIBLE(prev, newetype,
5036 uobj, entry->flags, entry->protection,
5037 entry->max_protection, entry->inheritance, entry->advice,
5038 entry->wired_count) &&
5039 (uobj == NULL ||
5040 prev->offset + prev->end - prev->start == entry->offset)) {
5041 int error;
5042
5043 if (copying) {
5044 error = amap_extend(prev, size,
5045 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
5046 } else {
5047 error = 0;
5048 }
5049 if (error == 0) {
5050 if (uobj) {
5051 if (uobj->pgops->pgo_detach) {
5052 uobj->pgops->pgo_detach(uobj);
5053 }
5054 entry->offset = prev->offset;
5055 }
5056
5057 entry->start = prev->start;
5058 clear_hints(map, prev);
5059 uvm_map_entry_unlink(map, prev);
5060 if (copying) {
5061 entry->aref = prev->aref;
5062 entry->etype &= ~UVM_ET_NEEDSCOPY;
5063 }
5064 uvm_map_check(map, "trymerge backmerge");
5065 uvm_mapent_free_merged(map, prev);
5066 merged++;
5067 }
5068 }
5069
5070 return merged;
5071 }
5072
5073 /*
5074 * uvm_map_create: create map
5075 */
5076
5077 struct vm_map *
5078 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5079 {
5080 struct vm_map *result;
5081
5082 result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
5083 uvm_map_setup(result, vmin, vmax, flags);
5084 result->pmap = pmap;
5085 return(result);
5086 }
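
/*
 * Illustrative sketch ("sva" and "eva" are hypothetical bounds): a
 * caller building a pageable map over a kernel virtual range might use:
 *
 *	struct vm_map *m;
 *
 *	m = uvm_map_create(pmap_kernel(), sva, eva, VM_MAP_PAGEABLE);
 */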
5087
5088 /*
5089 * uvm_map_setup: init map
5090 *
5091 * => map must not be in service yet.
5092 */
5093
5094 void
5095 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5096 {
5097 int ipl;
5098
5099 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
5100 map->header.next = map->header.prev = &map->header;
5101 map->nentries = 0;
5102 map->size = 0;
5103 map->ref_count = 1;
5104 vm_map_setmin(map, vmin);
5105 vm_map_setmax(map, vmax);
5106 map->flags = flags;
5107 map->first_free = &map->header;
5108 map->hint = &map->header;
5109 map->timestamp = 0;
5110 map->busy = NULL;
5111
5112 if ((flags & VM_MAP_INTRSAFE) != 0) {
5113 ipl = IPL_VM;
5114 } else {
5115 ipl = IPL_NONE;
5116 }
5117
5118 rw_init(&map->lock);
5119 cv_init(&map->cv, "vm_map");
5120 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5121 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
5122 }
5123
5124
5125 /*
5126 * U N M A P - m a i n e n t r y p o i n t
5127 */
5128
5129 /*
5130 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
5131 *
5132 * => caller must check alignment and size
5133 * => map must be unlocked (we will lock it)
5134 * => flags is UVM_FLAG_QUANTUM or 0.
5135 */
5136
5137 void
5138 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5139 {
5140 struct vm_map_entry *dead_entries;
5141 struct uvm_mapent_reservation umr;
5142 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5143
5144 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5145 map, start, end, 0);
5146 if (map == kernel_map) {
5147 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5148 }
5149 /*
5150 * the work is done by helper functions: remove the mappings, then
5151 * detach from the dead (i.e. removed) entries...
5152 */
5153 uvm_mapent_reserve(map, &umr, 2, flags);
5154 vm_map_lock(map);
5155 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5156 vm_map_unlock(map);
5157 uvm_mapent_unreserve(map, &umr);
5158
5159 if (dead_entries != NULL)
5160 uvm_unmap_detach(dead_entries, 0);
5161
5162 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5163 }
5164
5165
5166 /*
5167 * uvm_map_reference: add reference to a map
5168 *
5169 * => map need not be locked (we use misc_lock).
5170 */
5171
5172 void
5173 uvm_map_reference(struct vm_map *map)
5174 {
5175 mutex_enter(&map->misc_lock);
5176 map->ref_count++;
5177 mutex_exit(&map->misc_lock);
5178 }
5179
5180 struct vm_map_kernel *
5181 vm_map_to_kernel(struct vm_map *map)
5182 {
5183
5184 KASSERT(VM_MAP_IS_KERNEL(map));
5185
5186 return (struct vm_map_kernel *)map;
5187 }
5188
5189 bool
5190 vm_map_starved_p(struct vm_map *map)
5191 {
5192
5193 if ((map->flags & VM_MAP_WANTVA) != 0) {
5194 return true;
5195 }
5196 /* XXX */
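/* consider the map starved once roughly 15/16 of its VA is in use */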
5197 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5198 return true;
5199 }
5200 return false;
5201 }
5202
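/*
 * uvm_map_lock_entry: lock the object and amap backing a map entry.
 *
 * => the object lock is taken before the amap lock;
 *    uvm_map_unlock_entry() releases them in the opposite order.
 */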
5203 void
5204 uvm_map_lock_entry(struct vm_map_entry *entry)
5205 {
5206
5207 if (UVM_ET_ISOBJ(entry)) {
5208 mutex_enter(entry->object.uvm_obj->vmobjlock);
5209 }
5210 if (entry->aref.ar_amap != NULL) {
5211 amap_lock(entry->aref.ar_amap);
5212 }
5213 }
5214
5215 void
5216 uvm_map_unlock_entry(struct vm_map_entry *entry)
5217 {
5218
5219 if (entry->aref.ar_amap != NULL) {
5220 amap_unlock(entry->aref.ar_amap);
5221 }
5222 if (UVM_ET_ISOBJ(entry)) {
5223 mutex_exit(entry->object.uvm_obj->vmobjlock);
5224 }
5225 }
5226
5227 #if defined(DDB) || defined(DEBUGPRINT)
5228
5229 /*
5230 * uvm_map_printit: actually prints the map
5231 */
5232
5233 void
5234 uvm_map_printit(struct vm_map *map, bool full,
5235 void (*pr)(const char *, ...))
5236 {
5237 struct vm_map_entry *entry;
5238
5239 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
5240 vm_map_max(map));
5241 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
5242 map->nentries, map->size, map->ref_count, map->timestamp,
5243 map->flags);
5244 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5245 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5246 if (!full)
5247 return;
5248 for (entry = map->header.next; entry != &map->header;
5249 entry = entry->next) {
5250 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
5251 entry, entry->start, entry->end, entry->object.uvm_obj,
5252 (long long)entry->offset, entry->aref.ar_amap,
5253 entry->aref.ar_pageoff);
5254 (*pr)(
5255 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5256 "wc=%d, adv=%d\n",
5257 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5258 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5259 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5260 entry->protection, entry->max_protection,
5261 entry->inheritance, entry->wired_count, entry->advice);
5262 }
5263 }
5264
5265 void
5266 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5267 {
5268 struct vm_map *map;
5269
5270 for (map = kernel_map;;) {
5271 struct vm_map_entry *entry;
5272
5273 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5274 break;
5275 }
5276 (*pr)("%p is %p+%zu from VMMAP %p\n",
5277 (void *)addr, (void *)entry->start,
5278 (size_t)(addr - (uintptr_t)entry->start), map);
5279 if (!UVM_ET_ISSUBMAP(entry)) {
5280 break;
5281 }
5282 map = entry->object.sub_map;
5283 }
5284 }
5285
5286 #endif /* DDB || DEBUGPRINT */
5287
5288 #ifndef __USER_VA0_IS_SAFE
5289 static int
5290 sysctl_user_va0_disable(SYSCTLFN_ARGS)
5291 {
5292 struct sysctlnode node;
5293 int t, error;
5294
5295 node = *rnode;
5296 node.sysctl_data = &t;
5297 t = user_va0_disable;
5298 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5299 if (error || newp == NULL)
5300 return (error);
5301
5302 /* lower only at securelevel < 1 */
5303 if (!t && user_va0_disable &&
5304 kauth_authorize_system(l->l_cred,
5305 KAUTH_SYSTEM_CHSYSFLAGS /* XXX */, 0,
5306 NULL, NULL, NULL))
5307 return EPERM;
5308
5309 user_va0_disable = !!t;
5310 return 0;
5311 }
5312
5313 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
5314 {
5315
5316 sysctl_createv(clog, 0, NULL, NULL,
5317 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5318 CTLTYPE_INT, "user_va0_disable",
5319 SYSCTL_DESCR("Disable VA 0"),
5320 sysctl_user_va0_disable, 0, &user_va0_disable, 0,
5321 CTL_VM, CTL_CREATE, CTL_EOL);
5322 }
5323 #endif
5324