/* $NetBSD: uvm_map.c,v 1.296 2011/04/08 10:38:36 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
37 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
38 *
39 *
40 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41 * All rights reserved.
42 *
43 * Permission to use, copy, modify and distribute this software and
44 * its documentation is hereby granted, provided that both the copyright
45 * notice and this permission notice appear in all copies of the
46 * software, derivative works or modified versions, and any portions
47 * thereof, and that both notices appear in supporting documentation.
48 *
49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52 *
53 * Carnegie Mellon requests users of this software to return to
54 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
56 * School of Computer Science
57 * Carnegie Mellon University
58 * Pittsburgh PA 15213-3890
59 *
60 * any improvements or extensions that they make and grant Carnegie the
61 * rights to redistribute these changes.
62 */
63
64 /*
65 * uvm_map.c: uvm map operations
66 */
67
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.296 2011/04/08 10:38:36 yamt Exp $");
70
71 #include "opt_ddb.h"
72 #include "opt_uvmhist.h"
73 #include "opt_uvm.h"
74 #include "opt_sysv.h"
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/mman.h>
79 #include <sys/proc.h>
80 #include <sys/malloc.h>
81 #include <sys/pool.h>
82 #include <sys/kernel.h>
83 #include <sys/mount.h>
84 #include <sys/vnode.h>
85 #include <sys/lockdebug.h>
86 #include <sys/atomic.h>
87 #ifndef __USER_VA0_IS_SAFE
88 #include <sys/sysctl.h>
89 #include <sys/kauth.h>
90 #include "opt_user_va0_disable_default.h"
91 #endif
92
93 #ifdef SYSVSHM
94 #include <sys/shm.h>
95 #endif
96
97 #include <uvm/uvm.h>
98 #include <uvm/uvm_readahead.h>
99
100 #if defined(DDB) || defined(DEBUGPRINT)
101 #include <uvm/uvm_ddb.h>
102 #endif
103
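/*
 * optional event counters for map operations (entry merges, lookups,
 * kernel map entry allocations); compiled out unless UVMMAP_COUNTERS
 * is defined.
 */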
104 #if !defined(UVMMAP_COUNTERS)
105
106 #define UVMMAP_EVCNT_DEFINE(name) /* nothing */
107 #define UVMMAP_EVCNT_INCR(ev) /* nothing */
108 #define UVMMAP_EVCNT_DECR(ev) /* nothing */
109
#else /* defined(UVMMAP_COUNTERS) */
111
112 #include <sys/evcnt.h>
113 #define UVMMAP_EVCNT_DEFINE(name) \
114 struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
115 "uvmmap", #name); \
116 EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
117 #define UVMMAP_EVCNT_INCR(ev) uvmmap_evcnt_##ev.ev_count++
118 #define UVMMAP_EVCNT_DECR(ev) uvmmap_evcnt_##ev.ev_count--
119
#endif /* defined(UVMMAP_COUNTERS) */
121
122 UVMMAP_EVCNT_DEFINE(ubackmerge)
123 UVMMAP_EVCNT_DEFINE(uforwmerge)
124 UVMMAP_EVCNT_DEFINE(ubimerge)
125 UVMMAP_EVCNT_DEFINE(unomerge)
126 UVMMAP_EVCNT_DEFINE(kbackmerge)
127 UVMMAP_EVCNT_DEFINE(kforwmerge)
128 UVMMAP_EVCNT_DEFINE(kbimerge)
129 UVMMAP_EVCNT_DEFINE(knomerge)
130 UVMMAP_EVCNT_DEFINE(map_call)
131 UVMMAP_EVCNT_DEFINE(mlk_call)
132 UVMMAP_EVCNT_DEFINE(mlk_hint)
133 UVMMAP_EVCNT_DEFINE(mlk_list)
134 UVMMAP_EVCNT_DEFINE(mlk_tree)
135 UVMMAP_EVCNT_DEFINE(mlk_treeloop)
136 UVMMAP_EVCNT_DEFINE(mlk_listloop)
137
138 UVMMAP_EVCNT_DEFINE(uke_alloc)
139 UVMMAP_EVCNT_DEFINE(uke_free)
140 UVMMAP_EVCNT_DEFINE(ukh_alloc)
141 UVMMAP_EVCNT_DEFINE(ukh_free)
142
143 const char vmmapbsy[] = "vmmapbsy";
144
145 /*
146 * cache for vmspace structures.
147 */
148
149 static struct pool_cache uvm_vmspace_cache;
150
151 /*
152 * cache for dynamically-allocated map entries.
153 */
154
155 static struct pool_cache uvm_map_entry_cache;
156
157 MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
158 MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
159
160 #ifdef PMAP_GROWKERNEL
161 /*
162 * This global represents the end of the kernel virtual address
163 * space. If we want to exceed this, we must grow the kernel
164 * virtual address space dynamically.
165 *
166 * Note, this variable is locked by kernel_map's lock.
167 */
168 vaddr_t uvm_maxkaddr;
169 #endif
170
171 #ifndef __USER_VA0_IS_SAFE
172 #ifndef __USER_VA0_DISABLE_DEFAULT
173 #define __USER_VA0_DISABLE_DEFAULT 1
174 #endif
175 #ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
176 #undef __USER_VA0_DISABLE_DEFAULT
177 #define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
178 #endif
179 static int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
180 #endif
181
182 /*
183 * macros
184 */
185
186 /*
187 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
188 * for the vm_map.
189 */
190 extern struct vm_map *pager_map; /* XXX */
191 #define VM_MAP_USE_KMAPENT_FLAGS(flags) \
192 (((flags) & VM_MAP_INTRSAFE) != 0)
193 #define VM_MAP_USE_KMAPENT(map) \
194 (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)
195
196 /*
197 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
198 */
199
200 #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
201 prot, maxprot, inh, adv, wire) \
202 ((ent)->etype == (type) && \
203 (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
204 == 0 && \
205 (ent)->object.uvm_obj == (uobj) && \
206 (ent)->protection == (prot) && \
207 (ent)->max_protection == (maxprot) && \
208 (ent)->inheritance == (inh) && \
209 (ent)->advice == (adv) && \
210 (ent)->wired_count == (wire))
211
212 /*
213 * uvm_map_entry_link: insert entry into a map
214 *
215 * => map must be locked
216 */
217 #define uvm_map_entry_link(map, after_where, entry) do { \
218 uvm_mapent_check(entry); \
219 (map)->nentries++; \
220 (entry)->prev = (after_where); \
221 (entry)->next = (after_where)->next; \
222 (entry)->prev->next = (entry); \
223 (entry)->next->prev = (entry); \
224 uvm_rb_insert((map), (entry)); \
225 } while (/*CONSTCOND*/ 0)
226
227 /*
228 * uvm_map_entry_unlink: remove entry from a map
229 *
230 * => map must be locked
231 */
232 #define uvm_map_entry_unlink(map, entry) do { \
233 KASSERT((entry) != (map)->first_free); \
234 KASSERT((entry) != (map)->hint); \
235 uvm_mapent_check(entry); \
236 (map)->nentries--; \
237 (entry)->next->prev = (entry)->prev; \
238 (entry)->prev->next = (entry)->next; \
239 uvm_rb_remove((map), (entry)); \
240 } while (/*CONSTCOND*/ 0)
241
242 /*
243 * SAVE_HINT: saves the specified entry as the hint for future lookups.
244 *
245 * => map need not be locked.
246 */
247 #define SAVE_HINT(map, check, value) do { \
248 if ((map)->hint == (check)) \
249 (map)->hint = (value); \
250 } while (/*CONSTCOND*/ 0)
251
252 /*
253 * clear_hints: ensure that hints don't point to the entry.
254 *
255 * => map must be write-locked.
256 */
257 static void
258 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
259 {
260
261 SAVE_HINT(map, ent, ent->prev);
262 if (map->first_free == ent) {
263 map->first_free = ent->prev;
264 }
265 }
266
267 /*
268 * VM_MAP_RANGE_CHECK: check and correct range
269 *
270 * => map must at least be read locked
271 */
272
273 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
274 if (start < vm_map_min(map)) \
275 start = vm_map_min(map); \
276 if (end > vm_map_max(map)) \
277 end = vm_map_max(map); \
278 if (start > end) \
279 start = end; \
280 } while (/*CONSTCOND*/ 0)
281
282 /*
283 * local prototypes
284 */
285
286 static struct vm_map_entry *
287 uvm_mapent_alloc(struct vm_map *, int);
288 static struct vm_map_entry *
289 uvm_mapent_alloc_split(struct vm_map *,
290 const struct vm_map_entry *, int,
291 struct uvm_mapent_reservation *);
292 static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
293 static void uvm_mapent_free(struct vm_map_entry *);
294 #if defined(DEBUG)
295 static void _uvm_mapent_check(const struct vm_map_entry *, const char *,
296 int);
297 #define uvm_mapent_check(map) _uvm_mapent_check(map, __FILE__, __LINE__)
298 #else /* defined(DEBUG) */
299 #define uvm_mapent_check(e) /* nothing */
300 #endif /* defined(DEBUG) */
301 static struct vm_map_entry *
302 uvm_kmapent_alloc(struct vm_map *, int);
303 static void uvm_kmapent_free(struct vm_map_entry *);
304 static vsize_t uvm_kmapent_overhead(vsize_t);
305
306 static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
307 static void uvm_map_reference_amap(struct vm_map_entry *, int);
308 static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
309 struct vm_map_entry *);
310 static void uvm_map_unreference_amap(struct vm_map_entry *, int);
311
312 int _uvm_map_sanity(struct vm_map *);
313 int _uvm_tree_sanity(struct vm_map *);
314 static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);
315
316 #define ROOT_ENTRY(map) ((struct vm_map_entry *)(map)->rb_tree.rbt_root)
317 #define LEFT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_left)
318 #define RIGHT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_right)
319 #define PARENT_ENTRY(map, entry) \
320 (ROOT_ENTRY(map) == (entry) \
321 ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
322
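/*
 * comparators for the red-black tree of map entries.  entries are
 * ordered by address and never overlap (the KASSERTs below check this).
 */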
323 static int
324 uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
325 {
326 const struct vm_map_entry *eparent = nparent;
327 const struct vm_map_entry *ekey = nkey;
328
329 KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
330 KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);
331
332 if (eparent->start < ekey->start)
333 return -1;
334 if (eparent->end >= ekey->start)
335 return 1;
336 return 0;
337 }
338
339 static int
340 uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
341 {
342 const struct vm_map_entry *eparent = nparent;
343 const vaddr_t va = *(const vaddr_t *) vkey;
344
345 if (eparent->start < va)
346 return -1;
347 if (eparent->end >= va)
348 return 1;
349 return 0;
350 }
351
352 static const rb_tree_ops_t uvm_map_tree_ops = {
353 .rbto_compare_nodes = uvm_map_compare_nodes,
354 .rbto_compare_key = uvm_map_compare_key,
355 .rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
356 .rbto_context = NULL
357 };
358
359 /*
360 * uvm_rb_gap: return the gap size between our entry and next entry.
361 */
362 static inline vsize_t
363 uvm_rb_gap(const struct vm_map_entry *entry)
364 {
365
366 KASSERT(entry->next != NULL);
367 return entry->next->start - entry->end;
368 }
369
370 static vsize_t
371 uvm_rb_maxgap(const struct vm_map_entry *entry)
372 {
373 struct vm_map_entry *child;
374 vsize_t maxgap = entry->gap;
375
376 /*
 * We need maxgap to be the largest gap of us or any of our
 * descendants.  Since each of our children's maxgap is the
 * cached value of their largest gap of themselves or their
 * descendants, we can just use that value and avoid recursing
 * down the tree to calculate it.
382 */
383 if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
384 maxgap = child->maxgap;
385
386 if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
387 maxgap = child->maxgap;
388
389 return maxgap;
390 }
391
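/*
 * uvm_rb_fixup: recompute the cached maxgap values from "entry" up to
 * the root, after a gap has changed or the tree has been rebalanced.
 */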
392 static void
393 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
394 {
395 struct vm_map_entry *parent;
396
397 KASSERT(entry->gap == uvm_rb_gap(entry));
398 entry->maxgap = uvm_rb_maxgap(entry);
399
400 while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
401 struct vm_map_entry *brother;
402 vsize_t maxgap = parent->gap;
403 unsigned int which;
404
405 KDASSERT(parent->gap == uvm_rb_gap(parent));
406 if (maxgap < entry->maxgap)
407 maxgap = entry->maxgap;
408 /*
409 * Since we work towards the root, we know entry's maxgap
410 * value is OK, but its brothers may now be out-of-date due
411 * to rebalancing. So refresh it.
412 */
413 which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
414 brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
415 if (brother != NULL) {
416 KDASSERT(brother->gap == uvm_rb_gap(brother));
417 brother->maxgap = uvm_rb_maxgap(brother);
418 if (maxgap < brother->maxgap)
419 maxgap = brother->maxgap;
420 }
421
422 parent->maxgap = maxgap;
423 entry = parent;
424 }
425 }
426
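/*
 * uvm_rb_insert: insert an entry into the map's red-black tree and
 * update the gap/maxgap bookkeeping of the affected entries.
 */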
427 static void
428 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
429 {
430 struct vm_map_entry *ret;
431
432 entry->gap = entry->maxgap = uvm_rb_gap(entry);
433 if (entry->prev != &map->header)
434 entry->prev->gap = uvm_rb_gap(entry->prev);
435
436 ret = rb_tree_insert_node(&map->rb_tree, entry);
437 KASSERTMSG(ret == entry,
438 ("uvm_rb_insert: map %p: duplicate entry %p", map, ret)
439 );
440
441 /*
442 * If the previous entry is not our immediate left child, then it's an
443 * ancestor and will be fixed up on the way to the root. We don't
444 * have to check entry->prev against &map->header since &map->header
445 * will never be in the tree.
446 */
447 uvm_rb_fixup(map,
448 LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
449 }
450
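/*
 * uvm_rb_remove: remove an entry from the map's red-black tree and
 * update the gap/maxgap bookkeeping of its former neighbours.
 */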
451 static void
452 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
453 {
454 struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;
455
456 /*
457 * If we are removing an interior node, then an adjacent node will
458 * be used to replace its position in the tree. Therefore we will
459 * need to fixup the tree starting at the parent of the replacement
460 * node. So record their parents for later use.
461 */
462 if (entry->prev != &map->header)
463 prev_parent = PARENT_ENTRY(map, entry->prev);
464 if (entry->next != &map->header)
465 next_parent = PARENT_ENTRY(map, entry->next);
466
467 rb_tree_remove_node(&map->rb_tree, entry);
468
469 /*
470 * If the previous node has a new parent, fixup the tree starting
471 * at the previous node's old parent.
472 */
473 if (entry->prev != &map->header) {
474 /*
475 * Update the previous entry's gap due to our absence.
476 */
477 entry->prev->gap = uvm_rb_gap(entry->prev);
478 uvm_rb_fixup(map, entry->prev);
479 if (prev_parent != NULL
480 && prev_parent != entry
481 && prev_parent != PARENT_ENTRY(map, entry->prev))
482 uvm_rb_fixup(map, prev_parent);
483 }
484
485 /*
486 * If the next node has a new parent, fixup the tree starting
487 * at the next node's old parent.
488 */
489 if (entry->next != &map->header) {
490 uvm_rb_fixup(map, entry->next);
491 if (next_parent != NULL
492 && next_parent != entry
493 && next_parent != PARENT_ENTRY(map, entry->next))
494 uvm_rb_fixup(map, next_parent);
495 }
496 }
497
498 #if defined(DEBUG)
499 int uvm_debug_check_map = 0;
500 int uvm_debug_check_rbtree = 0;
501 #define uvm_map_check(map, name) \
502 _uvm_map_check((map), (name), __FILE__, __LINE__)
503 static void
504 _uvm_map_check(struct vm_map *map, const char *name,
505 const char *file, int line)
506 {
507
508 if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
509 (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
510 panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
511 name, map, file, line);
512 }
513 }
514 #else /* defined(DEBUG) */
515 #define uvm_map_check(map, name) /* nothing */
516 #endif /* defined(DEBUG) */
517
518 #if defined(DEBUG) || defined(DDB)
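/*
 * _uvm_map_sanity: check that the map's first_free and hint pointers
 * reference entries that are still on the entry list.
 */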
519 int
520 _uvm_map_sanity(struct vm_map *map)
521 {
522 bool first_free_found = false;
523 bool hint_found = false;
524 const struct vm_map_entry *e;
525 struct vm_map_entry *hint = map->hint;
526
527 e = &map->header;
528 for (;;) {
529 if (map->first_free == e) {
530 first_free_found = true;
531 } else if (!first_free_found && e->next->start > e->end) {
532 printf("first_free %p should be %p\n",
533 map->first_free, e);
534 return -1;
535 }
536 if (hint == e) {
537 hint_found = true;
538 }
539
540 e = e->next;
541 if (e == &map->header) {
542 break;
543 }
544 }
545 if (!first_free_found) {
546 printf("stale first_free\n");
547 return -1;
548 }
549 if (!hint_found) {
550 printf("stale hint\n");
551 return -1;
552 }
553 return 0;
554 }
555
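/*
 * _uvm_tree_sanity: check that the entry list and the red-black tree
 * agree, and that the cached gap/maxgap values are consistent.
 */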
556 int
557 _uvm_tree_sanity(struct vm_map *map)
558 {
559 struct vm_map_entry *tmp, *trtmp;
560 int n = 0, i = 1;
561
562 for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
563 if (tmp->gap != uvm_rb_gap(tmp)) {
564 printf("%d/%d gap %lx != %lx %s\n",
565 n + 1, map->nentries,
566 (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
567 tmp->next == &map->header ? "(last)" : "");
568 goto error;
569 }
570 /*
571 * If any entries are out of order, tmp->gap will be unsigned
572 * and will likely exceed the size of the map.
573 */
574 if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
575 printf("too large gap %zu\n", (size_t)tmp->gap);
576 goto error;
577 }
578 n++;
579 }
580
581 if (n != map->nentries) {
582 printf("nentries: %d vs %d\n", n, map->nentries);
583 goto error;
584 }
585
586 trtmp = NULL;
587 for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
588 if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
589 printf("maxgap %lx != %lx\n",
590 (ulong)tmp->maxgap,
591 (ulong)uvm_rb_maxgap(tmp));
592 goto error;
593 }
594 if (trtmp != NULL && trtmp->start >= tmp->start) {
595 printf("corrupt: 0x%"PRIxVADDR"x >= 0x%"PRIxVADDR"x\n",
596 trtmp->start, tmp->start);
597 goto error;
598 }
599
600 trtmp = tmp;
601 }
602
603 for (tmp = map->header.next; tmp != &map->header;
604 tmp = tmp->next, i++) {
605 trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
606 if (trtmp == NULL)
607 trtmp = &map->header;
608 if (tmp->prev != trtmp) {
609 printf("lookup: %d: %p->prev=%p: %p\n",
610 i, tmp, tmp->prev, trtmp);
611 goto error;
612 }
613 trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
614 if (trtmp == NULL)
615 trtmp = &map->header;
616 if (tmp->next != trtmp) {
617 printf("lookup: %d: %p->next=%p: %p\n",
618 i, tmp, tmp->next, trtmp);
619 goto error;
620 }
621 trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
622 if (trtmp != tmp) {
623 printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
624 PARENT_ENTRY(map, tmp));
625 goto error;
626 }
627 }
628
629 return (0);
630 error:
631 return (-1);
632 }
633 #endif /* defined(DEBUG) || defined(DDB) */
634
635 #ifdef DIAGNOSTIC
636 static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
637 #endif
638
639 /*
640 * vm_map_lock: acquire an exclusive (write) lock on a map.
641 *
642 * => Note that "intrsafe" maps use only exclusive, spin locks.
643 *
644 * => The locking protocol provides for guaranteed upgrade from shared ->
645 * exclusive by whichever thread currently has the map marked busy.
646 * See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among
647 * other problems, it defeats any fairness guarantees provided by RW
648 * locks.
649 */
650
651 void
652 vm_map_lock(struct vm_map *map)
653 {
654
655 if ((map->flags & VM_MAP_INTRSAFE) != 0) {
656 mutex_spin_enter(&map->mutex);
657 return;
658 }
659
660 for (;;) {
661 rw_enter(&map->lock, RW_WRITER);
662 if (map->busy == NULL)
663 break;
664 if (map->busy == curlwp)
665 break;
666 mutex_enter(&map->misc_lock);
667 rw_exit(&map->lock);
668 if (map->busy != NULL)
669 cv_wait(&map->cv, &map->misc_lock);
670 mutex_exit(&map->misc_lock);
671 }
672
673 map->timestamp++;
674 }
675
676 /*
677 * vm_map_lock_try: try to lock a map, failing if it is already locked.
678 */
679
680 bool
681 vm_map_lock_try(struct vm_map *map)
682 {
683
684 if ((map->flags & VM_MAP_INTRSAFE) != 0)
685 return mutex_tryenter(&map->mutex);
686 if (!rw_tryenter(&map->lock, RW_WRITER))
687 return false;
688 if (map->busy != NULL) {
689 rw_exit(&map->lock);
690 return false;
691 }
692
693 map->timestamp++;
694 return true;
695 }
696
697 /*
698 * vm_map_unlock: release an exclusive lock on a map.
699 */
700
701 void
702 vm_map_unlock(struct vm_map *map)
703 {
704
705 if ((map->flags & VM_MAP_INTRSAFE) != 0)
706 mutex_spin_exit(&map->mutex);
707 else {
708 KASSERT(rw_write_held(&map->lock));
709 KASSERT(map->busy == NULL || map->busy == curlwp);
710 rw_exit(&map->lock);
711 }
712 }
713
714 /*
715 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
716 * want an exclusive lock.
717 */
718
719 void
720 vm_map_unbusy(struct vm_map *map)
721 {
722
723 KASSERT(map->busy == curlwp);
724
725 /*
726 * Safe to clear 'busy' and 'waiters' with only a read lock held:
727 *
728 * o they can only be set with a write lock held
729 * o writers are blocked out with a read or write hold
730 * o at any time, only one thread owns the set of values
731 */
732 mutex_enter(&map->misc_lock);
733 map->busy = NULL;
734 cv_broadcast(&map->cv);
735 mutex_exit(&map->misc_lock);
736 }
737
738 /*
739 * vm_map_lock_read: acquire a shared (read) lock on a map.
740 */
741
742 void
743 vm_map_lock_read(struct vm_map *map)
744 {
745
746 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
747
748 rw_enter(&map->lock, RW_READER);
749 }
750
751 /*
752 * vm_map_unlock_read: release a shared lock on a map.
753 */
754
755 void
756 vm_map_unlock_read(struct vm_map *map)
757 {
758
759 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
760
761 rw_exit(&map->lock);
762 }
763
764 /*
765 * vm_map_busy: mark a map as busy.
766 *
767 * => the caller must hold the map write locked
768 */
769
770 void
771 vm_map_busy(struct vm_map *map)
772 {
773
774 KASSERT(rw_write_held(&map->lock));
775 KASSERT(map->busy == NULL);
776
777 map->busy = curlwp;
778 }
779
780 /*
781 * vm_map_locked_p: return true if the map is write locked.
782 *
783 * => only for debug purposes like KASSERTs.
784 * => should not be used to verify that a map is not locked.
785 */
786
787 bool
788 vm_map_locked_p(struct vm_map *map)
789 {
790
791 if ((map->flags & VM_MAP_INTRSAFE) != 0) {
792 return mutex_owned(&map->mutex);
793 } else {
794 return rw_write_held(&map->lock);
795 }
796 }
797
798 /*
799 * uvm_mapent_alloc: allocate a map entry
800 */
801
802 static struct vm_map_entry *
803 uvm_mapent_alloc(struct vm_map *map, int flags)
804 {
805 struct vm_map_entry *me;
806 int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
807 UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
808
809 if (VM_MAP_USE_KMAPENT(map)) {
810 me = uvm_kmapent_alloc(map, flags);
811 } else {
812 me = pool_cache_get(&uvm_map_entry_cache, pflags);
813 if (__predict_false(me == NULL))
814 return NULL;
815 me->flags = 0;
816 }
817
818 UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
819 ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
820 return (me);
821 }
822
823 /*
824 * uvm_mapent_alloc_split: allocate a map entry for clipping.
825 *
826 * => map must be locked by caller if UVM_MAP_QUANTUM is set.
827 */
828
829 static struct vm_map_entry *
830 uvm_mapent_alloc_split(struct vm_map *map,
831 const struct vm_map_entry *old_entry, int flags,
832 struct uvm_mapent_reservation *umr)
833 {
834 struct vm_map_entry *me;
835
836 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
837 (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
838
839 if (old_entry->flags & UVM_MAP_QUANTUM) {
840 struct vm_map_kernel *vmk = vm_map_to_kernel(map);
841
842 KASSERT(vm_map_locked_p(map));
843 me = vmk->vmk_merged_entries;
844 KASSERT(me);
845 vmk->vmk_merged_entries = me->next;
846 KASSERT(me->flags & UVM_MAP_QUANTUM);
847 } else {
848 me = uvm_mapent_alloc(map, flags);
849 }
850
851 return me;
852 }
853
854 /*
855 * uvm_mapent_free: free map entry
856 */
857
858 static void
859 uvm_mapent_free(struct vm_map_entry *me)
860 {
861 UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
862
863 UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
864 me, me->flags, 0, 0);
865 if (me->flags & UVM_MAP_KERNEL) {
866 uvm_kmapent_free(me);
867 } else {
868 pool_cache_put(&uvm_map_entry_cache, me);
869 }
870 }
871
872 /*
873 * uvm_mapent_free_merged: free merged map entry
874 *
875 * => keep the entry if needed.
876 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
877 * => map should be locked if UVM_MAP_QUANTUM is set.
878 */
879
880 static void
881 uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
882 {
883
884 KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);
885
886 if (me->flags & UVM_MAP_QUANTUM) {
887 /*
888 * keep this entry for later splitting.
889 */
890 struct vm_map_kernel *vmk;
891
892 KASSERT(vm_map_locked_p(map));
893 KASSERT(VM_MAP_IS_KERNEL(map));
894 KASSERT(!VM_MAP_USE_KMAPENT(map) ||
895 (me->flags & UVM_MAP_KERNEL));
896
897 vmk = vm_map_to_kernel(map);
898 me->next = vmk->vmk_merged_entries;
899 vmk->vmk_merged_entries = me;
900 } else {
901 uvm_mapent_free(me);
902 }
903 }
904
905 /*
906 * uvm_mapent_copy: copy a map entry, preserving flags
907 */
908
909 static inline void
910 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
911 {
912
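	/*
	 * copy only the fields up to uvm_map_entry_stop_copy so that the
	 * destination entry keeps its own allocation flags.
	 */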
913 memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
914 ((char *)src));
915 }
916
917 /*
918 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
919 * map entries.
920 *
921 * => size and flags are the same as uvm_km_suballoc's ones.
922 */
923
924 vsize_t
925 uvm_mapent_overhead(vsize_t size, int flags)
926 {
927
928 if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
929 return uvm_kmapent_overhead(size);
930 }
931 return 0;
932 }
933
934 #if defined(DEBUG)
935 static void
936 _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
937 {
938
939 if (entry->start >= entry->end) {
940 goto bad;
941 }
942 if (UVM_ET_ISOBJ(entry)) {
943 if (entry->object.uvm_obj == NULL) {
944 goto bad;
945 }
946 } else if (UVM_ET_ISSUBMAP(entry)) {
947 if (entry->object.sub_map == NULL) {
948 goto bad;
949 }
950 } else {
951 if (entry->object.uvm_obj != NULL ||
952 entry->object.sub_map != NULL) {
953 goto bad;
954 }
955 }
956 if (!UVM_ET_ISOBJ(entry)) {
957 if (entry->offset != 0) {
958 goto bad;
959 }
960 }
961
962 return;
963
964 bad:
965 panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
966 }
967 #endif /* defined(DEBUG) */
968
969 /*
970 * uvm_map_entry_unwire: unwire a map entry
971 *
972 * => map should be locked by caller
973 */
974
975 static inline void
976 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
977 {
978
979 entry->wired_count = 0;
980 uvm_fault_unwire_locked(map, entry->start, entry->end);
981 }
982
983
984 /*
985 * wrapper for calling amap_ref()
986 */
987 static inline void
988 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
989 {
990
991 amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
992 (entry->end - entry->start) >> PAGE_SHIFT, flags);
993 }
994
995
996 /*
997 * wrapper for calling amap_unref()
998 */
999 static inline void
1000 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
1001 {
1002
1003 amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
1004 (entry->end - entry->start) >> PAGE_SHIFT, flags);
1005 }
1006
1007
1008 /*
1009 * uvm_map_init: init mapping system at boot time.
1010 */
1011
1012 void
1013 uvm_map_init(void)
1014 {
1015 #if defined(UVMHIST)
1016 static struct uvm_history_ent maphistbuf[100];
1017 static struct uvm_history_ent pdhistbuf[100];
1018 #endif
1019
1020 /*
1021 * first, init logging system.
1022 */
1023
1024 UVMHIST_FUNC("uvm_map_init");
1025 UVMHIST_INIT_STATIC(maphist, maphistbuf);
1026 UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
1027 UVMHIST_CALLED(maphist);
1028 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
1029
1030 /*
1031 * initialize the global lock for kernel map entry.
1032 */
1033
1034 mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
1035
1036 /*
1037 * initialize caches.
1038 */
1039
1040 pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
1041 0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
1042 pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
1043 0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
1044 }
1045
1046 /*
1047 * clippers
1048 */
1049
1050 /*
1051 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
1052 */
1053
1054 static void
1055 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
1056 vaddr_t splitat)
1057 {
1058 vaddr_t adj;
1059
1060 KASSERT(entry1->start < splitat);
1061 KASSERT(splitat < entry1->end);
1062
1063 adj = splitat - entry1->start;
1064 entry1->end = entry2->start = splitat;
1065
1066 if (entry1->aref.ar_amap) {
1067 amap_splitref(&entry1->aref, &entry2->aref, adj);
1068 }
1069 if (UVM_ET_ISSUBMAP(entry1)) {
1070 /* ... unlikely to happen, but play it safe */
1071 uvm_map_reference(entry1->object.sub_map);
1072 } else if (UVM_ET_ISOBJ(entry1)) {
1073 KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
1074 entry2->offset += adj;
1075 if (entry1->object.uvm_obj->pgops &&
1076 entry1->object.uvm_obj->pgops->pgo_reference)
1077 entry1->object.uvm_obj->pgops->pgo_reference(
1078 entry1->object.uvm_obj);
1079 }
1080 }
1081
1082 /*
1083 * uvm_map_clip_start: ensure that the entry begins at or after
1084 * the starting address, if it doesn't we split the entry.
1085 *
1086 * => caller should use UVM_MAP_CLIP_START macro rather than calling
1087 * this directly
1088 * => map must be locked by caller
1089 */
1090
1091 void
1092 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
1093 vaddr_t start, struct uvm_mapent_reservation *umr)
1094 {
1095 struct vm_map_entry *new_entry;
1096
1097 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
1098
1099 uvm_map_check(map, "clip_start entry");
1100 uvm_mapent_check(entry);
1101
1102 /*
1103 * Split off the front portion. note that we must insert the new
1104 * entry BEFORE this one, so that this entry has the specified
1105 * starting address.
1106 */
1107 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1108 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1109 uvm_mapent_splitadj(new_entry, entry, start);
1110 uvm_map_entry_link(map, entry->prev, new_entry);
1111
1112 uvm_map_check(map, "clip_start leave");
1113 }
1114
1115 /*
1116 * uvm_map_clip_end: ensure that the entry ends at or before
 * the ending address, if it doesn't we split the entry
1118 *
1119 * => caller should use UVM_MAP_CLIP_END macro rather than calling
1120 * this directly
1121 * => map must be locked by caller
1122 */
1123
1124 void
1125 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
1126 struct uvm_mapent_reservation *umr)
1127 {
1128 struct vm_map_entry *new_entry;
1129
1130 uvm_map_check(map, "clip_end entry");
1131 uvm_mapent_check(entry);
1132
1133 /*
1134 * Create a new entry and insert it
1135 * AFTER the specified entry
1136 */
1137 new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1138 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1139 uvm_mapent_splitadj(entry, new_entry, end);
1140 uvm_map_entry_link(map, entry, new_entry);
1141
1142 uvm_map_check(map, "clip_end leave");
1143 }
1144
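/*
 * vm_map_drain: try to reclaim kernel virtual address space before a
 * failed allocation is retried.  no-op for non-kernel maps.
 */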
1145 static void
1146 vm_map_drain(struct vm_map *map, uvm_flag_t flags)
1147 {
1148
1149 if (!VM_MAP_IS_KERNEL(map)) {
1150 return;
1151 }
1152
1153 uvm_km_va_drain(map, flags);
1154 }
1155
1156 /*
1157 * M A P - m a i n e n t r y p o i n t
1158 */
1159 /*
1160 * uvm_map: establish a valid mapping in a map
1161 *
1162 * => assume startp is page aligned.
1163 * => assume size is a multiple of PAGE_SIZE.
1164 * => assume sys_mmap provides enough of a "hint" to have us skip
1165 * over text/data/bss area.
1166 * => map must be unlocked (we will lock it)
1167 * => <uobj,uoffset> value meanings (4 cases):
1168 * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
1169 * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
1170 * [3] <uobj,uoffset> == normal mapping
1171 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
1172 *
1173 * case [4] is for kernel mappings where we don't know the offset until
1174 * we've found a virtual address. note that kernel object offsets are
1175 * always relative to vm_map_min(kernel_map).
1176 *
1177 * => if `align' is non-zero, we align the virtual address to the specified
1178 * alignment.
1179 * this is provided as a mechanism for large pages.
1180 *
1181 * => XXXCDC: need way to map in external amap?
1182 */
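/*
 * Example (illustrative): an anonymous copy-on-write mapping, i.e. case
 * [2] above, might be established with something like
 *
 *	error = uvm_map(&p->p_vmspace->vm_map, &addr, size, NULL,
 *	    UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *		UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
 *
 * where p, addr and size are the caller's process, hint address and
 * length.  with no backing uvm_object, the anonymous layer (amap) is
 * created lazily at fault time unless UVM_FLAG_OVERLAY is also given.
 */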
1183
1184 int
1185 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1186 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1187 {
1188 struct uvm_map_args args;
1189 struct vm_map_entry *new_entry;
1190 int error;
1191
1192 KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
1193 KASSERT((size & PAGE_MASK) == 0);
1194
1195 #ifndef __USER_VA0_IS_SAFE
1196 if ((flags & UVM_FLAG_FIXED) && *startp == 0 &&
1197 !VM_MAP_IS_KERNEL(map) && user_va0_disable)
1198 return EACCES;
1199 #endif
1200
1201 /*
1202 * for pager_map, allocate the new entry first to avoid sleeping
1203 * for memory while we have the map locked.
1204 *
1205 * Also, because we allocate entries for in-kernel maps
1206 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
1207 * allocate them before locking the map.
1208 */
1209
1210 new_entry = NULL;
1211 if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
1212 map == pager_map) {
1213 new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1214 if (__predict_false(new_entry == NULL))
1215 return ENOMEM;
1216 if (flags & UVM_FLAG_QUANTUM)
1217 new_entry->flags |= UVM_MAP_QUANTUM;
1218 }
1219 if (map == pager_map)
1220 flags |= UVM_FLAG_NOMERGE;
1221
1222 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1223 flags, &args);
1224 if (!error) {
1225 error = uvm_map_enter(map, &args, new_entry);
1226 *startp = args.uma_start;
1227 } else if (new_entry) {
1228 uvm_mapent_free(new_entry);
1229 }
1230
1231 #if defined(DEBUG)
1232 if (!error && VM_MAP_IS_KERNEL(map)) {
1233 uvm_km_check_empty(map, *startp, *startp + size);
1234 }
1235 #endif /* defined(DEBUG) */
1236
1237 return error;
1238 }
1239
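/*
 * uvm_map_prepare: the first half of uvm_map().  check the request,
 * lock the map and find space for the new range; the result is recorded
 * in "args" for a subsequent uvm_map_enter().  returns with the map
 * locked on success.
 */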
1240 int
1241 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1242 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1243 struct uvm_map_args *args)
1244 {
1245 struct vm_map_entry *prev_entry;
1246 vm_prot_t prot = UVM_PROTECTION(flags);
1247 vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1248
1249 UVMHIST_FUNC("uvm_map_prepare");
1250 UVMHIST_CALLED(maphist);
1251
1252 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1253 map, start, size, flags);
1254 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1255
1256 /*
1257 * detect a popular device driver bug.
1258 */
1259
1260 KASSERT(doing_shutdown || curlwp != NULL ||
1261 (map->flags & VM_MAP_INTRSAFE));
1262
1263 /*
1264 * zero-sized mapping doesn't make any sense.
1265 */
1266 KASSERT(size > 0);
1267
1268 KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1269
1270 uvm_map_check(map, "map entry");
1271
1272 /*
1273 * check sanity of protection code
1274 */
1275
1276 if ((prot & maxprot) != prot) {
1277 UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
1278 prot, maxprot,0,0);
1279 return EACCES;
1280 }
1281
1282 /*
1283 * figure out where to put new VM range
1284 */
1285
1286 retry:
1287 if (vm_map_lock_try(map) == false) {
1288 if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
1289 (map->flags & VM_MAP_INTRSAFE) == 0) {
1290 return EAGAIN;
1291 }
1292 vm_map_lock(map); /* could sleep here */
1293 }
1294 prev_entry = uvm_map_findspace(map, start, size, &start,
1295 uobj, uoffset, align, flags);
1296 if (prev_entry == NULL) {
1297 unsigned int timestamp;
1298
1299 timestamp = map->timestamp;
1300 UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
1301 timestamp,0,0,0);
1302 map->flags |= VM_MAP_WANTVA;
1303 vm_map_unlock(map);
1304
1305 /*
1306 * try to reclaim kva and wait until someone does unmap.
1307 * fragile locking here, so we awaken every second to
1308 * recheck the condition.
1309 */
1310
1311 vm_map_drain(map, flags);
1312
1313 mutex_enter(&map->misc_lock);
1314 while ((map->flags & VM_MAP_WANTVA) != 0 &&
1315 map->timestamp == timestamp) {
1316 if ((flags & UVM_FLAG_WAITVA) == 0) {
1317 mutex_exit(&map->misc_lock);
1318 UVMHIST_LOG(maphist,
1319 "<- uvm_map_findspace failed!", 0,0,0,0);
1320 return ENOMEM;
1321 } else {
1322 cv_timedwait(&map->cv, &map->misc_lock, hz);
1323 }
1324 }
1325 mutex_exit(&map->misc_lock);
1326 goto retry;
1327 }
1328
1329 #ifdef PMAP_GROWKERNEL
1330 /*
1331 * If the kernel pmap can't map the requested space,
1332 * then allocate more resources for it.
1333 */
1334 if (map == kernel_map && uvm_maxkaddr < (start + size))
1335 uvm_maxkaddr = pmap_growkernel(start + size);
1336 #endif
1337
1338 UVMMAP_EVCNT_INCR(map_call);
1339
1340 /*
1341 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1342 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
1343 * either case we want to zero it before storing it in the map entry
1344 * (because it looks strange and confusing when debugging...)
1345 *
1346 * if uobj is not null
1347 * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1348 * and we do not need to change uoffset.
1349 * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1350 * now (based on the starting address of the map). this case is
1351 * for kernel object mappings where we don't know the offset until
1352 * the virtual address is found (with uvm_map_findspace). the
1353 * offset is the distance we are from the start of the map.
1354 */
1355
1356 if (uobj == NULL) {
1357 uoffset = 0;
1358 } else {
1359 if (uoffset == UVM_UNKNOWN_OFFSET) {
1360 KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1361 uoffset = start - vm_map_min(kernel_map);
1362 }
1363 }
1364
1365 args->uma_flags = flags;
1366 args->uma_prev = prev_entry;
1367 args->uma_start = start;
1368 args->uma_size = size;
1369 args->uma_uobj = uobj;
1370 args->uma_uoffset = uoffset;
1371
1372 UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
1373 return 0;
1374 }
1375
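/*
 * uvm_map_enter: the second half of uvm_map().  enter the mapping
 * described by "args" into the locked map, merging with neighbouring
 * entries where possible, and unlock the map.
 */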
1376 int
1377 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1378 struct vm_map_entry *new_entry)
1379 {
1380 struct vm_map_entry *prev_entry = args->uma_prev;
1381 struct vm_map_entry *dead = NULL;
1382
1383 const uvm_flag_t flags = args->uma_flags;
1384 const vm_prot_t prot = UVM_PROTECTION(flags);
1385 const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1386 const vm_inherit_t inherit = UVM_INHERIT(flags);
1387 const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1388 AMAP_EXTEND_NOWAIT : 0;
1389 const int advice = UVM_ADVICE(flags);
1390 const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
1391 UVM_MAP_QUANTUM : 0;
1392
1393 vaddr_t start = args->uma_start;
1394 vsize_t size = args->uma_size;
1395 struct uvm_object *uobj = args->uma_uobj;
1396 voff_t uoffset = args->uma_uoffset;
1397
1398 const int kmap = (vm_map_pmap(map) == pmap_kernel());
1399 int merged = 0;
1400 int error;
1401 int newetype;
1402
1403 UVMHIST_FUNC("uvm_map_enter");
1404 UVMHIST_CALLED(maphist);
1405
1406 UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1407 map, start, size, flags);
1408 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1409
1410 KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1411
1412 if (flags & UVM_FLAG_QUANTUM) {
1413 KASSERT(new_entry);
1414 KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
1415 }
1416
1417 if (uobj)
1418 newetype = UVM_ET_OBJ;
1419 else
1420 newetype = 0;
1421
1422 if (flags & UVM_FLAG_COPYONW) {
1423 newetype |= UVM_ET_COPYONWRITE;
1424 if ((flags & UVM_FLAG_OVERLAY) == 0)
1425 newetype |= UVM_ET_NEEDSCOPY;
1426 }
1427
1428 /*
1429 * try and insert in map by extending previous entry, if possible.
1430 * XXX: we don't try and pull back the next entry. might be useful
1431 * for a stack, but we are currently allocating our stack in advance.
1432 */
1433
1434 if (flags & UVM_FLAG_NOMERGE)
1435 goto nomerge;
1436
1437 if (prev_entry->end == start &&
1438 prev_entry != &map->header &&
1439 UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
1440 prot, maxprot, inherit, advice, 0)) {
1441
1442 if (uobj && prev_entry->offset +
1443 (prev_entry->end - prev_entry->start) != uoffset)
1444 goto forwardmerge;
1445
1446 /*
1447 * can't extend a shared amap. note: no need to lock amap to
1448 * look at refs since we don't care about its exact value.
 * if it is one (i.e. we have the only reference) it will stay there
1450 */
1451
1452 if (prev_entry->aref.ar_amap &&
1453 amap_refs(prev_entry->aref.ar_amap) != 1) {
1454 goto forwardmerge;
1455 }
1456
1457 if (prev_entry->aref.ar_amap) {
1458 error = amap_extend(prev_entry, size,
1459 amapwaitflag | AMAP_EXTEND_FORWARDS);
1460 if (error)
1461 goto nomerge;
1462 }
1463
1464 if (kmap) {
1465 UVMMAP_EVCNT_INCR(kbackmerge);
1466 } else {
1467 UVMMAP_EVCNT_INCR(ubackmerge);
1468 }
1469 UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
1470
1471 /*
1472 * drop our reference to uobj since we are extending a reference
1473 * that we already have (the ref count can not drop to zero).
1474 */
1475
1476 if (uobj && uobj->pgops->pgo_detach)
1477 uobj->pgops->pgo_detach(uobj);
1478
1479 /*
1480 * Now that we've merged the entries, note that we've grown
1481 * and our gap has shrunk. Then fix the tree.
1482 */
1483 prev_entry->end += size;
1484 prev_entry->gap -= size;
1485 uvm_rb_fixup(map, prev_entry);
1486
1487 uvm_map_check(map, "map backmerged");
1488
1489 UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1490 merged++;
1491 }
1492
1493 forwardmerge:
1494 if (prev_entry->next->start == (start + size) &&
1495 prev_entry->next != &map->header &&
1496 UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
1497 prot, maxprot, inherit, advice, 0)) {
1498
1499 if (uobj && prev_entry->next->offset != uoffset + size)
1500 goto nomerge;
1501
1502 /*
1503 * can't extend a shared amap. note: no need to lock amap to
1504 * look at refs since we don't care about its exact value.
 * if it is one (i.e. we have the only reference) it will stay there.
1506 *
1507 * note that we also can't merge two amaps, so if we
1508 * merged with the previous entry which has an amap,
1509 * and the next entry also has an amap, we give up.
1510 *
1511 * Interesting cases:
1512 * amap, new, amap -> give up second merge (single fwd extend)
1513 * amap, new, none -> double forward extend (extend again here)
1514 * none, new, amap -> double backward extend (done here)
1515 * uobj, new, amap -> single backward extend (done here)
1516 *
1517 * XXX should we attempt to deal with someone refilling
1518 * the deallocated region between two entries that are
1519 * backed by the same amap (ie, arefs is 2, "prev" and
1520 * "next" refer to it, and adding this allocation will
1521 * close the hole, thus restoring arefs to 1 and
1522 * deallocating the "next" vm_map_entry)? -- @@@
1523 */
1524
1525 if (prev_entry->next->aref.ar_amap &&
1526 (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1527 (merged && prev_entry->aref.ar_amap))) {
1528 goto nomerge;
1529 }
1530
1531 if (merged) {
1532 /*
1533 * Try to extend the amap of the previous entry to
1534 * cover the next entry as well. If it doesn't work
1535 * just skip on, don't actually give up, since we've
1536 * already completed the back merge.
1537 */
1538 if (prev_entry->aref.ar_amap) {
1539 if (amap_extend(prev_entry,
1540 prev_entry->next->end -
1541 prev_entry->next->start,
1542 amapwaitflag | AMAP_EXTEND_FORWARDS))
1543 goto nomerge;
1544 }
1545
1546 /*
1547 * Try to extend the amap of the *next* entry
1548 * back to cover the new allocation *and* the
1549 * previous entry as well (the previous merge
1550 * didn't have an amap already otherwise we
1551 * wouldn't be checking here for an amap). If
1552 * it doesn't work just skip on, again, don't
1553 * actually give up, since we've already
1554 * completed the back merge.
1555 */
1556 else if (prev_entry->next->aref.ar_amap) {
1557 if (amap_extend(prev_entry->next,
1558 prev_entry->end -
1559 prev_entry->start,
1560 amapwaitflag | AMAP_EXTEND_BACKWARDS))
1561 goto nomerge;
1562 }
1563 } else {
1564 /*
1565 * Pull the next entry's amap backwards to cover this
1566 * new allocation.
1567 */
1568 if (prev_entry->next->aref.ar_amap) {
1569 error = amap_extend(prev_entry->next, size,
1570 amapwaitflag | AMAP_EXTEND_BACKWARDS);
1571 if (error)
1572 goto nomerge;
1573 }
1574 }
1575
1576 if (merged) {
1577 if (kmap) {
1578 UVMMAP_EVCNT_DECR(kbackmerge);
1579 UVMMAP_EVCNT_INCR(kbimerge);
1580 } else {
1581 UVMMAP_EVCNT_DECR(ubackmerge);
1582 UVMMAP_EVCNT_INCR(ubimerge);
1583 }
1584 } else {
1585 if (kmap) {
1586 UVMMAP_EVCNT_INCR(kforwmerge);
1587 } else {
1588 UVMMAP_EVCNT_INCR(uforwmerge);
1589 }
1590 }
1591 UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);
1592
1593 /*
1594 * drop our reference to uobj since we are extending a reference
1595 * that we already have (the ref count can not drop to zero).
1596 * (if merged, we've already detached)
1597 */
1598 if (uobj && uobj->pgops->pgo_detach && !merged)
1599 uobj->pgops->pgo_detach(uobj);
1600
1601 if (merged) {
1602 dead = prev_entry->next;
1603 prev_entry->end = dead->end;
1604 uvm_map_entry_unlink(map, dead);
1605 if (dead->aref.ar_amap != NULL) {
1606 prev_entry->aref = dead->aref;
1607 dead->aref.ar_amap = NULL;
1608 }
1609 } else {
1610 prev_entry->next->start -= size;
1611 if (prev_entry != &map->header) {
1612 prev_entry->gap -= size;
1613 KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
1614 uvm_rb_fixup(map, prev_entry);
1615 }
1616 if (uobj)
1617 prev_entry->next->offset = uoffset;
1618 }
1619
1620 uvm_map_check(map, "map forwardmerged");
1621
1622 UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1623 merged++;
1624 }
1625
1626 nomerge:
1627 if (!merged) {
1628 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1629 if (kmap) {
1630 UVMMAP_EVCNT_INCR(knomerge);
1631 } else {
1632 UVMMAP_EVCNT_INCR(unomerge);
1633 }
1634
1635 /*
1636 * allocate new entry and link it in.
1637 */
1638
1639 if (new_entry == NULL) {
1640 new_entry = uvm_mapent_alloc(map,
1641 (flags & UVM_FLAG_NOWAIT));
1642 if (__predict_false(new_entry == NULL)) {
1643 error = ENOMEM;
1644 goto done;
1645 }
1646 }
1647 new_entry->start = start;
1648 new_entry->end = new_entry->start + size;
1649 new_entry->object.uvm_obj = uobj;
1650 new_entry->offset = uoffset;
1651
1652 new_entry->etype = newetype;
1653
1654 if (flags & UVM_FLAG_NOMERGE) {
1655 new_entry->flags |= UVM_MAP_NOMERGE;
1656 }
1657
1658 new_entry->protection = prot;
1659 new_entry->max_protection = maxprot;
1660 new_entry->inheritance = inherit;
1661 new_entry->wired_count = 0;
1662 new_entry->advice = advice;
1663 if (flags & UVM_FLAG_OVERLAY) {
1664
1665 /*
1666 * to_add: for BSS we overallocate a little since we
1667 * are likely to extend
1668 */
1669
1670 vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1671 UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1672 struct vm_amap *amap = amap_alloc(size, to_add,
1673 (flags & UVM_FLAG_NOWAIT));
1674 if (__predict_false(amap == NULL)) {
1675 error = ENOMEM;
1676 goto done;
1677 }
1678 new_entry->aref.ar_pageoff = 0;
1679 new_entry->aref.ar_amap = amap;
1680 } else {
1681 new_entry->aref.ar_pageoff = 0;
1682 new_entry->aref.ar_amap = NULL;
1683 }
1684 uvm_map_entry_link(map, prev_entry, new_entry);
1685
1686 /*
1687 * Update the free space hint
1688 */
1689
1690 if ((map->first_free == prev_entry) &&
1691 (prev_entry->end >= new_entry->start))
1692 map->first_free = new_entry;
1693
1694 new_entry = NULL;
1695 }
1696
1697 map->size += size;
1698
1699 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1700
1701 error = 0;
1702 done:
1703 if ((flags & UVM_FLAG_QUANTUM) == 0) {
1704 /*
1705 * vmk_merged_entries is locked by the map's lock.
1706 */
1707 vm_map_unlock(map);
1708 }
1709 if (new_entry && error == 0) {
1710 KDASSERT(merged);
1711 uvm_mapent_free_merged(map, new_entry);
1712 new_entry = NULL;
1713 }
1714 if (dead) {
1715 KDASSERT(merged);
1716 uvm_mapent_free_merged(map, dead);
1717 }
1718 if ((flags & UVM_FLAG_QUANTUM) != 0) {
1719 vm_map_unlock(map);
1720 }
1721 if (new_entry != NULL) {
1722 uvm_mapent_free(new_entry);
1723 }
1724 return error;
1725 }
1726
1727 /*
1728 * uvm_map_lookup_entry_bytree: lookup an entry in tree
1729 */
1730
1731 static inline bool
1732 uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1733 struct vm_map_entry **entry /* OUT */)
1734 {
1735 struct vm_map_entry *prev = &map->header;
1736 struct vm_map_entry *cur = ROOT_ENTRY(map);
1737
1738 while (cur) {
1739 UVMMAP_EVCNT_INCR(mlk_treeloop);
1740 if (address >= cur->start) {
1741 if (address < cur->end) {
1742 *entry = cur;
1743 return true;
1744 }
1745 prev = cur;
1746 cur = RIGHT_ENTRY(cur);
1747 } else
1748 cur = LEFT_ENTRY(cur);
1749 }
1750 *entry = prev;
1751 return false;
1752 }
1753
1754 /*
1755 * uvm_map_lookup_entry: find map entry at or before an address
1756 *
1757 * => map must at least be read-locked by caller
1758 * => entry is returned in "entry"
1759 * => return value is true if address is in the returned entry
1760 */
1761
1762 bool
1763 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1764 struct vm_map_entry **entry /* OUT */)
1765 {
1766 struct vm_map_entry *cur;
1767 bool use_tree = false;
1768 UVMHIST_FUNC("uvm_map_lookup_entry");
1769 UVMHIST_CALLED(maphist);
1770
1771 UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1772 map, address, entry, 0);
1773
1774 /*
1775 * start looking either from the head of the
1776 * list, or from the hint.
1777 */
1778
1779 cur = map->hint;
1780
1781 if (cur == &map->header)
1782 cur = cur->next;
1783
1784 UVMMAP_EVCNT_INCR(mlk_call);
1785 if (address >= cur->start) {
1786
1787 /*
1788 * go from hint to end of list.
1789 *
1790 * but first, make a quick check to see if
1791 * we are already looking at the entry we
1792 * want (which is usually the case).
1793 * note also that we don't need to save the hint
1794 * here... it is the same hint (unless we are
1795 * at the header, in which case the hint didn't
1796 * buy us anything anyway).
1797 */
1798
1799 if (cur != &map->header && cur->end > address) {
1800 UVMMAP_EVCNT_INCR(mlk_hint);
1801 *entry = cur;
1802 UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1803 cur, 0, 0, 0);
1804 uvm_mapent_check(*entry);
1805 return (true);
1806 }
1807
1808 if (map->nentries > 15)
1809 use_tree = true;
1810 } else {
1811
1812 /*
1813 * invalid hint. use tree.
1814 */
1815 use_tree = true;
1816 }
1817
1818 uvm_map_check(map, __func__);
1819
1820 if (use_tree) {
1821 /*
1822 * Simple lookup in the tree. Happens when the hint is
1823 * invalid, or nentries reach a threshold.
1824 */
1825 UVMMAP_EVCNT_INCR(mlk_tree);
1826 if (uvm_map_lookup_entry_bytree(map, address, entry)) {
1827 goto got;
1828 } else {
1829 goto failed;
1830 }
1831 }
1832
1833 /*
1834 * search linearly
1835 */
1836
1837 UVMMAP_EVCNT_INCR(mlk_list);
1838 while (cur != &map->header) {
1839 UVMMAP_EVCNT_INCR(mlk_listloop);
1840 if (cur->end > address) {
1841 if (address >= cur->start) {
1842 /*
1843 * save this lookup for future
1844 * hints, and return
1845 */
1846
1847 *entry = cur;
1848 got:
1849 SAVE_HINT(map, map->hint, *entry);
1850 UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1851 cur, 0, 0, 0);
1852 KDASSERT((*entry)->start <= address);
1853 KDASSERT(address < (*entry)->end);
1854 uvm_mapent_check(*entry);
1855 return (true);
1856 }
1857 break;
1858 }
1859 cur = cur->next;
1860 }
1861 *entry = cur->prev;
1862 failed:
1863 SAVE_HINT(map, map->hint, *entry);
1864 UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1865 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1866 KDASSERT((*entry)->next == &map->header ||
1867 address < (*entry)->next->start);
1868 return (false);
1869 }
1870
1871 /*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits,
 * 0 if it doesn't fit, and -1 if the address wraps around.
1875 */
1876 static int
1877 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1878 vsize_t align, int topdown, struct vm_map_entry *entry)
1879 {
1880 vaddr_t end;
1881
1882 #ifdef PMAP_PREFER
1883 /*
1884 * push start address forward as needed to avoid VAC alias problems.
1885 * we only do this if a valid offset is specified.
1886 */
1887
1888 if (uoffset != UVM_UNKNOWN_OFFSET)
1889 PMAP_PREFER(uoffset, start, length, topdown);
1890 #endif
1891 if (align != 0) {
1892 if ((*start & (align - 1)) != 0) {
1893 if (topdown)
1894 *start &= ~(align - 1);
1895 else
1896 *start = roundup(*start, align);
1897 }
1898 /*
1899 * XXX Should we PMAP_PREFER() here again?
1900 * eh...i think we're okay
1901 */
1902 }
1903
1904 /*
1905 * Find the end of the proposed new region. Be sure we didn't
1906 * wrap around the address; if so, we lose. Otherwise, if the
1907 * proposed new region fits before the next entry, we win.
1908 */
1909
1910 end = *start + length;
1911 if (end < *start)
1912 return (-1);
1913
1914 if (entry->next->start >= end && *start >= entry->end)
1915 return (1);
1916
1917 return (0);
1918 }
1919
1920 /*
1921 * uvm_map_findspace: find "length" sized space in "map".
1922 *
1923 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1924 * set in "flags" (in which case we insist on using "hint").
1925 * => "result" is VA returned
1926 * => uobj/uoffset are to be used to handle VAC alignment, if required
1927 * => if "align" is non-zero, we attempt to align to that value.
1928 * => caller must at least have read-locked map
1929 * => returns NULL on failure, or pointer to prev. map entry if success
1930 * => note this is a cross between the old vm_map_findspace and vm_map_find
1931 */
1932
1933 struct vm_map_entry *
1934 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1935 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1936 vsize_t align, int flags)
1937 {
1938 struct vm_map_entry *entry;
1939 struct vm_map_entry *child, *prev, *tmp;
1940 vaddr_t orig_hint;
1941 const int topdown = map->flags & VM_MAP_TOPDOWN;
1942 UVMHIST_FUNC("uvm_map_findspace");
1943 UVMHIST_CALLED(maphist);
1944
1945 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1946 map, hint, length, flags);
1947 KASSERT((align & (align - 1)) == 0);
1948 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1949
1950 uvm_map_check(map, "map_findspace entry");
1951
1952 /*
1953 * remember the original hint. if we are aligning, then we
1954 * may have to try again with no alignment constraint if
1955 * we fail the first time.
1956 */
1957
1958 orig_hint = hint;
1959 if (hint < vm_map_min(map)) { /* check ranges ... */
1960 if (flags & UVM_FLAG_FIXED) {
1961 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1962 return (NULL);
1963 }
1964 hint = vm_map_min(map);
1965 }
1966 if (hint > vm_map_max(map)) {
1967 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1968 hint, vm_map_min(map), vm_map_max(map), 0);
1969 return (NULL);
1970 }
1971
1972 /*
1973 * Look for the first possible address; if there's already
1974 * something at this address, we have to start after it.
1975 */
1976
1977 /*
1978 * @@@: there are four, no, eight cases to consider.
1979 *
1980 * 0: found, fixed, bottom up -> fail
1981 * 1: found, fixed, top down -> fail
1982 * 2: found, not fixed, bottom up -> start after entry->end,
1983 * loop up
1984 * 3: found, not fixed, top down -> start before entry->start,
1985 * loop down
1986 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1987 * 5: not found, fixed, top down -> check entry->next->start, fail
1988 * 6: not found, not fixed, bottom up -> check entry->next->start,
1989 * loop up
1990 * 7: not found, not fixed, top down -> check entry->next->start,
1991 * loop down
1992 *
1993  * as you can see, it reduces to roughly five cases; note that
1994  * adding top down mapping only adds one unique case (without
1995  * it, there would be four cases).
1996 */
1997
1998 if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1999 entry = map->first_free;
2000 } else {
2001 if (uvm_map_lookup_entry(map, hint, &entry)) {
2002 /* "hint" address already in use ... */
2003 if (flags & UVM_FLAG_FIXED) {
2004 UVMHIST_LOG(maphist, "<- fixed & VA in use",
2005 0, 0, 0, 0);
2006 return (NULL);
2007 }
2008 if (topdown)
2009 /* Start from lower gap. */
2010 entry = entry->prev;
2011 } else if (flags & UVM_FLAG_FIXED) {
2012 if (entry->next->start >= hint + length &&
2013 hint + length > hint)
2014 goto found;
2015
2016 /* "hint" address is gap but too small */
2017 UVMHIST_LOG(maphist, "<- fixed mapping failed",
2018 0, 0, 0, 0);
2019 return (NULL); /* only one shot at it ... */
2020 } else {
2021 /*
2022 * See if given hint fits in this gap.
2023 */
2024 switch (uvm_map_space_avail(&hint, length,
2025 uoffset, align, topdown, entry)) {
2026 case 1:
2027 goto found;
2028 case -1:
2029 goto wraparound;
2030 }
2031
2032 if (topdown) {
2033 /*
2034 * Still there is a chance to fit
2035 * if hint > entry->end.
2036 */
2037 } else {
2038 /* Start from higher gap. */
2039 entry = entry->next;
2040 if (entry == &map->header)
2041 goto notfound;
2042 goto nextgap;
2043 }
2044 }
2045 }
2046
2047 /*
2048  * Note that the UVM_FLAG_FIXED case is already handled.
2049 */
2050 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2051
2052 /* Try to find the space in the red-black tree */
2053
2054 /* Check slot before any entry */
2055 hint = topdown ? entry->next->start - length : entry->end;
2056 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2057 topdown, entry)) {
2058 case 1:
2059 goto found;
2060 case -1:
2061 goto wraparound;
2062 }
2063
2064 nextgap:
2065 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2066 /* If there is not enough space in the whole tree, we fail */
2067 tmp = ROOT_ENTRY(map);
2068 if (tmp == NULL || tmp->maxgap < length)
2069 goto notfound;
2070
2071 prev = NULL; /* previous candidate */
2072
2073 /* Find an entry close to hint that has enough space */
2074 for (; tmp;) {
2075 KASSERT(tmp->next->start == tmp->end + tmp->gap);
2076 if (topdown) {
2077 if (tmp->next->start < hint + length &&
2078 (prev == NULL || tmp->end > prev->end)) {
2079 if (tmp->gap >= length)
2080 prev = tmp;
2081 else if ((child = LEFT_ENTRY(tmp)) != NULL
2082 && child->maxgap >= length)
2083 prev = tmp;
2084 }
2085 } else {
2086 if (tmp->end >= hint &&
2087 (prev == NULL || tmp->end < prev->end)) {
2088 if (tmp->gap >= length)
2089 prev = tmp;
2090 else if ((child = RIGHT_ENTRY(tmp)) != NULL
2091 && child->maxgap >= length)
2092 prev = tmp;
2093 }
2094 }
2095 if (tmp->next->start < hint + length)
2096 child = RIGHT_ENTRY(tmp);
2097 else if (tmp->end > hint)
2098 child = LEFT_ENTRY(tmp);
2099 else {
2100 if (tmp->gap >= length)
2101 break;
2102 if (topdown)
2103 child = LEFT_ENTRY(tmp);
2104 else
2105 child = RIGHT_ENTRY(tmp);
2106 }
2107 if (child == NULL || child->maxgap < length)
2108 break;
2109 tmp = child;
2110 }
2111
2112 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2113 /*
2114  * Check if the entry that we found satisfies the
2115 * space requirement
2116 */
2117 if (topdown) {
2118 if (hint > tmp->next->start - length)
2119 hint = tmp->next->start - length;
2120 } else {
2121 if (hint < tmp->end)
2122 hint = tmp->end;
2123 }
2124 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2125 topdown, tmp)) {
2126 case 1:
2127 entry = tmp;
2128 goto found;
2129 case -1:
2130 goto wraparound;
2131 }
2132 if (tmp->gap >= length)
2133 goto listsearch;
2134 }
2135 if (prev == NULL)
2136 goto notfound;
2137
2138 if (topdown) {
2139 KASSERT(orig_hint >= prev->next->start - length ||
2140 prev->next->start - length > prev->next->start);
2141 hint = prev->next->start - length;
2142 } else {
2143 KASSERT(orig_hint <= prev->end);
2144 hint = prev->end;
2145 }
2146 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2147 topdown, prev)) {
2148 case 1:
2149 entry = prev;
2150 goto found;
2151 case -1:
2152 goto wraparound;
2153 }
2154 if (prev->gap >= length)
2155 goto listsearch;
2156
2157 if (topdown)
2158 tmp = LEFT_ENTRY(prev);
2159 else
2160 tmp = RIGHT_ENTRY(prev);
2161 for (;;) {
2162 KASSERT(tmp && tmp->maxgap >= length);
2163 if (topdown)
2164 child = RIGHT_ENTRY(tmp);
2165 else
2166 child = LEFT_ENTRY(tmp);
2167 if (child && child->maxgap >= length) {
2168 tmp = child;
2169 continue;
2170 }
2171 if (tmp->gap >= length)
2172 break;
2173 if (topdown)
2174 tmp = LEFT_ENTRY(tmp);
2175 else
2176 tmp = RIGHT_ENTRY(tmp);
2177 }
2178
2179 if (topdown) {
2180 KASSERT(orig_hint >= tmp->next->start - length ||
2181 tmp->next->start - length > tmp->next->start);
2182 hint = tmp->next->start - length;
2183 } else {
2184 KASSERT(orig_hint <= tmp->end);
2185 hint = tmp->end;
2186 }
2187 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2188 topdown, tmp)) {
2189 case 1:
2190 entry = tmp;
2191 goto found;
2192 case -1:
2193 goto wraparound;
2194 }
2195
2196 /*
2197 * The tree fails to find an entry because of offset or alignment
2198 * restrictions. Search the list instead.
2199 */
2200 listsearch:
2201 /*
2202 * Look through the rest of the map, trying to fit a new region in
2203 * the gap between existing regions, or after the very last region.
2204 * note: entry->end = base VA of current gap,
2205 * entry->next->start = VA of end of current gap
2206 */
2207
2208 for (;;) {
2209 /* Update hint for current gap. */
2210 hint = topdown ? entry->next->start - length : entry->end;
2211
2212 /* See if it fits. */
2213 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2214 topdown, entry)) {
2215 case 1:
2216 goto found;
2217 case -1:
2218 goto wraparound;
2219 }
2220
2221 /* Advance to next/previous gap */
2222 if (topdown) {
2223 if (entry == &map->header) {
2224 UVMHIST_LOG(maphist, "<- failed (off start)",
2225 0,0,0,0);
2226 goto notfound;
2227 }
2228 entry = entry->prev;
2229 } else {
2230 entry = entry->next;
2231 if (entry == &map->header) {
2232 UVMHIST_LOG(maphist, "<- failed (off end)",
2233 0,0,0,0);
2234 goto notfound;
2235 }
2236 }
2237 }
2238
2239 found:
2240 SAVE_HINT(map, map->hint, entry);
2241 *result = hint;
2242 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2243 KASSERT( topdown || hint >= orig_hint);
2244 KASSERT(!topdown || hint <= orig_hint);
2245 KASSERT(entry->end <= hint);
2246 KASSERT(hint + length <= entry->next->start);
2247 return (entry);
2248
2249 wraparound:
2250 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2251
2252 return (NULL);
2253
2254 notfound:
2255 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2256
2257 return (NULL);
2258 }
2259
2260 /*
2261 * U N M A P - m a i n h e l p e r f u n c t i o n s
2262 */
2263
2264 /*
2265 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2266 *
2267 * => caller must check alignment and size
2268 * => map must be locked by caller
2269  * => we return a list of map entries that we've removed from the map
2270 * in "entry_list"
2271 */
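/*
 * Illustrative call sequence (not part of the original source): a
 * typical unmap removes the entries with the map write-locked and only
 * drops the backing references after the lock has been released, so
 * that any pager I/O happens without the map lock held.  "mymap",
 * "start" and "end" are hypothetical.
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(mymap);
 *	uvm_unmap_remove(mymap, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(mymap);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */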
2272
2273 void
2274 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2275 struct vm_map_entry **entry_list /* OUT */,
2276 struct uvm_mapent_reservation *umr, int flags)
2277 {
2278 struct vm_map_entry *entry, *first_entry, *next;
2279 vaddr_t len;
2280 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2281
2282 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2283 map, start, end, 0);
2284 VM_MAP_RANGE_CHECK(map, start, end);
2285
2286 uvm_map_check(map, "unmap_remove entry");
2287
2288 /*
2289 * find first entry
2290 */
2291
2292 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2293 /* clip and go... */
2294 entry = first_entry;
2295 UVM_MAP_CLIP_START(map, entry, start, umr);
2296 /* critical! prevents stale hint */
2297 SAVE_HINT(map, entry, entry->prev);
2298 } else {
2299 entry = first_entry->next;
2300 }
2301
2302 /*
2303 * Save the free space hint
2304 */
2305
2306 if (map->first_free != &map->header && map->first_free->start >= start)
2307 map->first_free = entry->prev;
2308
2309 /*
2310 * note: we now re-use first_entry for a different task. we remove
2311 * a number of map entries from the map and save them in a linked
2312 * list headed by "first_entry". once we remove them from the map
2313 * the caller should unlock the map and drop the references to the
2314 * backing objects [c.f. uvm_unmap_detach]. the object is to
2315 * separate unmapping from reference dropping. why?
2316 * [1] the map has to be locked for unmapping
2317 * [2] the map need not be locked for reference dropping
2318 * [3] dropping references may trigger pager I/O, and if we hit
2319 * a pager that does synchronous I/O we may have to wait for it.
2320 * [4] we would like all waiting for I/O to occur with maps unlocked
2321 * so that we don't block other threads.
2322 */
2323
2324 first_entry = NULL;
2325 *entry_list = NULL;
2326
2327 /*
2328 * break up the area into map entry sized regions and unmap. note
2329 * that all mappings have to be removed before we can even consider
2330 * dropping references to amaps or VM objects (otherwise we could end
2331 * up with a mapping to a page on the free list which would be very bad)
2332 */
2333
2334 while ((entry != &map->header) && (entry->start < end)) {
2335 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2336
2337 UVM_MAP_CLIP_END(map, entry, end, umr);
2338 next = entry->next;
2339 len = entry->end - entry->start;
2340
2341 /*
2342 * unwire before removing addresses from the pmap; otherwise
2343 * unwiring will put the entries back into the pmap (XXX).
2344 */
2345
2346 if (VM_MAPENT_ISWIRED(entry)) {
2347 uvm_map_entry_unwire(map, entry);
2348 }
2349 if (flags & UVM_FLAG_VAONLY) {
2350
2351 /* nothing */
2352
2353 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2354
2355 /*
2356 * if the map is non-pageable, any pages mapped there
2357 * must be wired and entered with pmap_kenter_pa(),
2358 * and we should free any such pages immediately.
2359 * this is mostly used for kmem_map.
2360 */
2361 KASSERT(vm_map_pmap(map) == pmap_kernel());
2362
2363 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2364 uvm_km_pgremove_intrsafe(map, entry->start,
2365 entry->end);
2366 pmap_kremove(entry->start, len);
2367 }
2368 } else if (UVM_ET_ISOBJ(entry) &&
2369 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2370 KASSERT(vm_map_pmap(map) == pmap_kernel());
2371
2372 /*
2373 * note: kernel object mappings are currently used in
2374 * two ways:
2375 * [1] "normal" mappings of pages in the kernel object
2376 * [2] uvm_km_valloc'd allocations in which we
2377 * pmap_enter in some non-kernel-object page
2378 * (e.g. vmapbuf).
2379 *
2380 * for case [1], we need to remove the mapping from
2381 * the pmap and then remove the page from the kernel
2382 * object (because, once pages in a kernel object are
2383 * unmapped they are no longer needed, unlike, say,
2384 * a vnode where you might want the data to persist
2385 * until flushed out of a queue).
2386 *
2387 * for case [2], we need to remove the mapping from
2388 * the pmap. there shouldn't be any pages at the
2389 * specified offset in the kernel object [but it
2390 * doesn't hurt to call uvm_km_pgremove just to be
2391 * safe?]
2392 *
2393 * uvm_km_pgremove currently does the following:
2394 * for pages in the kernel object in range:
2395 * - drops the swap slot
2396 * - uvm_pagefree the page
2397 */
2398
2399 /*
2400 * remove mappings from pmap and drop the pages
2401 * from the object. offsets are always relative
2402 * to vm_map_min(kernel_map).
2403 */
2404
2405 pmap_remove(pmap_kernel(), entry->start,
2406 entry->start + len);
2407 uvm_km_pgremove(entry->start, entry->end);
2408
2409 /*
2410 * null out kernel_object reference, we've just
2411 * dropped it
2412 */
2413
2414 entry->etype &= ~UVM_ET_OBJ;
2415 entry->object.uvm_obj = NULL;
2416 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2417
2418 /*
2419 * remove mappings the standard way.
2420 */
2421
2422 pmap_remove(map->pmap, entry->start, entry->end);
2423 }
2424
2425 #if defined(DEBUG)
2426 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2427
2428 /*
2429  * check if there's any remaining mapping,
2430  * which would be a bug in the caller.
2431 */
2432
2433 vaddr_t va;
2434 for (va = entry->start; va < entry->end;
2435 va += PAGE_SIZE) {
2436 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2437 panic("uvm_unmap_remove: has mapping");
2438 }
2439 }
2440
2441 if (VM_MAP_IS_KERNEL(map)) {
2442 uvm_km_check_empty(map, entry->start,
2443 entry->end);
2444 }
2445 }
2446 #endif /* defined(DEBUG) */
2447
2448 /*
2449 * remove entry from map and put it on our list of entries
2450 * that we've nuked. then go to next entry.
2451 */
2452
2453 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2454
2455 /* critical! prevents stale hint */
2456 SAVE_HINT(map, entry, entry->prev);
2457
2458 uvm_map_entry_unlink(map, entry);
2459 KASSERT(map->size >= len);
2460 map->size -= len;
2461 entry->prev = NULL;
2462 entry->next = first_entry;
2463 first_entry = entry;
2464 entry = next;
2465 }
2466
2467 /*
2468 * Note: if map is dying, leave pmap_update() for pmap_destroy(),
2469 * which will be called later.
2470 */
2471 if ((map->flags & VM_MAP_DYING) == 0) {
2472 pmap_update(vm_map_pmap(map));
2473 } else {
2474 KASSERT(vm_map_pmap(map) != pmap_kernel());
2475 }
2476
2477 uvm_map_check(map, "unmap_remove leave");
2478
2479 /*
2480 * now we've cleaned up the map and are ready for the caller to drop
2481 * references to the mapped objects.
2482 */
2483
2484 *entry_list = first_entry;
2485 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2486
2487 if (map->flags & VM_MAP_WANTVA) {
2488 mutex_enter(&map->misc_lock);
2489 map->flags &= ~VM_MAP_WANTVA;
2490 cv_broadcast(&map->cv);
2491 mutex_exit(&map->misc_lock);
2492 }
2493 }
2494
2495 /*
2496 * uvm_unmap_detach: drop references in a chain of map entries
2497 *
2498 * => we will free the map entries as we traverse the list.
2499 */
2500
2501 void
2502 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2503 {
2504 struct vm_map_entry *next_entry;
2505 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2506
2507 while (first_entry) {
2508 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2509 UVMHIST_LOG(maphist,
2510 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2511 first_entry, first_entry->aref.ar_amap,
2512 first_entry->object.uvm_obj,
2513 UVM_ET_ISSUBMAP(first_entry));
2514
2515 /*
2516 * drop reference to amap, if we've got one
2517 */
2518
2519 if (first_entry->aref.ar_amap)
2520 uvm_map_unreference_amap(first_entry, flags);
2521
2522 /*
2523 * drop reference to our backing object, if we've got one
2524 */
2525
2526 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2527 if (UVM_ET_ISOBJ(first_entry) &&
2528 first_entry->object.uvm_obj->pgops->pgo_detach) {
2529 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2530 (first_entry->object.uvm_obj);
2531 }
2532 next_entry = first_entry->next;
2533 uvm_mapent_free(first_entry);
2534 first_entry = next_entry;
2535 }
2536 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2537 }
2538
2539 /*
2540 * E X T R A C T I O N F U N C T I O N S
2541 */
2542
2543 /*
2544 * uvm_map_reserve: reserve space in a vm_map for future use.
2545 *
2546 * => we reserve space in a map by putting a dummy map entry in the
2547 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2548 * => map should be unlocked (we will write lock it)
2549 * => we return true if we were able to reserve space
2550 * => XXXCDC: should be inline?
2551 */
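/*
 * Illustrative sketch (not part of the original source): reserving
 * "size" bytes of VA in a hypothetical unlocked map "mymap", with no
 * particular offset or alignment constraint.
 *
 *	vaddr_t va = vm_map_min(mymap);		(initial hint)
 *
 *	if (!uvm_map_reserve(mymap, size, 0, 0, &va, 0))
 *		return ENOMEM;
 *
 * on success "va" names a blank, VM_PROT_NONE entry covering the
 * page-rounded size, to be filled in (or replaced) later.
 */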
2552
2553 int
2554 uvm_map_reserve(struct vm_map *map, vsize_t size,
2555 vaddr_t offset /* hint for pmap_prefer */,
2556 vsize_t align /* alignment */,
2557 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2558 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2559 {
2560 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2561
2562 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2563 map,size,offset,raddr);
2564
2565 size = round_page(size);
2566
2567 /*
2568 * reserve some virtual space.
2569 */
2570
2571 if (uvm_map(map, raddr, size, NULL, offset, align,
2572 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2573 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2574 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2575 return (false);
2576 }
2577
2578 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2579 return (true);
2580 }
2581
2582 /*
2583 * uvm_map_replace: replace a reserved (blank) area of memory with
2584 * real mappings.
2585 *
2586 * => caller must WRITE-LOCK the map
2587 * => we return true if replacement was a success
2588  * => we expect the newents chain to have nnewents entries on it and
2589 * we expect newents->prev to point to the last entry on the list
2590 * => note newents is allowed to be NULL
2591 */
2592
2593 static int
2594 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2595 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2596 struct vm_map_entry **oldentryp)
2597 {
2598 struct vm_map_entry *oldent, *last;
2599
2600 uvm_map_check(map, "map_replace entry");
2601
2602 /*
2603 * first find the blank map entry at the specified address
2604 */
2605
2606 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2607 return (false);
2608 }
2609
2610 /*
2611 * check to make sure we have a proper blank entry
2612 */
2613
2614 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2615 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2616 }
2617 if (oldent->start != start || oldent->end != end ||
2618 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2619 return (false);
2620 }
2621
2622 #ifdef DIAGNOSTIC
2623
2624 /*
2625 * sanity check the newents chain
2626 */
2627
2628 {
2629 struct vm_map_entry *tmpent = newents;
2630 int nent = 0;
2631 vsize_t sz = 0;
2632 vaddr_t cur = start;
2633
2634 while (tmpent) {
2635 nent++;
2636 sz += tmpent->end - tmpent->start;
2637 if (tmpent->start < cur)
2638 panic("uvm_map_replace1");
2639 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2640 panic("uvm_map_replace2: "
2641 "tmpent->start=0x%"PRIxVADDR
2642 ", tmpent->end=0x%"PRIxVADDR
2643 ", end=0x%"PRIxVADDR,
2644 tmpent->start, tmpent->end, end);
2645 }
2646 cur = tmpent->end;
2647 if (tmpent->next) {
2648 if (tmpent->next->prev != tmpent)
2649 panic("uvm_map_replace3");
2650 } else {
2651 if (newents->prev != tmpent)
2652 panic("uvm_map_replace4");
2653 }
2654 tmpent = tmpent->next;
2655 }
2656 if (nent != nnewents)
2657 panic("uvm_map_replace5");
2658 if (sz != nsize)
2659 panic("uvm_map_replace6");
2660 }
2661 #endif
2662
2663 /*
2664 * map entry is a valid blank! replace it. (this does all the
2665 * work of map entry link/unlink...).
2666 */
2667
2668 if (newents) {
2669 last = newents->prev;
2670
2671 /* critical: flush stale hints out of map */
2672 SAVE_HINT(map, map->hint, newents);
2673 if (map->first_free == oldent)
2674 map->first_free = last;
2675
2676 last->next = oldent->next;
2677 last->next->prev = last;
2678
2679 /* Fix RB tree */
2680 uvm_rb_remove(map, oldent);
2681
2682 newents->prev = oldent->prev;
2683 newents->prev->next = newents;
2684 map->nentries = map->nentries + (nnewents - 1);
2685
2686 /* Fixup the RB tree */
2687 {
2688 int i;
2689 struct vm_map_entry *tmp;
2690
2691 tmp = newents;
2692 for (i = 0; i < nnewents && tmp; i++) {
2693 uvm_rb_insert(map, tmp);
2694 tmp = tmp->next;
2695 }
2696 }
2697 } else {
2698 /* NULL list of new entries: just remove the old one */
2699 clear_hints(map, oldent);
2700 uvm_map_entry_unlink(map, oldent);
2701 }
2702 map->size -= end - start - nsize;
2703
2704 uvm_map_check(map, "map_replace leave");
2705
2706 /*
2707 * now we can free the old blank entry and return.
2708 */
2709
2710 *oldentryp = oldent;
2711 return (true);
2712 }
2713
2714 /*
2715 * uvm_map_extract: extract a mapping from a map and put it somewhere
2716 * (maybe removing the old mapping)
2717 *
2718 * => maps should be unlocked (we will write lock them)
2719 * => returns 0 on success, error code otherwise
2720 * => start must be page aligned
2721 * => len must be page sized
2722 * => flags:
2723 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2724 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2725 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2726 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2727 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2728 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2729 * be used from within the kernel in a kernel level map <<<
2730 */
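/*
 * Illustrative call (not part of the original source): moving "len"
 * bytes of mappings starting at the page-aligned address "srcva" out
 * of a hypothetical "srcmap" and into kernel_map, removing them from
 * the source as we go.  Both maps are unlocked at this point.
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcva, len, kernel_map, &kva,
 *	    UVM_EXTRACT_REMOVE);
 *	if (error)
 *		return error;
 *
 * on success "kva" is the new kernel VA of the extracted mappings.
 */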
2731
2732 int
2733 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2734 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2735 {
2736 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2737 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2738 *deadentry, *oldentry;
2739 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2740 vsize_t elen;
2741 int nchain, error, copy_ok;
2742 vsize_t nsize;
2743 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2744
2745 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2746 len,0);
2747 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2748
2749 /*
2750 * step 0: sanity check: start must be on a page boundary, length
2751 * must be page sized. can't ask for CONTIG/QREF if you asked for
2752 * REMOVE.
2753 */
2754
2755 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2756 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2757 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2758
2759 /*
2760 * step 1: reserve space in the target map for the extracted area
2761 */
2762
2763 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2764 dstaddr = vm_map_min(dstmap);
2765 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2766 return (ENOMEM);
2767 *dstaddrp = dstaddr; /* pass address back to caller */
2768 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2769 } else {
2770 dstaddr = *dstaddrp;
2771 }
2772
2773 /*
2774 * step 2: setup for the extraction process loop by init'ing the
2775 * map entry chain, locking src map, and looking up the first useful
2776 * entry in the map.
2777 */
2778
2779 end = start + len;
2780 newend = dstaddr + len;
2781 chain = endchain = NULL;
2782 nchain = 0;
2783 nsize = 0;
2784 vm_map_lock(srcmap);
2785
2786 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2787
2788 /* "start" is within an entry */
2789 if (flags & UVM_EXTRACT_QREF) {
2790
2791 /*
2792 * for quick references we don't clip the entry, so
2793 * the entry may map space "before" the starting
2794 * virtual address... this is the "fudge" factor
2795 * (which can be non-zero only the first time
2796 * through the "while" loop in step 3).
2797 */
2798
2799 fudge = start - entry->start;
2800 } else {
2801
2802 /*
2803 * normal reference: we clip the map to fit (thus
2804 * fudge is zero)
2805 */
2806
2807 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2808 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2809 fudge = 0;
2810 }
2811 } else {
2812
2813 /* "start" is not within an entry ... skip to next entry */
2814 if (flags & UVM_EXTRACT_CONTIG) {
2815 error = EINVAL;
2816 goto bad; /* definite hole here ... */
2817 }
2818
2819 entry = entry->next;
2820 fudge = 0;
2821 }
2822
2823 /* save values from srcmap for step 6 */
2824 orig_entry = entry;
2825 orig_fudge = fudge;
2826
2827 /*
2828 * step 3: now start looping through the map entries, extracting
2829 * as we go.
2830 */
2831
2832 while (entry->start < end && entry != &srcmap->header) {
2833
2834 /* if we are not doing a quick reference, clip it */
2835 if ((flags & UVM_EXTRACT_QREF) == 0)
2836 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2837
2838 /* clear needs_copy (allow chunking) */
2839 if (UVM_ET_ISNEEDSCOPY(entry)) {
2840 amap_copy(srcmap, entry,
2841 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2842 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2843 error = ENOMEM;
2844 goto bad;
2845 }
2846
2847 /* amap_copy could clip (during chunk)! update fudge */
2848 if (fudge) {
2849 fudge = start - entry->start;
2850 orig_fudge = fudge;
2851 }
2852 }
2853
2854 /* calculate the offset of this from "start" */
2855 oldoffset = (entry->start + fudge) - start;
2856
2857 /* allocate a new map entry */
2858 newentry = uvm_mapent_alloc(dstmap, 0);
2859 if (newentry == NULL) {
2860 error = ENOMEM;
2861 goto bad;
2862 }
2863
2864 /* set up new map entry */
2865 newentry->next = NULL;
2866 newentry->prev = endchain;
2867 newentry->start = dstaddr + oldoffset;
2868 newentry->end =
2869 newentry->start + (entry->end - (entry->start + fudge));
2870 if (newentry->end > newend || newentry->end < newentry->start)
2871 newentry->end = newend;
2872 newentry->object.uvm_obj = entry->object.uvm_obj;
2873 if (newentry->object.uvm_obj) {
2874 if (newentry->object.uvm_obj->pgops->pgo_reference)
2875 newentry->object.uvm_obj->pgops->
2876 pgo_reference(newentry->object.uvm_obj);
2877 newentry->offset = entry->offset + fudge;
2878 } else {
2879 newentry->offset = 0;
2880 }
2881 newentry->etype = entry->etype;
2882 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2883 entry->max_protection : entry->protection;
2884 newentry->max_protection = entry->max_protection;
2885 newentry->inheritance = entry->inheritance;
2886 newentry->wired_count = 0;
2887 newentry->aref.ar_amap = entry->aref.ar_amap;
2888 if (newentry->aref.ar_amap) {
2889 newentry->aref.ar_pageoff =
2890 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2891 uvm_map_reference_amap(newentry, AMAP_SHARED |
2892 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2893 } else {
2894 newentry->aref.ar_pageoff = 0;
2895 }
2896 newentry->advice = entry->advice;
2897 if ((flags & UVM_EXTRACT_QREF) != 0) {
2898 newentry->flags |= UVM_MAP_NOMERGE;
2899 }
2900
2901 /* now link it on the chain */
2902 nchain++;
2903 nsize += newentry->end - newentry->start;
2904 if (endchain == NULL) {
2905 chain = endchain = newentry;
2906 } else {
2907 endchain->next = newentry;
2908 endchain = newentry;
2909 }
2910
2911 /* end of 'while' loop! */
2912 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2913 (entry->next == &srcmap->header ||
2914 entry->next->start != entry->end)) {
2915 error = EINVAL;
2916 goto bad;
2917 }
2918 entry = entry->next;
2919 fudge = 0;
2920 }
2921
2922 /*
2923 * step 4: close off chain (in format expected by uvm_map_replace)
2924 */
2925
2926 if (chain)
2927 chain->prev = endchain;
2928
2929 /*
2930 * step 5: attempt to lock the dest map so we can pmap_copy.
2931 * note usage of copy_ok:
2932 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2933 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2934 */
2935
2936 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2937 copy_ok = 1;
2938 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2939 nchain, nsize, &resentry)) {
2940 if (srcmap != dstmap)
2941 vm_map_unlock(dstmap);
2942 error = EIO;
2943 goto bad;
2944 }
2945 } else {
2946 copy_ok = 0;
2947 		/* replace deferred until step 7 */
2948 }
2949
2950 /*
2951 * step 6: traverse the srcmap a second time to do the following:
2952 * - if we got a lock on the dstmap do pmap_copy
2953 * - if UVM_EXTRACT_REMOVE remove the entries
2954 * we make use of orig_entry and orig_fudge (saved in step 2)
2955 */
2956
2957 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2958
2959 /* purge possible stale hints from srcmap */
2960 if (flags & UVM_EXTRACT_REMOVE) {
2961 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2962 if (srcmap->first_free != &srcmap->header &&
2963 srcmap->first_free->start >= start)
2964 srcmap->first_free = orig_entry->prev;
2965 }
2966
2967 entry = orig_entry;
2968 fudge = orig_fudge;
2969 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2970
2971 while (entry->start < end && entry != &srcmap->header) {
2972 if (copy_ok) {
2973 oldoffset = (entry->start + fudge) - start;
2974 elen = MIN(end, entry->end) -
2975 (entry->start + fudge);
2976 pmap_copy(dstmap->pmap, srcmap->pmap,
2977 dstaddr + oldoffset, elen,
2978 entry->start + fudge);
2979 }
2980
2981 /* we advance "entry" in the following if statement */
2982 if (flags & UVM_EXTRACT_REMOVE) {
2983 pmap_remove(srcmap->pmap, entry->start,
2984 entry->end);
2985 oldentry = entry; /* save entry */
2986 entry = entry->next; /* advance */
2987 uvm_map_entry_unlink(srcmap, oldentry);
2988 /* add to dead list */
2989 oldentry->next = deadentry;
2990 deadentry = oldentry;
2991 } else {
2992 entry = entry->next; /* advance */
2993 }
2994
2995 /* end of 'while' loop */
2996 fudge = 0;
2997 }
2998 pmap_update(srcmap->pmap);
2999
3000 /*
3001 * unlock dstmap. we will dispose of deadentry in
3002 * step 7 if needed
3003 */
3004
3005 if (copy_ok && srcmap != dstmap)
3006 vm_map_unlock(dstmap);
3007
3008 } else {
3009 deadentry = NULL;
3010 }
3011
3012 /*
3013 * step 7: we are done with the source map, unlock. if copy_ok
3014 * is 0 then we have not replaced the dummy mapping in dstmap yet
3015 * and we need to do so now.
3016 */
3017
3018 vm_map_unlock(srcmap);
3019 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
3020 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
3021
3022 /* now do the replacement if we didn't do it in step 5 */
3023 if (copy_ok == 0) {
3024 vm_map_lock(dstmap);
3025 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
3026 nchain, nsize, &resentry);
3027 vm_map_unlock(dstmap);
3028
3029 if (error == false) {
3030 error = EIO;
3031 goto bad2;
3032 }
3033 }
3034
3035 if (resentry != NULL)
3036 uvm_mapent_free(resentry);
3037
3038 return (0);
3039
3040 /*
3041 * bad: failure recovery
3042 */
3043 bad:
3044 vm_map_unlock(srcmap);
3045 bad2: /* src already unlocked */
3046 if (chain)
3047 uvm_unmap_detach(chain,
3048 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3049
3050 if (resentry != NULL)
3051 uvm_mapent_free(resentry);
3052
3053 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3054 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
3055 }
3056 return (error);
3057 }
3058
3059 /* end of extraction functions */
3060
3061 /*
3062 * uvm_map_submap: punch down part of a map into a submap
3063 *
3064 * => only the kernel_map is allowed to be submapped
3065 * => the purpose of submapping is to break up the locking granularity
3066 * of a larger map
3067 * => the range specified must have been mapped previously with a uvm_map()
3068 * call [with uobj==NULL] to create a blank map entry in the main map.
3069 * [And it had better still be blank!]
3070 * => maps which contain submaps should never be copied or forked.
3071 * => to remove a submap, use uvm_unmap() on the main map
3072 * and then uvm_map_deallocate() the submap.
3073 * => main map must be unlocked.
3074 * => submap must have been init'd and have a zero reference count.
3075 * [need not be locked as we don't actually reference it]
3076 */
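/*
 * Illustrative sketch (not part of the original source): one way to
 * satisfy the "blank entry" requirement is to reserve the range first
 * (e.g. with uvm_map_reserve() above) and then punch it down.  "size"
 * is assumed page-rounded and "submap" already initialised with a zero
 * reference count; both names are hypothetical.
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *
 *	if (!uvm_map_reserve(kernel_map, size, 0, 0, &va, 0))
 *		return ENOMEM;
 *	if (uvm_map_submap(kernel_map, va, va + size, submap) != 0)
 *		return EINVAL;
 */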
3077
3078 int
3079 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3080 struct vm_map *submap)
3081 {
3082 struct vm_map_entry *entry;
3083 struct uvm_mapent_reservation umr;
3084 int error;
3085
3086 uvm_mapent_reserve(map, &umr, 2, 0);
3087
3088 vm_map_lock(map);
3089 VM_MAP_RANGE_CHECK(map, start, end);
3090
3091 if (uvm_map_lookup_entry(map, start, &entry)) {
3092 UVM_MAP_CLIP_START(map, entry, start, &umr);
3093 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
3094 } else {
3095 entry = NULL;
3096 }
3097
3098 if (entry != NULL &&
3099 entry->start == start && entry->end == end &&
3100 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3101 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3102 entry->etype |= UVM_ET_SUBMAP;
3103 entry->object.sub_map = submap;
3104 entry->offset = 0;
3105 uvm_map_reference(submap);
3106 error = 0;
3107 } else {
3108 error = EINVAL;
3109 }
3110 vm_map_unlock(map);
3111
3112 uvm_mapent_unreserve(map, &umr);
3113
3114 return error;
3115 }
3116
3117 /*
3118 * uvm_map_setup_kernel: init in-kernel map
3119 *
3120 * => map must not be in service yet.
3121 */
3122
3123 void
3124 uvm_map_setup_kernel(struct vm_map_kernel *map,
3125 vaddr_t vmin, vaddr_t vmax, int flags)
3126 {
3127
3128 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
3129 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
3130 LIST_INIT(&map->vmk_kentry_free);
3131 map->vmk_merged_entries = NULL;
3132 }
3133
3134
3135 /*
3136 * uvm_map_protect: change map protection
3137 *
3138 * => set_max means set max_protection.
3139 * => map must be unlocked.
3140 */
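/*
 * Illustrative call (not part of the original source): revoking write
 * (and execute) access from a range, much as mprotect(2) would; "map",
 * "va" and "len" are hypothetical and the map is unlocked here.
 *
 *	error = uvm_map_protect(map, va, va + len, VM_PROT_READ, false);
 *
 * with set_max == true the call would instead set max_protection to the
 * new value and clip the current protection against it.
 */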
3141
3142 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3143 ~VM_PROT_WRITE : VM_PROT_ALL)
3144
3145 int
3146 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3147 vm_prot_t new_prot, bool set_max)
3148 {
3149 struct vm_map_entry *current, *entry;
3150 int error = 0;
3151 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
3152 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
3153 map, start, end, new_prot);
3154
3155 vm_map_lock(map);
3156 VM_MAP_RANGE_CHECK(map, start, end);
3157 if (uvm_map_lookup_entry(map, start, &entry)) {
3158 UVM_MAP_CLIP_START(map, entry, start, NULL);
3159 } else {
3160 entry = entry->next;
3161 }
3162
3163 /*
3164 * make a first pass to check for protection violations.
3165 */
3166
3167 current = entry;
3168 while ((current != &map->header) && (current->start < end)) {
3169 if (UVM_ET_ISSUBMAP(current)) {
3170 error = EINVAL;
3171 goto out;
3172 }
3173 if ((new_prot & current->max_protection) != new_prot) {
3174 error = EACCES;
3175 goto out;
3176 }
3177 /*
3178 * Don't allow VM_PROT_EXECUTE to be set on entries that
3179 * point to vnodes that are associated with a NOEXEC file
3180 * system.
3181 */
3182 if (UVM_ET_ISOBJ(current) &&
3183 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3184 struct vnode *vp =
3185 (struct vnode *) current->object.uvm_obj;
3186
3187 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3188 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3189 error = EACCES;
3190 goto out;
3191 }
3192 }
3193
3194 current = current->next;
3195 }
3196
3197 /* go back and fix up protections (no need to clip this time). */
3198
3199 current = entry;
3200 while ((current != &map->header) && (current->start < end)) {
3201 vm_prot_t old_prot;
3202
3203 UVM_MAP_CLIP_END(map, current, end, NULL);
3204 old_prot = current->protection;
3205 if (set_max)
3206 current->protection =
3207 (current->max_protection = new_prot) & old_prot;
3208 else
3209 current->protection = new_prot;
3210
3211 /*
3212 * update physical map if necessary. worry about copy-on-write
3213 * here -- CHECK THIS XXX
3214 */
3215
3216 if (current->protection != old_prot) {
3217 /* update pmap! */
3218 pmap_protect(map->pmap, current->start, current->end,
3219 current->protection & MASK(entry));
3220
3221 /*
3222 * If this entry points at a vnode, and the
3223 * protection includes VM_PROT_EXECUTE, mark
3224 * the vnode as VEXECMAP.
3225 */
3226 if (UVM_ET_ISOBJ(current)) {
3227 struct uvm_object *uobj =
3228 current->object.uvm_obj;
3229
3230 if (UVM_OBJ_IS_VNODE(uobj) &&
3231 (current->protection & VM_PROT_EXECUTE)) {
3232 vn_markexec((struct vnode *) uobj);
3233 }
3234 }
3235 }
3236
3237 /*
3238 * If the map is configured to lock any future mappings,
3239 * wire this entry now if the old protection was VM_PROT_NONE
3240 * and the new protection is not VM_PROT_NONE.
3241 */
3242
3243 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3244 VM_MAPENT_ISWIRED(entry) == 0 &&
3245 old_prot == VM_PROT_NONE &&
3246 new_prot != VM_PROT_NONE) {
3247 if (uvm_map_pageable(map, entry->start,
3248 entry->end, false,
3249 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3250
3251 /*
3252 * If locking the entry fails, remember the
3253 * error if it's the first one. Note we
3254 * still continue setting the protection in
3255 * the map, but will return the error
3256 * condition regardless.
3257 *
3258 * XXX Ignore what the actual error is,
3259 * XXX just call it a resource shortage
3260 * XXX so that it doesn't get confused
3261 * XXX what uvm_map_protect() itself would
3262 * XXX normally return.
3263 */
3264
3265 error = ENOMEM;
3266 }
3267 }
3268 current = current->next;
3269 }
3270 pmap_update(map->pmap);
3271
3272 out:
3273 vm_map_unlock(map);
3274
3275 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3276 return error;
3277 }
3278
3279 #undef MASK
3280
3281 /*
3282 * uvm_map_inherit: set inheritance code for range of addrs in map.
3283 *
3284 * => map must be unlocked
3285 * => note that the inherit code is used during a "fork". see fork
3286 * code for details.
3287 */
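/*
 * Illustrative call (not part of the original source): marking a range
 * so that a child shares it after fork, as minherit(2) would; "map",
 * "va" and "len" are hypothetical.
 *
 *	error = uvm_map_inherit(map, va, va + len, MAP_INHERIT_SHARE);
 */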
3288
3289 int
3290 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3291 vm_inherit_t new_inheritance)
3292 {
3293 struct vm_map_entry *entry, *temp_entry;
3294 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3295 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3296 map, start, end, new_inheritance);
3297
3298 switch (new_inheritance) {
3299 case MAP_INHERIT_NONE:
3300 case MAP_INHERIT_COPY:
3301 case MAP_INHERIT_SHARE:
3302 break;
3303 default:
3304 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3305 return EINVAL;
3306 }
3307
3308 vm_map_lock(map);
3309 VM_MAP_RANGE_CHECK(map, start, end);
3310 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3311 entry = temp_entry;
3312 UVM_MAP_CLIP_START(map, entry, start, NULL);
3313 } else {
3314 entry = temp_entry->next;
3315 }
3316 while ((entry != &map->header) && (entry->start < end)) {
3317 UVM_MAP_CLIP_END(map, entry, end, NULL);
3318 entry->inheritance = new_inheritance;
3319 entry = entry->next;
3320 }
3321 vm_map_unlock(map);
3322 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3323 return 0;
3324 }
3325
3326 /*
3327 * uvm_map_advice: set advice code for range of addrs in map.
3328 *
3329 * => map must be unlocked
3330 */
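/*
 * Illustrative call (not part of the original source): recording a
 * sequential-access hint for a range, as madvise(2) would; "map", "va"
 * and "len" are hypothetical.
 *
 *	error = uvm_map_advice(map, va, va + len, MADV_SEQUENTIAL);
 */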
3331
3332 int
3333 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3334 {
3335 struct vm_map_entry *entry, *temp_entry;
3336 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3337 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3338 map, start, end, new_advice);
3339
3340 vm_map_lock(map);
3341 VM_MAP_RANGE_CHECK(map, start, end);
3342 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3343 entry = temp_entry;
3344 UVM_MAP_CLIP_START(map, entry, start, NULL);
3345 } else {
3346 entry = temp_entry->next;
3347 }
3348
3349 /*
3350 * XXXJRT: disallow holes?
3351 */
3352
3353 while ((entry != &map->header) && (entry->start < end)) {
3354 UVM_MAP_CLIP_END(map, entry, end, NULL);
3355
3356 switch (new_advice) {
3357 case MADV_NORMAL:
3358 case MADV_RANDOM:
3359 case MADV_SEQUENTIAL:
3360 /* nothing special here */
3361 break;
3362
3363 default:
3364 vm_map_unlock(map);
3365 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3366 return EINVAL;
3367 }
3368 entry->advice = new_advice;
3369 entry = entry->next;
3370 }
3371
3372 vm_map_unlock(map);
3373 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3374 return 0;
3375 }
3376
3377 /*
3378 * uvm_map_willneed: apply MADV_WILLNEED
3379 */
3380
3381 int
3382 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3383 {
3384 struct vm_map_entry *entry;
3385 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3386 UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
3387 map, start, end, 0);
3388
3389 vm_map_lock_read(map);
3390 VM_MAP_RANGE_CHECK(map, start, end);
3391 if (!uvm_map_lookup_entry(map, start, &entry)) {
3392 entry = entry->next;
3393 }
3394 while (entry->start < end) {
3395 struct vm_amap * const amap = entry->aref.ar_amap;
3396 struct uvm_object * const uobj = entry->object.uvm_obj;
3397
3398 KASSERT(entry != &map->header);
3399 KASSERT(start < entry->end);
3400 /*
3401 		 * For now, we handle only the easy but commonly-requested case,
3402 		 * i.e. start prefetching of backing uobj pages.
3403 *
3404 * XXX It might be useful to pmap_enter() the already-in-core
3405 * pages by inventing a "weak" mode for uvm_fault() which would
3406 * only do the PGO_LOCKED pgo_get().
3407 */
3408 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3409 off_t offset;
3410 off_t size;
3411
3412 offset = entry->offset;
3413 if (start < entry->start) {
3414 offset += entry->start - start;
3415 }
3416 size = entry->offset + (entry->end - entry->start);
3417 if (entry->end < end) {
3418 size -= end - entry->end;
3419 }
3420 uvm_readahead(uobj, offset, size);
3421 }
3422 entry = entry->next;
3423 }
3424 vm_map_unlock_read(map);
3425 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3426 return 0;
3427 }
3428
3429 /*
3430 * uvm_map_pageable: sets the pageability of a range in a map.
3431 *
3432 * => wires map entries. should not be used for transient page locking.
3433 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3434 * => regions specified as not pageable require lock-down (wired) memory
3435 * and page tables.
3436 * => map must never be read-locked
3437 * => if islocked is true, map is already write-locked
3438 * => we always unlock the map, since we must downgrade to a read-lock
3439 * to call uvm_fault_wire()
3440 * => XXXCDC: check this and try and clean it up.
3441 */
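/*
 * Illustrative call (not part of the original source): wiring down a
 * range of a pageable map, as mlock(2) would, with the map not yet
 * locked by the caller (lockflags == 0); "map", "va" and "len" are
 * hypothetical.
 *
 *	error = uvm_map_pageable(map, va, va + len, false, 0);
 *
 * passing new_pageable == true instead unwires the range again.
 */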
3442
3443 int
3444 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3445 bool new_pageable, int lockflags)
3446 {
3447 struct vm_map_entry *entry, *start_entry, *failed_entry;
3448 int rv;
3449 #ifdef DIAGNOSTIC
3450 u_int timestamp_save;
3451 #endif
3452 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3453 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3454 map, start, end, new_pageable);
3455 KASSERT(map->flags & VM_MAP_PAGEABLE);
3456
3457 if ((lockflags & UVM_LK_ENTER) == 0)
3458 vm_map_lock(map);
3459 VM_MAP_RANGE_CHECK(map, start, end);
3460
3461 /*
3462 * only one pageability change may take place at one time, since
3463 * uvm_fault_wire assumes it will be called only once for each
3464 * wiring/unwiring. therefore, we have to make sure we're actually
3465 * changing the pageability for the entire region. we do so before
3466 * making any changes.
3467 */
3468
3469 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3470 if ((lockflags & UVM_LK_EXIT) == 0)
3471 vm_map_unlock(map);
3472
3473 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3474 return EFAULT;
3475 }
3476 entry = start_entry;
3477
3478 /*
3479 * handle wiring and unwiring separately.
3480 */
3481
3482 if (new_pageable) { /* unwire */
3483 UVM_MAP_CLIP_START(map, entry, start, NULL);
3484
3485 /*
3486 * unwiring. first ensure that the range to be unwired is
3487 * really wired down and that there are no holes.
3488 */
3489
3490 while ((entry != &map->header) && (entry->start < end)) {
3491 if (entry->wired_count == 0 ||
3492 (entry->end < end &&
3493 (entry->next == &map->header ||
3494 entry->next->start > entry->end))) {
3495 if ((lockflags & UVM_LK_EXIT) == 0)
3496 vm_map_unlock(map);
3497 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3498 return EINVAL;
3499 }
3500 entry = entry->next;
3501 }
3502
3503 /*
3504 * POSIX 1003.1b - a single munlock call unlocks a region,
3505 * regardless of the number of mlock calls made on that
3506 * region.
3507 */
3508
3509 entry = start_entry;
3510 while ((entry != &map->header) && (entry->start < end)) {
3511 UVM_MAP_CLIP_END(map, entry, end, NULL);
3512 if (VM_MAPENT_ISWIRED(entry))
3513 uvm_map_entry_unwire(map, entry);
3514 entry = entry->next;
3515 }
3516 if ((lockflags & UVM_LK_EXIT) == 0)
3517 vm_map_unlock(map);
3518 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3519 return 0;
3520 }
3521
3522 /*
3523 * wire case: in two passes [XXXCDC: ugly block of code here]
3524 *
3525 * 1: holding the write lock, we create any anonymous maps that need
3526 * to be created. then we clip each map entry to the region to
3527 * be wired and increment its wiring count.
3528 *
3529 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3530 * in the pages for any newly wired area (wired_count == 1).
3531 *
3532 * downgrading to a read lock for uvm_fault_wire avoids a possible
3533 * deadlock with another thread that may have faulted on one of
3534 * the pages to be wired (it would mark the page busy, blocking
3535 * us, then in turn block on the map lock that we hold). because
3536 * of problems in the recursive lock package, we cannot upgrade
3537 * to a write lock in vm_map_lookup. thus, any actions that
3538 * require the write lock must be done beforehand. because we
3539 * keep the read lock on the map, the copy-on-write status of the
3540 * entries we modify here cannot change.
3541 */
3542
3543 while ((entry != &map->header) && (entry->start < end)) {
3544 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3545
3546 /*
3547 * perform actions of vm_map_lookup that need the
3548 * write lock on the map: create an anonymous map
3549 * for a copy-on-write region, or an anonymous map
3550 * for a zero-fill region. (XXXCDC: submap case
3551 * ok?)
3552 */
3553
3554 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3555 if (UVM_ET_ISNEEDSCOPY(entry) &&
3556 ((entry->max_protection & VM_PROT_WRITE) ||
3557 (entry->object.uvm_obj == NULL))) {
3558 amap_copy(map, entry, 0, start, end);
3559 /* XXXCDC: wait OK? */
3560 }
3561 }
3562 }
3563 UVM_MAP_CLIP_START(map, entry, start, NULL);
3564 UVM_MAP_CLIP_END(map, entry, end, NULL);
3565 entry->wired_count++;
3566
3567 /*
3568 * Check for holes
3569 */
3570
3571 if (entry->protection == VM_PROT_NONE ||
3572 (entry->end < end &&
3573 (entry->next == &map->header ||
3574 entry->next->start > entry->end))) {
3575
3576 /*
3577 * found one. amap creation actions do not need to
3578 * be undone, but the wired counts need to be restored.
3579 */
3580
3581 while (entry != &map->header && entry->end > start) {
3582 entry->wired_count--;
3583 entry = entry->prev;
3584 }
3585 if ((lockflags & UVM_LK_EXIT) == 0)
3586 vm_map_unlock(map);
3587 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3588 return EINVAL;
3589 }
3590 entry = entry->next;
3591 }
3592
3593 /*
3594 * Pass 2.
3595 */
3596
3597 #ifdef DIAGNOSTIC
3598 timestamp_save = map->timestamp;
3599 #endif
3600 vm_map_busy(map);
3601 vm_map_unlock(map);
3602
3603 rv = 0;
3604 entry = start_entry;
3605 while (entry != &map->header && entry->start < end) {
3606 if (entry->wired_count == 1) {
3607 rv = uvm_fault_wire(map, entry->start, entry->end,
3608 entry->max_protection, 1);
3609 if (rv) {
3610
3611 /*
3612 * wiring failed. break out of the loop.
3613 * we'll clean up the map below, once we
3614 * have a write lock again.
3615 */
3616
3617 break;
3618 }
3619 }
3620 entry = entry->next;
3621 }
3622
3623 if (rv) { /* failed? */
3624
3625 /*
3626 * Get back to an exclusive (write) lock.
3627 */
3628
3629 vm_map_lock(map);
3630 vm_map_unbusy(map);
3631
3632 #ifdef DIAGNOSTIC
3633 if (timestamp_save + 1 != map->timestamp)
3634 panic("uvm_map_pageable: stale map");
3635 #endif
3636
3637 /*
3638 * first drop the wiring count on all the entries
3639 * which haven't actually been wired yet.
3640 */
3641
3642 failed_entry = entry;
3643 while (entry != &map->header && entry->start < end) {
3644 entry->wired_count--;
3645 entry = entry->next;
3646 }
3647
3648 /*
3649 * now, unwire all the entries that were successfully
3650 * wired above.
3651 */
3652
3653 entry = start_entry;
3654 while (entry != failed_entry) {
3655 entry->wired_count--;
3656 if (VM_MAPENT_ISWIRED(entry) == 0)
3657 uvm_map_entry_unwire(map, entry);
3658 entry = entry->next;
3659 }
3660 if ((lockflags & UVM_LK_EXIT) == 0)
3661 vm_map_unlock(map);
3662 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3663 return (rv);
3664 }
3665
3666 if ((lockflags & UVM_LK_EXIT) == 0) {
3667 vm_map_unbusy(map);
3668 } else {
3669
3670 /*
3671 * Get back to an exclusive (write) lock.
3672 */
3673
3674 vm_map_lock(map);
3675 vm_map_unbusy(map);
3676 }
3677
3678 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3679 return 0;
3680 }
3681
3682 /*
3683 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3684 * all mapped regions.
3685 *
3686 * => map must not be locked.
3687 * => if no flags are specified, all regions are unwired.
3688 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3689 */
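/*
 * Illustrative call (not part of the original source): wiring all
 * current and future mappings, as mlockall(2) would; "map" and "lim"
 * (the locked-memory limit in bytes) are hypothetical.  flags == 0
 * unwires everything instead.
 *
 *	error = uvm_map_pageable_all(map, MCL_CURRENT | MCL_FUTURE, lim);
 */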
3690
3691 int
3692 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3693 {
3694 struct vm_map_entry *entry, *failed_entry;
3695 vsize_t size;
3696 int rv;
3697 #ifdef DIAGNOSTIC
3698 u_int timestamp_save;
3699 #endif
3700 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3701 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3702
3703 KASSERT(map->flags & VM_MAP_PAGEABLE);
3704
3705 vm_map_lock(map);
3706
3707 /*
3708 * handle wiring and unwiring separately.
3709 */
3710
3711 if (flags == 0) { /* unwire */
3712
3713 /*
3714 * POSIX 1003.1b -- munlockall unlocks all regions,
3715 * regardless of how many times mlockall has been called.
3716 */
3717
3718 for (entry = map->header.next; entry != &map->header;
3719 entry = entry->next) {
3720 if (VM_MAPENT_ISWIRED(entry))
3721 uvm_map_entry_unwire(map, entry);
3722 }
3723 map->flags &= ~VM_MAP_WIREFUTURE;
3724 vm_map_unlock(map);
3725 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3726 return 0;
3727 }
3728
3729 if (flags & MCL_FUTURE) {
3730
3731 /*
3732 * must wire all future mappings; remember this.
3733 */
3734
3735 map->flags |= VM_MAP_WIREFUTURE;
3736 }
3737
3738 if ((flags & MCL_CURRENT) == 0) {
3739
3740 /*
3741 * no more work to do!
3742 */
3743
3744 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3745 vm_map_unlock(map);
3746 return 0;
3747 }
3748
3749 /*
3750 * wire case: in three passes [XXXCDC: ugly block of code here]
3751 *
3752 * 1: holding the write lock, count all pages mapped by non-wired
3753 * entries. if this would cause us to go over our limit, we fail.
3754 *
3755 * 2: still holding the write lock, we create any anonymous maps that
3756  *    need to be created. then we increment each entry's wiring count.
3757 *
3758 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3759 * in the pages for any newly wired area (wired_count == 1).
3760 *
3761 * downgrading to a read lock for uvm_fault_wire avoids a possible
3762 * deadlock with another thread that may have faulted on one of
3763 * the pages to be wired (it would mark the page busy, blocking
3764 * us, then in turn block on the map lock that we hold). because
3765 * of problems in the recursive lock package, we cannot upgrade
3766 * to a write lock in vm_map_lookup. thus, any actions that
3767 * require the write lock must be done beforehand. because we
3768 * keep the read lock on the map, the copy-on-write status of the
3769 * entries we modify here cannot change.
3770 */
3771
3772 for (size = 0, entry = map->header.next; entry != &map->header;
3773 entry = entry->next) {
3774 if (entry->protection != VM_PROT_NONE &&
3775 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3776 size += entry->end - entry->start;
3777 }
3778 }
3779
3780 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3781 vm_map_unlock(map);
3782 return ENOMEM;
3783 }
3784
3785 if (limit != 0 &&
3786 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3787 vm_map_unlock(map);
3788 return ENOMEM;
3789 }
3790
3791 /*
3792 * Pass 2.
3793 */
3794
3795 for (entry = map->header.next; entry != &map->header;
3796 entry = entry->next) {
3797 if (entry->protection == VM_PROT_NONE)
3798 continue;
3799 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3800
3801 /*
3802 * perform actions of vm_map_lookup that need the
3803 * write lock on the map: create an anonymous map
3804 * for a copy-on-write region, or an anonymous map
3805 * for a zero-fill region. (XXXCDC: submap case
3806 * ok?)
3807 */
3808
3809 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3810 if (UVM_ET_ISNEEDSCOPY(entry) &&
3811 ((entry->max_protection & VM_PROT_WRITE) ||
3812 (entry->object.uvm_obj == NULL))) {
3813 amap_copy(map, entry, 0, entry->start,
3814 entry->end);
3815 /* XXXCDC: wait OK? */
3816 }
3817 }
3818 }
3819 entry->wired_count++;
3820 }
3821
3822 /*
3823 * Pass 3.
3824 */
3825
3826 #ifdef DIAGNOSTIC
3827 timestamp_save = map->timestamp;
3828 #endif
3829 vm_map_busy(map);
3830 vm_map_unlock(map);
3831
3832 rv = 0;
3833 for (entry = map->header.next; entry != &map->header;
3834 entry = entry->next) {
3835 if (entry->wired_count == 1) {
3836 rv = uvm_fault_wire(map, entry->start, entry->end,
3837 entry->max_protection, 1);
3838 if (rv) {
3839
3840 /*
3841 * wiring failed. break out of the loop.
3842 * we'll clean up the map below, once we
3843 * have a write lock again.
3844 */
3845
3846 break;
3847 }
3848 }
3849 }
3850
3851 if (rv) {
3852
3853 /*
3854 		 * Get back to an exclusive (write) lock.
3855 */
3856
3857 vm_map_lock(map);
3858 vm_map_unbusy(map);
3859
3860 #ifdef DIAGNOSTIC
3861 if (timestamp_save + 1 != map->timestamp)
3862 panic("uvm_map_pageable_all: stale map");
3863 #endif
3864
3865 /*
3866 * first drop the wiring count on all the entries
3867 * which haven't actually been wired yet.
3868 *
3869 * Skip VM_PROT_NONE entries like we did above.
3870 */
3871
3872 failed_entry = entry;
3873 for (/* nothing */; entry != &map->header;
3874 entry = entry->next) {
3875 if (entry->protection == VM_PROT_NONE)
3876 continue;
3877 entry->wired_count--;
3878 }
3879
3880 /*
3881 * now, unwire all the entries that were successfully
3882 * wired above.
3883 *
3884 * Skip VM_PROT_NONE entries like we did above.
3885 */
3886
3887 for (entry = map->header.next; entry != failed_entry;
3888 entry = entry->next) {
3889 if (entry->protection == VM_PROT_NONE)
3890 continue;
3891 entry->wired_count--;
3892 if (VM_MAPENT_ISWIRED(entry))
3893 uvm_map_entry_unwire(map, entry);
3894 }
3895 vm_map_unlock(map);
3896 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3897 return (rv);
3898 }
3899
3900 vm_map_unbusy(map);
3901
3902 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3903 return 0;
3904 }
3905
3906 /*
3907 * uvm_map_clean: clean out a map range
3908 *
3909 * => valid flags:
3910 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3911 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3912 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3913 * if (flags & PGO_FREE): any cached pages are freed after clean
3914 * => returns an error if any part of the specified range isn't mapped
3915 * => never a need to flush amap layer since the anonymous memory has
3916 * no permanent home, but may deactivate pages there
3917 * => called from sys_msync() and sys_madvise()
3918 * => caller must not write-lock map (read OK).
3919 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3920 */
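/*
 * Illustrative call (not part of the original source): what a
 * synchronous msync(2)-style flush of a range might look like; "map",
 * "va" and "len" are hypothetical.
 *
 *	error = uvm_map_clean(map, va, va + len, PGO_CLEANIT | PGO_SYNCIO);
 */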
3921
3922 int
3923 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3924 {
3925 struct vm_map_entry *current, *entry;
3926 struct uvm_object *uobj;
3927 struct vm_amap *amap;
3928 struct vm_anon *anon;
3929 struct vm_page *pg;
3930 vaddr_t offset;
3931 vsize_t size;
3932 voff_t uoff;
3933 int error, refs;
3934 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3935
3936 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3937 map, start, end, flags);
3938 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3939 (PGO_FREE|PGO_DEACTIVATE));
3940
3941 vm_map_lock_read(map);
3942 VM_MAP_RANGE_CHECK(map, start, end);
3943 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3944 vm_map_unlock_read(map);
3945 return EFAULT;
3946 }
3947
3948 /*
3949 * Make a first pass to check for holes and wiring problems.
3950 */
3951
3952 for (current = entry; current->start < end; current = current->next) {
3953 if (UVM_ET_ISSUBMAP(current)) {
3954 vm_map_unlock_read(map);
3955 return EINVAL;
3956 }
3957 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3958 vm_map_unlock_read(map);
3959 return EBUSY;
3960 }
3961 if (end <= current->end) {
3962 break;
3963 }
3964 if (current->end != current->next->start) {
3965 vm_map_unlock_read(map);
3966 return EFAULT;
3967 }
3968 }
3969
3970 error = 0;
3971 for (current = entry; start < end; current = current->next) {
3972 amap = current->aref.ar_amap; /* upper layer */
3973 uobj = current->object.uvm_obj; /* lower layer */
3974 KASSERT(start >= current->start);
3975
3976 /*
3977 * No amap cleaning necessary if:
3978 *
3979 * (1) There's no amap.
3980 *
3981 * (2) We're not deactivating or freeing pages.
3982 */
3983
3984 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3985 goto flush_object;
3986
3987 amap_lock(amap);
3988 offset = start - current->start;
3989 size = MIN(end, current->end) - start;
3990 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3991 anon = amap_lookup(&current->aref, offset);
3992 if (anon == NULL)
3993 continue;
3994
3995 mutex_enter(&anon->an_lock);
3996 pg = anon->an_page;
3997 if (pg == NULL) {
3998 mutex_exit(&anon->an_lock);
3999 continue;
4000 }
4001
4002 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
4003
4004 /*
4005 * In these first 3 cases, we just deactivate the page.
4006 */
4007
4008 case PGO_CLEANIT|PGO_FREE:
4009 case PGO_CLEANIT|PGO_DEACTIVATE:
4010 case PGO_DEACTIVATE:
4011 deactivate_it:
4012 /*
4013 * skip the page if it's loaned or wired,
4014 * since it shouldn't be on a paging queue
4015 * at all in these cases.
4016 */
4017
4018 mutex_enter(&uvm_pageqlock);
4019 if (pg->loan_count != 0 ||
4020 pg->wire_count != 0) {
4021 mutex_exit(&uvm_pageqlock);
4022 mutex_exit(&anon->an_lock);
4023 continue;
4024 }
4025 KASSERT(pg->uanon == anon);
4026 uvm_pagedeactivate(pg);
4027 mutex_exit(&uvm_pageqlock);
4028 mutex_exit(&anon->an_lock);
4029 continue;
4030
4031 case PGO_FREE:
4032
4033 /*
4034 * If there are multiple references to
4035 * the amap, just deactivate the page.
4036 */
4037
4038 if (amap_refs(amap) > 1)
4039 goto deactivate_it;
4040
4041 /* skip the page if it's wired */
4042 if (pg->wire_count != 0) {
4043 mutex_exit(&anon->an_lock);
4044 continue;
4045 }
4046 amap_unadd(&current->aref, offset);
4047 refs = --anon->an_ref;
4048 mutex_exit(&anon->an_lock);
4049 if (refs == 0)
4050 uvm_anfree(anon);
4051 continue;
4052 }
4053 }
4054 amap_unlock(amap);
4055
4056 flush_object:
4057 /*
4058 * flush pages if we've got a valid backing object.
4059 * note that we must always clean object pages before
4060 * freeing them since otherwise we could reveal stale
4061 * data from files.
4062 */
4063
4064 uoff = current->offset + (start - current->start);
4065 size = MIN(end, current->end) - start;
4066 if (uobj != NULL) {
4067 mutex_enter(&uobj->vmobjlock);
4068 if (uobj->pgops->pgo_put != NULL)
4069 error = (uobj->pgops->pgo_put)(uobj, uoff,
4070 uoff + size, flags | PGO_CLEANIT);
4071 else
4072 error = 0;
4073 }
4074 start += size;
4075 }
4076 vm_map_unlock_read(map);
4077 return (error);
4078 }
4079
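/*
 * Illustrative only: a minimal sketch of how a caller could ask for
 * msync(2)-style "write dirty pages back and wait" semantics over a
 * page-aligned range.  The wrapper and its name are hypothetical and not
 * part of this file; the flag values are the ones documented above, and
 * the caller must not hold the map write-locked.
 */
#if 0
static int
example_sync_range(struct vm_map *map, vaddr_t va, vsize_t len)
{
	vaddr_t start = trunc_page(va);
	vaddr_t end = round_page(va + len);

	/* clean dirty pages in [start, end) and wait for the I/O */
	return uvm_map_clean(map, start, end, PGO_CLEANIT | PGO_SYNCIO);
}
#endif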
4080
4081 /*
4082 * uvm_map_checkprot: check protection in map
4083 *
4084 * => must allow specified protection in a fully allocated region.
4085 * => map must be read or write locked by caller.
4086 */
4087
4088 bool
4089 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4090 vm_prot_t protection)
4091 {
4092 struct vm_map_entry *entry;
4093 struct vm_map_entry *tmp_entry;
4094
4095 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4096 return (false);
4097 }
4098 entry = tmp_entry;
4099 while (start < end) {
4100 if (entry == &map->header) {
4101 return (false);
4102 }
4103
4104 /*
4105 * no holes allowed
4106 */
4107
4108 if (start < entry->start) {
4109 return (false);
4110 }
4111
4112 /*
4113 * check protection associated with entry
4114 */
4115
4116 if ((entry->protection & protection) != protection) {
4117 return (false);
4118 }
4119 start = entry->end;
4120 entry = entry->next;
4121 }
4122 return (true);
4123 }
4124
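/*
 * Illustrative only: a minimal sketch of using uvm_map_checkprot() to
 * verify that a range is fully mapped with write permission.  The wrapper
 * is hypothetical; per the comment below, the map must be read or write
 * locked around the call.
 */
#if 0
static bool
example_range_is_writable(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	bool ok;

	vm_map_lock_read(map);
	ok = uvm_map_checkprot(map, start, end, VM_PROT_WRITE);
	vm_map_unlock_read(map);
	return ok;
}
#endif
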
4125 /*
4126 * uvmspace_alloc: allocate a vmspace structure.
4127 *
4128 * - structure includes vm_map and pmap
4129 * - XXX: no locking on this structure
4130 * - refcnt set to 1, rest must be init'd by caller
4131 */
4132 struct vmspace *
4133 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
4134 {
4135 struct vmspace *vm;
4136 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
4137
4138 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
4139 uvmspace_init(vm, NULL, vmin, vmax);
4140 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
4141 return (vm);
4142 }
4143
4144 /*
4145 * uvmspace_init: initialize a vmspace structure.
4146 *
4147 * - XXX: no locking on this structure
4148 * - refcnt set to 1, rest must be init'd by caller
4149 */
4150 void
4151 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
4152 {
4153 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
4154
4155 memset(vm, 0, sizeof(*vm));
4156 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4157 #ifdef __USING_TOPDOWN_VM
4158 | VM_MAP_TOPDOWN
4159 #endif
4160 );
4161 if (pmap)
4162 pmap_reference(pmap);
4163 else
4164 pmap = pmap_create();
4165 vm->vm_map.pmap = pmap;
4166 vm->vm_refcnt = 1;
4167 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4168 }
4169
4170 /*
4171 * uvmspace_share: share a vmspace between two processes
4172 *
4173 * - used for vfork, threads(?)
4174 */
4175
4176 void
4177 uvmspace_share(struct proc *p1, struct proc *p2)
4178 {
4179
4180 uvmspace_addref(p1->p_vmspace);
4181 p2->p_vmspace = p1->p_vmspace;
4182 }
4183
4184 #if 0
4185
4186 /*
4187 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4188 *
4189 * - XXX: no locking on vmspace
4190 */
4191
4192 void
4193 uvmspace_unshare(struct lwp *l)
4194 {
4195 struct proc *p = l->l_proc;
4196 struct vmspace *nvm, *ovm = p->p_vmspace;
4197
4198 if (ovm->vm_refcnt == 1)
4199 /* nothing to do: vmspace isn't shared in the first place */
4200 return;
4201
4202 /* make a new vmspace, still holding old one */
4203 nvm = uvmspace_fork(ovm);
4204
4205 kpreempt_disable();
4206 pmap_deactivate(l); /* unbind old vmspace */
4207 p->p_vmspace = nvm;
4208 pmap_activate(l); /* switch to new vmspace */
4209 kpreempt_enable();
4210
4211 uvmspace_free(ovm); /* drop reference to old vmspace */
4212 }
4213
4214 #endif
4215
4216 /*
4217 * uvmspace_exec: the process wants to exec a new program
4218 */
4219
4220 void
4221 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4222 {
4223 struct proc *p = l->l_proc;
4224 struct vmspace *nvm, *ovm = p->p_vmspace;
4225 struct vm_map *map = &ovm->vm_map;
4226
4227 #ifdef __sparc__
4228 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
4229 kill_user_windows(l); /* before stack addresses go away */
4230 #endif
4231 #ifdef __HAVE_CPU_VMSPACE_EXEC
4232 cpu_vmspace_exec(l, start, end);
4233 #endif
4234
4235 /*
4236 * see if more than one process is using this vmspace...
4237 */
4238
4239 if (ovm->vm_refcnt == 1) {
4240
4241 /*
4242 * if p is the only process using its vmspace then we can safely
4243 * recycle that vmspace for the program that is being exec'd.
4244 */
4245
4246 #ifdef SYSVSHM
4247 /*
4248 * SYSV SHM semantics require us to kill all segments on an exec
4249 */
4250
4251 if (ovm->vm_shm)
4252 shmexit(ovm);
4253 #endif
4254
4255 /*
4256 * POSIX 1003.1b -- "lock future mappings" is revoked
4257 * when a process execs another program image.
4258 */
4259
4260 map->flags &= ~VM_MAP_WIREFUTURE;
4261
4262 /*
4263 * now unmap the old program
4264 */
4265
4266 pmap_remove_all(map->pmap);
4267 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4268 KASSERT(map->header.prev == &map->header);
4269 KASSERT(map->nentries == 0);
4270
4271 /*
4272 * resize the map
4273 */
4274
4275 vm_map_setmin(map, start);
4276 vm_map_setmax(map, end);
4277 } else {
4278
4279 /*
4280 * p's vmspace is being shared, so we can't reuse it for p since
4281 * it is still being used for others. allocate a new vmspace
4282 * for p
4283 */
4284
4285 nvm = uvmspace_alloc(start, end);
4286
4287 /*
4288 * install new vmspace and drop our ref to the old one.
4289 */
4290
4291 kpreempt_disable();
4292 pmap_deactivate(l);
4293 p->p_vmspace = nvm;
4294 pmap_activate(l);
4295 kpreempt_enable();
4296
4297 uvmspace_free(ovm);
4298 }
4299 }
4300
4301 /*
4302 * uvmspace_addref: add a reference to a vmspace.
4303 */
4304
4305 void
4306 uvmspace_addref(struct vmspace *vm)
4307 {
4308 struct vm_map *map = &vm->vm_map;
4309
4310 KASSERT((map->flags & VM_MAP_DYING) == 0);
4311
4312 mutex_enter(&map->misc_lock);
4313 KASSERT(vm->vm_refcnt > 0);
4314 vm->vm_refcnt++;
4315 mutex_exit(&map->misc_lock);
4316 }
4317
4318 /*
4319 * uvmspace_free: free a vmspace data structure
4320 */
4321
4322 void
4323 uvmspace_free(struct vmspace *vm)
4324 {
4325 struct vm_map_entry *dead_entries;
4326 struct vm_map *map = &vm->vm_map;
4327 int n;
4328
4329 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4330
4331 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4332 mutex_enter(&map->misc_lock);
4333 n = --vm->vm_refcnt;
4334 mutex_exit(&map->misc_lock);
4335 if (n > 0)
4336 return;
4337
4338 /*
4339 * at this point, there should be no other references to the map.
4340 * delete all of the mappings, then destroy the pmap.
4341 */
4342
4343 map->flags |= VM_MAP_DYING;
4344 pmap_remove_all(map->pmap);
4345 #ifdef SYSVSHM
4346 /* Get rid of any SYSV shared memory segments. */
4347 if (vm->vm_shm != NULL)
4348 shmexit(vm);
4349 #endif
4350 if (map->nentries) {
4351 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4352 &dead_entries, NULL, 0);
4353 if (dead_entries != NULL)
4354 uvm_unmap_detach(dead_entries, 0);
4355 }
4356 KASSERT(map->nentries == 0);
4357 KASSERT(map->size == 0);
4358 mutex_destroy(&map->misc_lock);
4359 mutex_destroy(&map->mutex);
4360 rw_destroy(&map->lock);
4361 cv_destroy(&map->cv);
4362 pmap_destroy(map->pmap);
4363 pool_cache_put(&uvm_vmspace_cache, vm);
4364 }
4365
4366 /*
4367 * F O R K - m a i n e n t r y p o i n t
4368 */
4369 /*
4370 * uvmspace_fork: fork a process' main map
4371 *
4372 * => create a new vmspace for child process from parent.
4373 * => parent's map must not be locked.
4374 */
4375
4376 struct vmspace *
4377 uvmspace_fork(struct vmspace *vm1)
4378 {
4379 struct vmspace *vm2;
4380 struct vm_map *old_map = &vm1->vm_map;
4381 struct vm_map *new_map;
4382 struct vm_map_entry *old_entry;
4383 struct vm_map_entry *new_entry;
4384 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4385
4386 vm_map_lock(old_map);
4387
4388 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4389 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4390 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4391 new_map = &vm2->vm_map; /* XXX */
4392
4393 old_entry = old_map->header.next;
4394 new_map->size = old_map->size;
4395
4396 /*
4397 * go entry-by-entry
4398 */
4399
4400 while (old_entry != &old_map->header) {
4401
4402 /*
4403 * first, some sanity checks on the old entry
4404 */
4405
4406 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4407 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4408 !UVM_ET_ISNEEDSCOPY(old_entry));
4409
4410 switch (old_entry->inheritance) {
4411 case MAP_INHERIT_NONE:
4412
4413 /*
4414 * drop the mapping, modify size
4415 */
4416 new_map->size -= old_entry->end - old_entry->start;
4417 break;
4418
4419 case MAP_INHERIT_SHARE:
4420
4421 /*
4422 * share the mapping: this means we want the old and
4423 * new entries to share amaps and backing objects.
4424 */
4425 /*
4426 * if the old_entry needs a new amap (due to prev fork)
4427 * then we need to allocate it now so that we have
4428 * something we own to share with the new_entry. [in
4429 * other words, we need to clear needs_copy]
4430 */
4431
4432 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4433 /* get our own amap, clears needs_copy */
4434 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4435 0, 0);
4436 /* XXXCDC: WAITOK??? */
4437 }
4438
4439 new_entry = uvm_mapent_alloc(new_map, 0);
4440 /* old_entry -> new_entry */
4441 uvm_mapent_copy(old_entry, new_entry);
4442
4443 /* new pmap has nothing wired in it */
4444 new_entry->wired_count = 0;
4445
4446 /*
4447 * gain reference to object backing the map (can't
4448 * be a submap, already checked this case).
4449 */
4450
4451 if (new_entry->aref.ar_amap)
4452 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4453
4454 if (new_entry->object.uvm_obj &&
4455 new_entry->object.uvm_obj->pgops->pgo_reference)
4456 new_entry->object.uvm_obj->
4457 pgops->pgo_reference(
4458 new_entry->object.uvm_obj);
4459
4460 /* insert entry at end of new_map's entry list */
4461 uvm_map_entry_link(new_map, new_map->header.prev,
4462 new_entry);
4463
4464 break;
4465
4466 case MAP_INHERIT_COPY:
4467
4468 /*
4469 * copy-on-write the mapping (using mmap's
4470 * MAP_PRIVATE semantics)
4471 *
4472 * allocate new_entry, adjust reference counts.
4473 * (note that new references are read-only).
4474 */
4475
4476 new_entry = uvm_mapent_alloc(new_map, 0);
4477 /* old_entry -> new_entry */
4478 uvm_mapent_copy(old_entry, new_entry);
4479
4480 if (new_entry->aref.ar_amap)
4481 uvm_map_reference_amap(new_entry, 0);
4482
4483 if (new_entry->object.uvm_obj &&
4484 new_entry->object.uvm_obj->pgops->pgo_reference)
4485 new_entry->object.uvm_obj->pgops->pgo_reference
4486 (new_entry->object.uvm_obj);
4487
4488 /* new pmap has nothing wired in it */
4489 new_entry->wired_count = 0;
4490
4491 new_entry->etype |=
4492 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4493 uvm_map_entry_link(new_map, new_map->header.prev,
4494 new_entry);
4495
4496 /*
4497 * the new entry will need an amap. it will either
4498 * need to be copied from the old entry or created
4499 * from scratch (if the old entry does not have an
4500 * amap). can we defer this process until later
4501 * (by setting "needs_copy") or do we need to copy
4502 * the amap now?
4503 *
4504 * we must copy the amap now if any of the following
4505 * conditions hold:
4506 * 1. the old entry has an amap and that amap is
4507 * being shared. this means that the old (parent)
4508 * process is sharing the amap with another
4509 * process. if we do not clear needs_copy here
4510 * we will end up in a situation where both the
4511 * parent and child processes are referring to the
4512 * same amap with "needs_copy" set. if the
4513 * parent write-faults, the fault routine will
4514 * clear "needs_copy" in the parent by allocating
4515 * a new amap. this is wrong because the
4516 * parent is supposed to be sharing the old amap
4517 * and the new amap will break that.
4518 *
4519 * 2. if the old entry has an amap and a non-zero
4520 * wire count then we are going to have to call
4521 * amap_cow_now to avoid page faults in the
4522 * parent process. since amap_cow_now requires
4523 * "needs_copy" to be clear we might as well
4524 * clear it here as well.
4525 *
4526 */
4527
4528 if (old_entry->aref.ar_amap != NULL) {
4529 if ((amap_flags(old_entry->aref.ar_amap) &
4530 AMAP_SHARED) != 0 ||
4531 VM_MAPENT_ISWIRED(old_entry)) {
4532
4533 amap_copy(new_map, new_entry,
4534 AMAP_COPY_NOCHUNK, 0, 0);
4535 /* XXXCDC: M_WAITOK ... ok? */
4536 }
4537 }
4538
4539 /*
4540 * if the parent's entry is wired down, then the
4541 * parent process does not want page faults on
4542 * access to that memory. this means that we
4543 * cannot do copy-on-write because we can't write
4544 * protect the old entry. in this case we
4545 * resolve all copy-on-write faults now, using
4546 * amap_cow_now. note that we have already
4547 * allocated any needed amap (above).
4548 */
4549
4550 if (VM_MAPENT_ISWIRED(old_entry)) {
4551
4552 /*
4553 * resolve all copy-on-write faults now
4554 * (note that there is nothing to do if
4555 * the old mapping does not have an amap).
4556 */
4557 if (old_entry->aref.ar_amap)
4558 amap_cow_now(new_map, new_entry);
4559
4560 } else {
4561
4562 /*
4563 * set up mappings to trigger copy-on-write faults:
4564 * we must write-protect the parent if it has
4565 * an amap and it is not already "needs_copy"...
4566 * if it is already "needs_copy" then the parent
4567 * has already been write-protected by a previous
4568 * fork operation.
4569 */
4570
4571 if (old_entry->aref.ar_amap &&
4572 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4573 if (old_entry->max_protection & VM_PROT_WRITE) {
4574 pmap_protect(old_map->pmap,
4575 old_entry->start,
4576 old_entry->end,
4577 old_entry->protection &
4578 ~VM_PROT_WRITE);
4579 }
4580 old_entry->etype |= UVM_ET_NEEDSCOPY;
4581 }
4582 }
4583 break;
4584 } /* end of switch statement */
4585 old_entry = old_entry->next;
4586 }
4587
4588 pmap_update(old_map->pmap);
4589 vm_map_unlock(old_map);
4590
4591 #ifdef SYSVSHM
4592 if (vm1->vm_shm)
4593 shmfork(vm1, vm2);
4594 #endif
4595
4596 #ifdef PMAP_FORK
4597 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4598 #endif
4599
4600 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4601 return (vm2);
4602 }
4603
4604
4605 /*
4606 * in-kernel map entry allocation.
4607 */
4608
4609 struct uvm_kmapent_hdr {
4610 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4611 int ukh_nused;
4612 struct vm_map_entry *ukh_freelist;
4613 struct vm_map *ukh_map;
4614 struct vm_map_entry ukh_entries[0];
4615 };
4616
4617 #define UVM_KMAPENT_CHUNK \
4618 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4619 / sizeof(struct vm_map_entry))
4620
4621 #define UVM_KHDR_FIND(entry) \
4622 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
4623
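/*
 * Worked example (illustrative only; the real sizes are machine and
 * configuration dependent): with a 4096-byte PAGE_SIZE, a 32-byte
 * struct uvm_kmapent_hdr and an 80-byte struct vm_map_entry,
 * UVM_KMAPENT_CHUNK would be (4096 - 32) / 80 = 50 entries per page.
 * UVM_KHDR_FIND simply masks an entry's address down to its page
 * boundary, which works because a chunk (header plus entries) never
 * crosses a page boundary.
 */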
4624
4625 #ifdef DIAGNOSTIC
4626 static struct vm_map *
4627 uvm_kmapent_map(struct vm_map_entry *entry)
4628 {
4629 const struct uvm_kmapent_hdr *ukh;
4630
4631 ukh = UVM_KHDR_FIND(entry);
4632 return ukh->ukh_map;
4633 }
4634 #endif
4635
4636 static inline struct vm_map_entry *
4637 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4638 {
4639 struct vm_map_entry *entry;
4640
4641 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4642 KASSERT(ukh->ukh_nused >= 0);
4643
4644 entry = ukh->ukh_freelist;
4645 if (entry) {
4646 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4647 == UVM_MAP_KERNEL);
4648 ukh->ukh_freelist = entry->next;
4649 ukh->ukh_nused++;
4650 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4651 } else {
4652 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4653 }
4654
4655 return entry;
4656 }
4657
4658 static inline void
4659 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4660 {
4661
4662 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4663 == UVM_MAP_KERNEL);
4664 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4665 KASSERT(ukh->ukh_nused > 0);
4666 KASSERT(ukh->ukh_freelist != NULL ||
4667 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4668 KASSERT(ukh->ukh_freelist == NULL ||
4669 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4670
4671 ukh->ukh_nused--;
4672 entry->next = ukh->ukh_freelist;
4673 ukh->ukh_freelist = entry;
4674 }
4675
4676 /*
4677 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4678 */
4679
4680 static struct vm_map_entry *
4681 uvm_kmapent_alloc(struct vm_map *map, int flags)
4682 {
4683 struct vm_page *pg;
4684 struct uvm_kmapent_hdr *ukh;
4685 struct vm_map_entry *entry;
4686 #ifndef PMAP_MAP_POOLPAGE
4687 struct uvm_map_args args;
4688 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4689 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4690 int error;
4691 #endif
4692 vaddr_t va;
4693 int i;
4694
4695 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4696 KDASSERT(kernel_map != NULL);
4697 KASSERT(vm_map_pmap(map) == pmap_kernel());
4698
4699 UVMMAP_EVCNT_INCR(uke_alloc);
4700 entry = NULL;
4701 again:
4702 /*
4703 * try to grab an entry from freelist.
4704 */
4705 mutex_spin_enter(&uvm_kentry_lock);
4706 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4707 if (ukh) {
4708 entry = uvm_kmapent_get(ukh);
4709 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4710 LIST_REMOVE(ukh, ukh_listq);
4711 }
4712 mutex_spin_exit(&uvm_kentry_lock);
4713
4714 if (entry)
4715 return entry;
4716
4717 /*
4718 * there's no free entry for this vm_map.
4719 * now we need to allocate some vm_map_entries.
4720 * for simplicity, always allocate one page chunk of them at once.
4721 */
4722
4723 pg = uvm_pagealloc(NULL, 0, NULL,
4724 (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
4725 if (__predict_false(pg == NULL)) {
4726 if (flags & UVM_FLAG_NOWAIT)
4727 return NULL;
4728 uvm_wait("kme_alloc");
4729 goto again;
4730 }
4731
4732 #ifdef PMAP_MAP_POOLPAGE
4733 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
4734 KASSERT(va != 0);
4735 #else
4736 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4737 0, mapflags, &args);
4738 if (error) {
4739 uvm_pagefree(pg);
4740 return NULL;
4741 }
4742
4743 va = args.uma_start;
4744
4745 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
4746 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
4747 pmap_update(vm_map_pmap(map));
4748
4749 #endif
4750 ukh = (void *)va;
4751
4752 /*
4753 * use the last entry for ukh itself.
4754 */
4755
4756 i = UVM_KMAPENT_CHUNK - 1;
4757 #ifndef PMAP_MAP_POOLPAGE
4758 entry = &ukh->ukh_entries[i--];
4759 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4760 error = uvm_map_enter(map, &args, entry);
4761 KASSERT(error == 0);
4762 #endif
4763
4764 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4765 ukh->ukh_map = map;
4766 ukh->ukh_freelist = NULL;
4767 for (; i >= 1; i--) {
4768 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4769
4770 xentry->flags = UVM_MAP_KERNEL;
4771 uvm_kmapent_put(ukh, xentry);
4772 }
4773 #ifdef PMAP_MAP_POOLPAGE
4774 KASSERT(ukh->ukh_nused == 1);
4775 #else
4776 KASSERT(ukh->ukh_nused == 2);
4777 #endif
4778
4779 mutex_spin_enter(&uvm_kentry_lock);
4780 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4781 ukh, ukh_listq);
4782 mutex_spin_exit(&uvm_kentry_lock);
4783
4784 /*
4785 * return first entry.
4786 */
4787
4788 entry = &ukh->ukh_entries[0];
4789 entry->flags = UVM_MAP_KERNEL;
4790 UVMMAP_EVCNT_INCR(ukh_alloc);
4791
4792 return entry;
4793 }
4794
4795 /*
4796 * uvm_mapent_free: free map entry for in-kernel map
4797 */
4798
4799 static void
4800 uvm_kmapent_free(struct vm_map_entry *entry)
4801 {
4802 struct uvm_kmapent_hdr *ukh;
4803 struct vm_page *pg;
4804 struct vm_map *map;
4805 #ifndef PMAP_UNMAP_POOLPAGE
4806 struct pmap *pmap;
4807 struct vm_map_entry *deadentry;
4808 #endif
4809 vaddr_t va;
4810 paddr_t pa;
4811
4812 UVMMAP_EVCNT_INCR(uke_free);
4813 ukh = UVM_KHDR_FIND(entry);
4814 map = ukh->ukh_map;
4815
4816 mutex_spin_enter(&uvm_kentry_lock);
4817 uvm_kmapent_put(ukh, entry);
4818 #ifdef PMAP_UNMAP_POOLPAGE
4819 if (ukh->ukh_nused > 0) {
4820 #else
4821 if (ukh->ukh_nused > 1) {
4822 #endif
4823 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4824 LIST_INSERT_HEAD(
4825 &vm_map_to_kernel(map)->vmk_kentry_free,
4826 ukh, ukh_listq);
4827 mutex_spin_exit(&uvm_kentry_lock);
4828 return;
4829 }
4830
4831 /*
4832 * now we can free this ukh.
4833 *
4834 * however, keep an empty ukh to avoid ping-pong.
4835 */
4836
4837 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4838 LIST_NEXT(ukh, ukh_listq) == NULL) {
4839 mutex_spin_exit(&uvm_kentry_lock);
4840 return;
4841 }
4842 LIST_REMOVE(ukh, ukh_listq);
4843 mutex_spin_exit(&uvm_kentry_lock);
4844
4845 va = (vaddr_t)ukh;
4846
4847 #ifdef PMAP_UNMAP_POOLPAGE
4848 KASSERT(ukh->ukh_nused == 0);
4849 pa = PMAP_UNMAP_POOLPAGE(va);
4850 KASSERT(pa != 0);
4851 #else
4852 KASSERT(ukh->ukh_nused == 1);
4853
4854 /*
4855 * remove map entry for ukh itself.
4856 */
4857
4858 KASSERT((va & PAGE_MASK) == 0);
4859 vm_map_lock(map);
4860 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4861 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4862 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4863 KASSERT(deadentry->next == NULL);
4864 KASSERT(deadentry == &ukh->ukh_entries[UVM_KMAPENT_CHUNK - 1]);
4865
4866 /*
4867 * unmap the page from pmap and free it.
4868 */
4869
4870 pmap = vm_map_pmap(map);
4871 KASSERT(pmap == pmap_kernel());
4872 if (!pmap_extract(pmap, va, &pa))
4873 panic("%s: no mapping", __func__);
4874 pmap_kremove(va, PAGE_SIZE);
4875 pmap_update(vm_map_pmap(map));
4876 vm_map_unlock(map);
4877 #endif /* !PMAP_UNMAP_POOLPAGE */
4878 pg = PHYS_TO_VM_PAGE(pa);
4879 uvm_pagefree(pg);
4880 UVMMAP_EVCNT_INCR(ukh_free);
4881 }
4882
4883 static vsize_t
4884 uvm_kmapent_overhead(vsize_t size)
4885 {
4886
4887 /*
4888 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4889 * as the min allocation unit is PAGE_SIZE.
4890 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4891 * one of them is used to map the page itself.
4892 */
4893
4894 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4895 PAGE_SIZE;
4896 }
4897
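/*
 * Worked example (illustrative only, reusing the assumed sizes above so
 * UVM_KMAPENT_CHUNK == 50): a 1 MiB mapping can need at most
 * howmany(1048576, 4096) = 256 unmerged entries; each entry page yields
 * 50 - 1 = 49 usable entries, so uvm_kmapent_overhead() reports
 * howmany(256, 49) = 6 pages, i.e. 24576 bytes of worst-case overhead.
 */
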
4898 /*
4899 * map entry reservation
4900 */
4901
4902 /*
4903 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4904 *
4905 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4906 * => caller shouldn't hold map locked.
4907 */
4908 int
4909 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4910 int nentries, int flags)
4911 {
4912
4913 umr->umr_nentries = 0;
4914
4915 if ((flags & UVM_FLAG_QUANTUM) != 0)
4916 return 0;
4917
4918 if (!VM_MAP_USE_KMAPENT(map))
4919 return 0;
4920
4921 while (nentries--) {
4922 struct vm_map_entry *ent;
4923 ent = uvm_kmapent_alloc(map, flags);
4924 if (!ent) {
4925 uvm_mapent_unreserve(map, umr);
4926 return ENOMEM;
4927 }
4928 UMR_PUTENTRY(umr, ent);
4929 }
4930
4931 return 0;
4932 }
4933
4934 /*
4935 * uvm_mapent_unreserve:
4936 *
4937 * => caller shouldn't hold map locked.
4938 * => never fail or sleep.
4939 */
4940 void
4941 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4942 {
4943
4944 while (!UMR_EMPTY(umr))
4945 uvm_kmapent_free(UMR_GETENTRY(umr));
4946 }
4947
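/*
 * Illustrative only: the intended pattern is to reserve entries while the
 * map is unlocked, lock the map, perform the clipping work, unlock, and
 * then release whatever was not consumed.  uvm_unmap1() below is the
 * in-tree user of this pattern; the hypothetical helper here just
 * restates it.
 */
#if 0
static int
example_clip_operation(struct vm_map *map)
{
	struct uvm_mapent_reservation umr;
	int error;

	error = uvm_mapent_reserve(map, &umr, 2, 0);
	if (error)
		return error;
	vm_map_lock(map);
	/* ... clipping/unmapping work that may consume entries from umr ... */
	vm_map_unlock(map);
	uvm_mapent_unreserve(map, &umr);
	return 0;
}
#endif
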
4948 /*
4949 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4950 *
4951 * => called with map locked.
4952 * => return non-zero if successfully merged.
4953 */
4954
4955 int
4956 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4957 {
4958 struct uvm_object *uobj;
4959 struct vm_map_entry *next;
4960 struct vm_map_entry *prev;
4961 vsize_t size;
4962 int merged = 0;
4963 bool copying;
4964 int newetype;
4965
4966 if (VM_MAP_USE_KMAPENT(map)) {
4967 return 0;
4968 }
4969 if (entry->aref.ar_amap != NULL) {
4970 return 0;
4971 }
4972 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4973 return 0;
4974 }
4975
4976 uobj = entry->object.uvm_obj;
4977 size = entry->end - entry->start;
4978 copying = (flags & UVM_MERGE_COPYING) != 0;
4979 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4980
4981 next = entry->next;
4982 if (next != &map->header &&
4983 next->start == entry->end &&
4984 ((copying && next->aref.ar_amap != NULL &&
4985 amap_refs(next->aref.ar_amap) == 1) ||
4986 (!copying && next->aref.ar_amap == NULL)) &&
4987 UVM_ET_ISCOMPATIBLE(next, newetype,
4988 uobj, entry->flags, entry->protection,
4989 entry->max_protection, entry->inheritance, entry->advice,
4990 entry->wired_count) &&
4991 (uobj == NULL || entry->offset + size == next->offset)) {
4992 int error;
4993
4994 if (copying) {
4995 error = amap_extend(next, size,
4996 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4997 } else {
4998 error = 0;
4999 }
5000 if (error == 0) {
5001 if (uobj) {
5002 if (uobj->pgops->pgo_detach) {
5003 uobj->pgops->pgo_detach(uobj);
5004 }
5005 }
5006
5007 entry->end = next->end;
5008 clear_hints(map, next);
5009 uvm_map_entry_unlink(map, next);
5010 if (copying) {
5011 entry->aref = next->aref;
5012 entry->etype &= ~UVM_ET_NEEDSCOPY;
5013 }
5014 uvm_map_check(map, "trymerge forwardmerge");
5015 uvm_mapent_free_merged(map, next);
5016 merged++;
5017 }
5018 }
5019
5020 prev = entry->prev;
5021 if (prev != &map->header &&
5022 prev->end == entry->start &&
5023 ((copying && !merged && prev->aref.ar_amap != NULL &&
5024 amap_refs(prev->aref.ar_amap) == 1) ||
5025 (!copying && prev->aref.ar_amap == NULL)) &&
5026 UVM_ET_ISCOMPATIBLE(prev, newetype,
5027 uobj, entry->flags, entry->protection,
5028 entry->max_protection, entry->inheritance, entry->advice,
5029 entry->wired_count) &&
5030 (uobj == NULL ||
5031 prev->offset + prev->end - prev->start == entry->offset)) {
5032 int error;
5033
5034 if (copying) {
5035 error = amap_extend(prev, size,
5036 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
5037 } else {
5038 error = 0;
5039 }
5040 if (error == 0) {
5041 if (uobj) {
5042 if (uobj->pgops->pgo_detach) {
5043 uobj->pgops->pgo_detach(uobj);
5044 }
5045 entry->offset = prev->offset;
5046 }
5047
5048 entry->start = prev->start;
5049 clear_hints(map, prev);
5050 uvm_map_entry_unlink(map, prev);
5051 if (copying) {
5052 entry->aref = prev->aref;
5053 entry->etype &= ~UVM_ET_NEEDSCOPY;
5054 }
5055 uvm_map_check(map, "trymerge backmerge");
5056 uvm_mapent_free_merged(map, prev);
5057 merged++;
5058 }
5059 }
5060
5061 return merged;
5062 }
5063
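/*
 * Illustrative only: a forward merge needs the two entries to be adjacent
 * in VA (next->start == entry->end), to agree on etype, protection,
 * inheritance, advice and wiring, and, when there is a backing object, to
 * reference it at contiguous offsets (entry->offset + size ==
 * next->offset).  For example, two back-to-back mappings of the same
 * object at offsets 0 and 0x10000, each 0x10000 bytes long and otherwise
 * identical, can collapse into a single entry covering both ranges.
 */
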
5064 /*
5065 * uvm_map_create: create map
5066 */
5067
5068 struct vm_map *
5069 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5070 {
5071 struct vm_map *result;
5072
5073 result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
5074 uvm_map_setup(result, vmin, vmax, flags);
5075 result->pmap = pmap;
5076 return(result);
5077 }
5078
5079 /*
5080 * uvm_map_setup: init map
5081 *
5082 * => map must not be in service yet.
5083 */
5084
5085 void
5086 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5087 {
5088 int ipl;
5089
5090 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
5091 map->header.next = map->header.prev = &map->header;
5092 map->nentries = 0;
5093 map->size = 0;
5094 map->ref_count = 1;
5095 vm_map_setmin(map, vmin);
5096 vm_map_setmax(map, vmax);
5097 map->flags = flags;
5098 map->first_free = &map->header;
5099 map->hint = &map->header;
5100 map->timestamp = 0;
5101 map->busy = NULL;
5102
5103 if ((flags & VM_MAP_INTRSAFE) != 0) {
5104 ipl = IPL_VM;
5105 } else {
5106 ipl = IPL_NONE;
5107 }
5108
5109 rw_init(&map->lock);
5110 cv_init(&map->cv, "vm_map");
5111 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5112 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
5113 }
5114
5115
5116 /*
5117 * U N M A P - m a i n e n t r y p o i n t
5118 */
5119
5120 /*
5121 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
5122 *
5123 * => caller must check alignment and size
5124 * => map must be unlocked (we will lock it)
5125 * => flags is UVM_FLAG_QUANTUM or 0.
5126 */
5127
5128 void
5129 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5130 {
5131 struct vm_map_entry *dead_entries;
5132 struct uvm_mapent_reservation umr;
5133 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5134
5135 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5136 map, start, end, 0);
5137 if (map == kernel_map) {
5138 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5139 }
5140 /*
5141 * work now done by helper functions. wipe the pmap mappings and then
5142 * detach from the dead entries...
5143 */
5144 uvm_mapent_reserve(map, &umr, 2, flags);
5145 vm_map_lock(map);
5146 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5147 vm_map_unlock(map);
5148 uvm_mapent_unreserve(map, &umr);
5149
5150 if (dead_entries != NULL)
5151 uvm_unmap_detach(dead_entries, 0);
5152
5153 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5154 }
5155
5156
5157 /*
5158 * uvm_map_reference: add reference to a map
5159 *
5160 * => map need not be locked (we use misc_lock).
5161 */
5162
5163 void
5164 uvm_map_reference(struct vm_map *map)
5165 {
5166 mutex_enter(&map->misc_lock);
5167 map->ref_count++;
5168 mutex_exit(&map->misc_lock);
5169 }
5170
5171 struct vm_map_kernel *
5172 vm_map_to_kernel(struct vm_map *map)
5173 {
5174
5175 KASSERT(VM_MAP_IS_KERNEL(map));
5176
5177 return (struct vm_map_kernel *)map;
5178 }
5179
5180 bool
5181 vm_map_starved_p(struct vm_map *map)
5182 {
5183
5184 if ((map->flags & VM_MAP_WANTVA) != 0) {
5185 return true;
5186 }
5187 /* XXX: rough heuristic: consider the map starved once more than 15/16 of its VA range is in use. */
5188 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5189 return true;
5190 }
5191 return false;
5192 }
5193
5194 #if defined(DDB) || defined(DEBUGPRINT)
5195
5196 /*
5197 * uvm_map_printit: actually prints the map
5198 */
5199
5200 void
5201 uvm_map_printit(struct vm_map *map, bool full,
5202 void (*pr)(const char *, ...))
5203 {
5204 struct vm_map_entry *entry;
5205
5206 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
5207 vm_map_max(map));
5208 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
5209 map->nentries, map->size, map->ref_count, map->timestamp,
5210 map->flags);
5211 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5212 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5213 if (!full)
5214 return;
5215 for (entry = map->header.next; entry != &map->header;
5216 entry = entry->next) {
5217 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
5218 entry, entry->start, entry->end, entry->object.uvm_obj,
5219 (long long)entry->offset, entry->aref.ar_amap,
5220 entry->aref.ar_pageoff);
5221 (*pr)(
5222 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5223 "wc=%d, adv=%d\n",
5224 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5225 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5226 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5227 entry->protection, entry->max_protection,
5228 entry->inheritance, entry->wired_count, entry->advice);
5229 }
5230 }
5231
5232 void
5233 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5234 {
5235 struct vm_map *map;
5236
5237 for (map = kernel_map;;) {
5238 struct vm_map_entry *entry;
5239
5240 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5241 break;
5242 }
5243 (*pr)("%p is %p+%zu from VMMAP %p\n",
5244 (void *)addr, (void *)entry->start,
5245 (size_t)(addr - (uintptr_t)entry->start), map);
5246 if (!UVM_ET_ISSUBMAP(entry)) {
5247 break;
5248 }
5249 map = entry->object.sub_map;
5250 }
5251 }
5252
5253 #endif /* DDB || DEBUGPRINT */
5254
5255 #ifndef __USER_VA0_IS_SAFE
5256 static int
5257 sysctl_user_va0_disable(SYSCTLFN_ARGS)
5258 {
5259 struct sysctlnode node;
5260 int t, error;
5261
5262 node = *rnode;
5263 node.sysctl_data = &t;
5264 t = user_va0_disable;
5265 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5266 if (error || newp == NULL)
5267 return (error);
5268
5269 /* lower only at securelevel < 1 */
5270 if (!t && user_va0_disable &&
5271 kauth_authorize_system(l->l_cred,
5272 KAUTH_SYSTEM_CHSYSFLAGS /* XXX */, 0,
5273 NULL, NULL, NULL))
5274 return EPERM;
5275
5276 user_va0_disable = !!t;
5277 return 0;
5278 }
5279
5280 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
5281 {
5282
5283 sysctl_createv(clog, 0, NULL, NULL,
5284 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5285 CTLTYPE_INT, "user_va0_disable",
5286 SYSCTL_DESCR("Disable VA 0"),
5287 sysctl_user_va0_disable, 0, &user_va0_disable, 0,
5288 CTL_VM, CTL_CREATE, CTL_EOL);
5289 }
5290 #endif
5291