/*	$NetBSD: uvm_map.c,v 1.287 2010/02/08 19:02:33 joerg Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.287 2010/02/08 19:02:33 joerg Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */
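
/*
 * For reference, with the kernel option UVMMAP_COUNTERS enabled,
 * UVMMAP_EVCNT_DEFINE(ubackmerge) below expands to roughly this
 * (a sketch of the macro expansion, not code that appears here):
 *
 *	struct evcnt uvmmap_evcnt_ubackmerge =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uvmmap", "ubackmerge");
 *	EVCNT_ATTACH_STATIC(uvmmap_evcnt_ubackmerge);
 *
 * i.e. each name becomes a statically attached event counter, and
 * UVMMAP_EVCNT_INCR(ubackmerge) then bumps its ev_count.
 */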

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_list)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)
UVMMAP_EVCNT_DEFINE(mlk_listloop)

UVMMAP_EVCNT_DEFINE(uke_alloc)
UVMMAP_EVCNT_DEFINE(uke_free)
UVMMAP_EVCNT_DEFINE(ukh_alloc)
UVMMAP_EVCNT_DEFINE(ukh_free)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
 * for the vm_map.
 */
extern struct vm_map *pager_map; /* XXX */
#define	VM_MAP_USE_KMAPENT_FLAGS(flags) \
	(((flags) & VM_MAP_INTRSAFE) != 0)
#define	VM_MAP_USE_KMAPENT(map) \
	(VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
	== 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))
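
/*
 * Illustrative example (not from the original file): two adjacent
 * anonymous, copy-on-write entries with identical protection,
 * inheritance, advice and a wired count of 0 are compatible and can
 * be merged into one entry; the same two entries become incompatible
 * as soon as one of them is wired, is marked UVM_MAP_NOMERGE, or maps
 * a different uvm_object.
 */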

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define	uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define	SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define	VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)
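
/*
 * Worked example (illustrative, with made-up addresses): on a map
 * covering [0x1000, 0x9000), VM_MAP_RANGE_CHECK clamps a request for
 * [0x0000, 0xa000) to [0x1000, 0x9000); a request entirely below the
 * map, e.g. [0x0000, 0x0800), first has start clamped to 0x1000 and
 * is then collapsed to the empty range [0x0800, 0x0800) because
 * start > end.
 */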

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static struct vm_map_entry *
		uvm_mapent_alloc_split(struct vm_map *,
		    const struct vm_map_entry *, int,
		    struct uvm_mapent_reservation *);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, const char *,
		    int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __FILE__, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */
static struct vm_map_entry *
		uvm_kmapent_alloc(struct vm_map *, int);
static void	uvm_kmapent_free(struct vm_map_entry *);
static vsize_t	uvm_kmapent_overhead(vsize_t);

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

CTASSERT(offsetof(struct vm_map_entry, rb_node) == 0);
#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL \
	    : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

static int
uvm_map_compare_nodes(const struct rb_node *nparent,
    const struct rb_node *nkey)
{
	const struct vm_map_entry *eparent = (const void *) nparent;
	const struct vm_map_entry *ekey = (const void *) nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (ekey->start < eparent->start)
		return -1;
	if (ekey->start >= eparent->end)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(const struct rb_node *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = (const void *) nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (va < eparent->start)
		return -1;
	if (va >= eparent->end)
		return 1;
	return 0;
}

static const struct rb_tree_ops uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
};

static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{
	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendants.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendants, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
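
/*
 * Illustrative example (made-up numbers): with entries A [0x1000,0x2000),
 * B [0x3000,0x4000) and C [0x8000,0x9000) in address order, A's gap is
 * 0x1000 (up to B's start) and B's gap is 0x4000 (up to C's start).
 * If A and C are the tree children of B, then B's maxgap is
 * max(B->gap, A->maxgap, C->maxgap), so a free-space search can skip
 * any subtree whose maxgap is smaller than the allocation being placed.
 */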

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work our way towards the root, we know entry's
		 * maxgap value is OK, but its brother's may now be
		 * out-of-date due to rebalancing.  So refresh it.
		 */
		brother = (struct vm_map_entry *)
		    parent->rb_node.rb_nodes[RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	if (!rb_tree_insert_node(&map->rb_tree, &entry->rb_node))
		panic("uvm_rb_insert: map %p: duplicate entry?", map);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, &entry->rb_node);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define	uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define	uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %lx != %lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, the gap computation
		 * wraps around (tmp->gap is unsigned) and will likely
		 * exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %lx != %lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("corrupt: 0x%"PRIxVADDR" >= 0x%"PRIxVADDR"\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
		    RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
		    RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = (void *)rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

#ifdef DIAGNOSTIC
static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
#endif

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => Note that "intrsafe" maps use only exclusive, spin locks.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		mutex_spin_enter(&map->mutex);
		return;
	}

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL)
			break;
		if (map->busy == curlwp)
			break;
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL)
			cv_wait(&map->cv, &map->misc_lock);
		mutex_exit(&map->misc_lock);
	}

	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		return mutex_tryenter(&map->mutex);
	if (!rw_tryenter(&map->lock, RW_WRITER))
		return false;
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}

	map->timestamp++;
	return true;
}
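
/*
 * Typical usage sketch (mirrors the retry loop in uvm_map_prepare
 * below; illustrative only): opportunistically try the lock first and
 * fall back to a blocking acquisition, which may sleep.
 *
 *	if (vm_map_lock_try(map) == false) {
 *		if ((flags & UVM_FLAG_TRYLOCK) != 0)
 *			return EAGAIN;
 *		vm_map_lock(map);	// could sleep here
 *	}
 *	// ... modify the map ...
 *	vm_map_unlock(map);
 */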

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		mutex_spin_exit(&map->mutex);
	else {
		KASSERT(rw_write_held(&map->lock));
		KASSERT(map->busy == NULL || map->busy == curlwp);
		rw_exit(&map->lock);
	}
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 * want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}
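
/*
 * Sketch of the busy protocol (illustrative; see "LOCKING PROTOCOL
 * NOTES" in uvm_map.h for the authoritative description).  A thread
 * that must unlock the map across a blocking operation can mark it
 * busy so that other writers keep out, then later take the lock back:
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);	// we now own the busy mark
 *	vm_map_unlock(map);
 *	// ... perform a blocking operation, e.g. I/O ...
 *	vm_map_lock(map);	// the busy owner is let through
 *	vm_map_unbusy(map);	// wake threads waiting in vm_map_lock()
 *	vm_map_unlock(map);
 */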

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		return mutex_owned(&map->mutex);
	} else {
		return rw_write_held(&map->lock);
	}
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (VM_MAP_USE_KMAPENT(map)) {
		me = uvm_kmapent_alloc(map, flags);
	} else {
		me = pool_cache_get(&uvm_map_entry_cache, pflags);
		if (__predict_false(me == NULL))
			return NULL;
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return (me);
}

/*
 * uvm_mapent_alloc_split: allocate a map entry for clipping.
 *
 * => map must be locked by caller if UVM_MAP_QUANTUM is set.
 */

static struct vm_map_entry *
uvm_mapent_alloc_split(struct vm_map *map,
    const struct vm_map_entry *old_entry, int flags,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *me;

	KASSERT(!VM_MAP_USE_KMAPENT(map) ||
	    (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));

	if (old_entry->flags & UVM_MAP_QUANTUM) {
		struct vm_map_kernel *vmk = vm_map_to_kernel(map);

		KASSERT(vm_map_locked_p(map));
		me = vmk->vmk_merged_entries;
		KASSERT(me);
		vmk->vmk_merged_entries = me->next;
		KASSERT(me->flags & UVM_MAP_QUANTUM);
	} else {
		me = uvm_mapent_alloc(map, flags);
	}

	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_KERNEL) {
		uvm_kmapent_free(me);
	} else {
		pool_cache_put(&uvm_map_entry_cache, me);
	}
}

/*
 * uvm_mapent_free_merged: free merged map entry
 *
 * => keep the entry if needed.
 * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
 * => map should be locked if UVM_MAP_QUANTUM is set.
 */

static void
uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
{

	KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);

	if (me->flags & UVM_MAP_QUANTUM) {
		/*
		 * keep this entry for later splitting.
		 */
		struct vm_map_kernel *vmk;

		KASSERT(vm_map_locked_p(map));
		KASSERT(VM_MAP_IS_KERNEL(map));
		KASSERT(!VM_MAP_USE_KMAPENT(map) ||
		    (me->flags & UVM_MAP_KERNEL));

		vmk = vm_map_to_kernel(map);
		me->next = vmk->vmk_merged_entries;
		vmk->vmk_merged_entries = me;
	} else {
		uvm_mapent_free(me);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

/*
 * uvm_mapent_overhead: calculate maximum kva overhead necessary for
 * map entries.
 *
 * => size and flags are the same as uvm_km_suballoc's ones.
 */

vsize_t
uvm_mapent_overhead(vsize_t size, int flags)
{

	if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
		return uvm_kmapent_overhead(size);
	}
	return 0;
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);

	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after
 * uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}
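
/*
 * Worked example (illustrative addresses): splitting an object-backed
 * entry [0x1000, 0x5000) with offset 0x0 at splitat = 0x3000 leaves
 * entry1 as [0x1000, 0x3000) with offset 0x0 and entry2 as
 * [0x3000, 0x5000) with offset adj = 0x2000, and takes an extra
 * reference on the backing object since two entries now point at it.
 */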

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 * the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start, struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 * the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
    struct uvm_mapent_reservation *umr)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}

static void
vm_map_drain(struct vm_map *map, uvm_flag_t flags)
{

	if (!VM_MAP_IS_KERNEL(map)) {
		return;
	}

	uvm_km_va_drain(map, flags);
}

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.  note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */

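/*
 * Illustrative call (a sketch, not from this file): mapping one page
 * of anonymous, copy-on-write memory into a map, roughly as sys_mmap
 * would for MAP_ANON|MAP_PRIVATE:
 *
 *	vaddr_t va = vm_map_min(map);	// hint only; uvm_map may move it
 *	int error = uvm_map(map, &va, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
 *	    0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
 *	// on success, va holds the chosen address (case [2] above:
 *	// NULL uobj and UVM_UNKNOWN_OFFSET mean no PMAP_PREFER hint).
 */
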
int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
	KASSERT((size & PAGE_MASK) == 0);

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 *
	 * Also, because we allocate entries for in-kernel maps
	 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
	 * allocate them before locking the map.
	 */

	new_entry = NULL;
	if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
	    map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
		if (flags & UVM_FLAG_QUANTUM)
			new_entry->flags |= UVM_MAP_QUANTUM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map)) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}
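
/*
 * Note on structure (editorial): uvm_map() is split into
 * uvm_map_prepare(), which finds and reserves space in the map, and
 * uvm_map_enter(), which links the new entry (or merges it with a
 * neighbour).  Callers that need to do work between the two phases
 * can call the pair directly instead of uvm_map(); a sketch:
 *
 *	struct uvm_map_args args;
 *	error = uvm_map_prepare(map, va, size, uobj, uoffset, 0, flags,
 *	    &args);
 *	if (error == 0) {
 *		// map is now locked; args describes the chosen range
 *		error = uvm_map_enter(map, &args, NULL);	// unlocks
 *	}
 */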

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC("uvm_map_prepare");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL ||
	    (map->flags & VM_MAP_INTRSAFE));

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */

retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
		    (map->flags & VM_MAP_INTRSAFE) == 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags);
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		vm_map_drain(map, flags);

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.  in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).  this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).  the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
	return 0;
}

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);
	const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
	    UVM_MAP_QUANTUM : 0;

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC("uvm_map_enter");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
	    map, start, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */

	if (flags & UVM_FLAG_QUANTUM) {
		KASSERT(new_entry);
		KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
	}

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.  might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk.  Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}

forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 * (if merged, we've already detached)
		 */
		if (uobj && uobj->pgops->pgo_detach && !merged)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}

nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;
done:
	if ((flags & UVM_FLAG_QUANTUM) == 0) {
		/*
		 * vmk_merged_entries is locked by the map's lock.
		 */
		vm_map_unlock(map);
	}
	if (new_entry && error == 0) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, new_entry);
		new_entry = NULL;
	}
	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free_merged(map, dead);
	}
	if ((flags & UVM_FLAG_QUANTUM) != 0) {
		vm_map_unlock(map);
	}
	if (new_entry != NULL) {
		uvm_mapent_free(new_entry);
	}
	return error;
}

/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	while (cur) {
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			cur = RIGHT_ENTRY(cur);
		} else
			cur = LEFT_ENTRY(cur);
	}
	*entry = prev;
	return false;
}
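
/*
 * Illustrative example (made-up numbers): with entries [0x1000,0x2000)
 * and [0x3000,0x4000) in the tree, a lookup of 0x3800 returns true with
 * *entry set to the second entry, while a lookup of 0x2800 returns
 * false with *entry set to the first entry, i.e. the closest entry
 * that starts below the address (or &map->header if there is none).
 */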

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	bool use_tree = false;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	UVMMAP_EVCNT_INCR(mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		if (cur != &map->header && cur->end > address) {
			UVMMAP_EVCNT_INCR(mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			uvm_mapent_check(*entry);
			return (true);
		}

		if (map->nentries > 15)
			use_tree = true;
	} else {

		/*
		 * invalid hint.  use tree.
		 */
		use_tree = true;
	}

	uvm_map_check(map, __func__);

	if (use_tree) {
		/*
		 * Simple lookup in the tree.  Happens when the hint is
		 * invalid, or nentries reaches a threshold.
		 */
		UVMMAP_EVCNT_INCR(mlk_tree);
		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
			goto got;
		} else {
			goto failed;
		}
	}

	/*
	 * search linearly
	 */

	UVMMAP_EVCNT_INCR(mlk_list);
	while (cur != &map->header) {
		UVMMAP_EVCNT_INCR(mlk_listloop);
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
got:
				SAVE_HINT(map, map->hint, *entry);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				KDASSERT((*entry)->start <= address);
				KDASSERT(address < (*entry)->end);
				uvm_mapent_check(*entry);
				return (true);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
failed:
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}

/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits,
 * 0 if it doesn't fit, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if (align != 0) {
		if ((*start & (align - 1)) != 0) {
			if (topdown)
				*start &= ~(align - 1);
			else
				*start = roundup(*start, align);
		}
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}
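
/*
 * Worked example (illustrative): with align = 0x10000 and a candidate
 * *start of 0x12345, a top-down search rounds down to 0x10000 while a
 * bottom-up search rounds up to 0x20000; the candidate then succeeds
 * only if [*start, *start + length) still lies between entry->end and
 * entry->next->start without wrapping past the top of the address
 * space.
 */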
1895
1896 /*
1897 * uvm_map_findspace: find "length" sized space in "map".
1898 *
1899 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1900 * set in "flags" (in which case we insist on using "hint").
1901 * => "result" is VA returned
1902 * => uobj/uoffset are to be used to handle VAC alignment, if required
1903 * => if "align" is non-zero, we attempt to align to that value.
1904 * => caller must at least have read-locked map
1905 * => returns NULL on failure, or pointer to prev. map entry if success
1906 * => note this is a cross between the old vm_map_findspace and vm_map_find
1907 */
1908
1909 struct vm_map_entry *
1910 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1911 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1912 vsize_t align, int flags)
1913 {
1914 struct vm_map_entry *entry;
1915 struct vm_map_entry *child, *prev, *tmp;
1916 vaddr_t orig_hint;
1917 const int topdown = map->flags & VM_MAP_TOPDOWN;
1918 UVMHIST_FUNC("uvm_map_findspace");
1919 UVMHIST_CALLED(maphist);
1920
1921 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1922 map, hint, length, flags);
1923 KASSERT((align & (align - 1)) == 0);
1924 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1925
1926 uvm_map_check(map, "map_findspace entry");
1927
1928 /*
1929 * remember the original hint. if we are aligning, then we
1930 * may have to try again with no alignment constraint if
1931 * we fail the first time.
1932 */
1933
1934 orig_hint = hint;
1935 if (hint < vm_map_min(map)) { /* check ranges ... */
1936 if (flags & UVM_FLAG_FIXED) {
1937 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1938 return (NULL);
1939 }
1940 hint = vm_map_min(map);
1941 }
1942 if (hint > vm_map_max(map)) {
1943 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1944 hint, vm_map_min(map), vm_map_max(map), 0);
1945 return (NULL);
1946 }
1947
1948 /*
1949 * Look for the first possible address; if there's already
1950 * something at this address, we have to start after it.
1951 */
1952
1953 /*
1954 * @@@: there are four, no, eight cases to consider.
1955 *
1956 * 0: found, fixed, bottom up -> fail
1957 * 1: found, fixed, top down -> fail
1958 * 2: found, not fixed, bottom up -> start after entry->end,
1959 * loop up
1960 * 3: found, not fixed, top down -> start before entry->start,
1961 * loop down
1962 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1963 * 5: not found, fixed, top down -> check entry->next->start, fail
1964 * 6: not found, not fixed, bottom up -> check entry->next->start,
1965 * loop up
1966 * 7: not found, not fixed, top down -> check entry->next->start,
1967 * loop down
1968 *
1969 * as you can see, it reduces to roughly five cases; note that
1970 * adding top down mapping adds only one unique case (without
1971 * it, there would be four cases).
1972 */
1973
1974 if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1975 entry = map->first_free;
1976 } else {
1977 if (uvm_map_lookup_entry(map, hint, &entry)) {
1978 /* "hint" address already in use ... */
1979 if (flags & UVM_FLAG_FIXED) {
1980 UVMHIST_LOG(maphist, "<- fixed & VA in use",
1981 0, 0, 0, 0);
1982 return (NULL);
1983 }
1984 if (topdown)
1985 /* Start from lower gap. */
1986 entry = entry->prev;
1987 } else if (flags & UVM_FLAG_FIXED) {
1988 if (entry->next->start >= hint + length &&
1989 hint + length > hint)
1990 goto found;
1991
1992 /* "hint" address is gap but too small */
1993 UVMHIST_LOG(maphist, "<- fixed mapping failed",
1994 0, 0, 0, 0);
1995 return (NULL); /* only one shot at it ... */
1996 } else {
1997 /*
1998 * See if given hint fits in this gap.
1999 */
2000 switch (uvm_map_space_avail(&hint, length,
2001 uoffset, align, topdown, entry)) {
2002 case 1:
2003 goto found;
2004 case -1:
2005 goto wraparound;
2006 }
2007
2008 if (topdown) {
2009 /*
2010 * There is still a chance to fit
2011 * if hint > entry->end.
2012 */
2013 } else {
2014 /* Start from higher gap. */
2015 entry = entry->next;
2016 if (entry == &map->header)
2017 goto notfound;
2018 goto nextgap;
2019 }
2020 }
2021 }
2022
2023 /*
2024 * Note that the UVM_FLAG_FIXED case has already been handled.
2025 */
2026 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2027
2028 /* Try to find the space in the red-black tree */
2029
2030 /* Check slot before any entry */
2031 hint = topdown ? entry->next->start - length : entry->end;
2032 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2033 topdown, entry)) {
2034 case 1:
2035 goto found;
2036 case -1:
2037 goto wraparound;
2038 }
2039
2040 nextgap:
2041 KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2042 /* If there is not enough space in the whole tree, we fail */
2043 tmp = ROOT_ENTRY(map);
2044 if (tmp == NULL || tmp->maxgap < length)
2045 goto notfound;
2046
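	/*
	 * tmp->gap is the free space between tmp and tmp->next (see the
	 * KASSERT below), and tmp->maxgap is the largest such gap in the
	 * subtree rooted at tmp; maxgap is what lets this search prune
	 * whole subtrees that cannot contain a big enough gap.
	 */
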
2047 prev = NULL; /* previous candidate */
2048
2049 /* Find an entry close to hint that has enough space */
2050 for (; tmp;) {
2051 KASSERT(tmp->next->start == tmp->end + tmp->gap);
2052 if (topdown) {
2053 if (tmp->next->start < hint + length &&
2054 (prev == NULL || tmp->end > prev->end)) {
2055 if (tmp->gap >= length)
2056 prev = tmp;
2057 else if ((child = LEFT_ENTRY(tmp)) != NULL
2058 && child->maxgap >= length)
2059 prev = tmp;
2060 }
2061 } else {
2062 if (tmp->end >= hint &&
2063 (prev == NULL || tmp->end < prev->end)) {
2064 if (tmp->gap >= length)
2065 prev = tmp;
2066 else if ((child = RIGHT_ENTRY(tmp)) != NULL
2067 && child->maxgap >= length)
2068 prev = tmp;
2069 }
2070 }
2071 if (tmp->next->start < hint + length)
2072 child = RIGHT_ENTRY(tmp);
2073 else if (tmp->end > hint)
2074 child = LEFT_ENTRY(tmp);
2075 else {
2076 if (tmp->gap >= length)
2077 break;
2078 if (topdown)
2079 child = LEFT_ENTRY(tmp);
2080 else
2081 child = RIGHT_ENTRY(tmp);
2082 }
2083 if (child == NULL || child->maxgap < length)
2084 break;
2085 tmp = child;
2086 }
2087
2088 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2089 /*
2090 * Check if the entry that we found satisfies the
2091 * space requirement
2092 */
2093 if (topdown) {
2094 if (hint > tmp->next->start - length)
2095 hint = tmp->next->start - length;
2096 } else {
2097 if (hint < tmp->end)
2098 hint = tmp->end;
2099 }
2100 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2101 topdown, tmp)) {
2102 case 1:
2103 entry = tmp;
2104 goto found;
2105 case -1:
2106 goto wraparound;
2107 }
2108 if (tmp->gap >= length)
2109 goto listsearch;
2110 }
2111 if (prev == NULL)
2112 goto notfound;
2113
2114 if (topdown) {
2115 KASSERT(orig_hint >= prev->next->start - length ||
2116 prev->next->start - length > prev->next->start);
2117 hint = prev->next->start - length;
2118 } else {
2119 KASSERT(orig_hint <= prev->end);
2120 hint = prev->end;
2121 }
2122 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2123 topdown, prev)) {
2124 case 1:
2125 entry = prev;
2126 goto found;
2127 case -1:
2128 goto wraparound;
2129 }
2130 if (prev->gap >= length)
2131 goto listsearch;
2132
2133 if (topdown)
2134 tmp = LEFT_ENTRY(prev);
2135 else
2136 tmp = RIGHT_ENTRY(prev);
2137 for (;;) {
2138 KASSERT(tmp && tmp->maxgap >= length);
2139 if (topdown)
2140 child = RIGHT_ENTRY(tmp);
2141 else
2142 child = LEFT_ENTRY(tmp);
2143 if (child && child->maxgap >= length) {
2144 tmp = child;
2145 continue;
2146 }
2147 if (tmp->gap >= length)
2148 break;
2149 if (topdown)
2150 tmp = LEFT_ENTRY(tmp);
2151 else
2152 tmp = RIGHT_ENTRY(tmp);
2153 }
2154
2155 if (topdown) {
2156 KASSERT(orig_hint >= tmp->next->start - length ||
2157 tmp->next->start - length > tmp->next->start);
2158 hint = tmp->next->start - length;
2159 } else {
2160 KASSERT(orig_hint <= tmp->end);
2161 hint = tmp->end;
2162 }
2163 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2164 topdown, tmp)) {
2165 case 1:
2166 entry = tmp;
2167 goto found;
2168 case -1:
2169 goto wraparound;
2170 }
2171
2172 /*
2173 * The tree fails to find an entry because of offset or alignment
2174 * restrictions. Search the list instead.
2175 */
2176 listsearch:
2177 /*
2178 * Look through the rest of the map, trying to fit a new region in
2179 * the gap between existing regions, or after the very last region.
2180 * note: entry->end = base VA of current gap,
2181 * entry->next->start = VA of end of current gap
2182 */
2183
2184 for (;;) {
2185 /* Update hint for current gap. */
2186 hint = topdown ? entry->next->start - length : entry->end;
2187
2188 /* See if it fits. */
2189 switch (uvm_map_space_avail(&hint, length, uoffset, align,
2190 topdown, entry)) {
2191 case 1:
2192 goto found;
2193 case -1:
2194 goto wraparound;
2195 }
2196
2197 /* Advance to next/previous gap */
2198 if (topdown) {
2199 if (entry == &map->header) {
2200 UVMHIST_LOG(maphist, "<- failed (off start)",
2201 0,0,0,0);
2202 goto notfound;
2203 }
2204 entry = entry->prev;
2205 } else {
2206 entry = entry->next;
2207 if (entry == &map->header) {
2208 UVMHIST_LOG(maphist, "<- failed (off end)",
2209 0,0,0,0);
2210 goto notfound;
2211 }
2212 }
2213 }
2214
2215 found:
2216 SAVE_HINT(map, map->hint, entry);
2217 *result = hint;
2218 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
2219 KASSERT( topdown || hint >= orig_hint);
2220 KASSERT(!topdown || hint <= orig_hint);
2221 KASSERT(entry->end <= hint);
2222 KASSERT(hint + length <= entry->next->start);
2223 return (entry);
2224
2225 wraparound:
2226 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2227
2228 return (NULL);
2229
2230 notfound:
2231 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2232
2233 return (NULL);
2234 }
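
/*
 * A minimal sketch of how uvm_map_findspace() is typically driven
 * (roughly what uvm_map() does internally; error paths trimmed):
 */
#if 0
	struct vm_map_entry *prev_entry;
	vaddr_t va;

	vm_map_lock(map);		/* at least a read lock is required */
	prev_entry = uvm_map_findspace(map, hint, size, &va,
	    NULL /* uobj */, 0 /* uoffset */, 0 /* align */, 0 /* flags */);
	if (prev_entry == NULL) {
		vm_map_unlock(map);
		return ENOMEM;		/* no gap big enough */
	}
	/* "va" names a free gap; the new entry links in after prev_entry */
	vm_map_unlock(map);
#endif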
2235
2236 /*
2237 * U N M A P - m a i n h e l p e r f u n c t i o n s
2238 */
2239
2240 /*
2241 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2242 *
2243 * => caller must check alignment and size
2244 * => map must be locked by caller
2245 * => we return a list of map entries that we've removed from the map
2246 * in "entry_list"
2247 */
2248
2249 void
2250 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2251 struct vm_map_entry **entry_list /* OUT */,
2252 struct uvm_mapent_reservation *umr, int flags)
2253 {
2254 struct vm_map_entry *entry, *first_entry, *next;
2255 vaddr_t len;
2256 UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2257
2258 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2259 map, start, end, 0);
2260 VM_MAP_RANGE_CHECK(map, start, end);
2261
2262 uvm_map_check(map, "unmap_remove entry");
2263
2264 /*
2265 * find first entry
2266 */
2267
2268 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2269 /* clip and go... */
2270 entry = first_entry;
2271 UVM_MAP_CLIP_START(map, entry, start, umr);
2272 /* critical! prevents stale hint */
2273 SAVE_HINT(map, entry, entry->prev);
2274 } else {
2275 entry = first_entry->next;
2276 }
2277
2278 /*
2279 * Save the free space hint
2280 */
2281
2282 if (map->first_free != &map->header && map->first_free->start >= start)
2283 map->first_free = entry->prev;
2284
2285 /*
2286 * note: we now re-use first_entry for a different task. we remove
2287 * a number of map entries from the map and save them in a linked
2288 * list headed by "first_entry". once we remove them from the map
2289 * the caller should unlock the map and drop the references to the
2290 * backing objects [c.f. uvm_unmap_detach]. the object is to
2291 * separate unmapping from reference dropping. why?
2292 * [1] the map has to be locked for unmapping
2293 * [2] the map need not be locked for reference dropping
2294 * [3] dropping references may trigger pager I/O, and if we hit
2295 * a pager that does synchronous I/O we may have to wait for it.
2296 * [4] we would like all waiting for I/O to occur with maps unlocked
2297 * so that we don't block other threads.
2298 */
2299
2300 first_entry = NULL;
2301 *entry_list = NULL;
2302
2303 /*
2304 * break up the area into map entry sized regions and unmap. note
2305 * that all mappings have to be removed before we can even consider
2306 * dropping references to amaps or VM objects (otherwise we could end
2307 * up with a mapping to a page on the free list which would be very bad)
2308 */
2309
2310 while ((entry != &map->header) && (entry->start < end)) {
2311 KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2312
2313 UVM_MAP_CLIP_END(map, entry, end, umr);
2314 next = entry->next;
2315 len = entry->end - entry->start;
2316
2317 /*
2318 * unwire before removing addresses from the pmap; otherwise
2319 * unwiring will put the entries back into the pmap (XXX).
2320 */
2321
2322 if (VM_MAPENT_ISWIRED(entry)) {
2323 uvm_map_entry_unwire(map, entry);
2324 }
2325 if (flags & UVM_FLAG_VAONLY) {
2326
2327 /* nothing */
2328
2329 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2330
2331 /*
2332 * if the map is non-pageable, any pages mapped there
2333 * must be wired and entered with pmap_kenter_pa(),
2334 * and we should free any such pages immediately.
2335 * this is mostly used for kmem_map.
2336 */
2337
2338 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2339 uvm_km_pgremove_intrsafe(map, entry->start,
2340 entry->end);
2341 pmap_kremove(entry->start, len);
2342 }
2343 } else if (UVM_ET_ISOBJ(entry) &&
2344 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2345 KASSERT(vm_map_pmap(map) == pmap_kernel());
2346
2347 /*
2348 * note: kernel object mappings are currently used in
2349 * two ways:
2350 * [1] "normal" mappings of pages in the kernel object
2351 * [2] uvm_km_valloc'd allocations in which we
2352 * pmap_enter in some non-kernel-object page
2353 * (e.g. vmapbuf).
2354 *
2355 * for case [1], we need to remove the mapping from
2356 * the pmap and then remove the page from the kernel
2357 * object (because, once pages in a kernel object are
2358 * unmapped they are no longer needed, unlike, say,
2359 * a vnode where you might want the data to persist
2360 * until flushed out of a queue).
2361 *
2362 * for case [2], we need to remove the mapping from
2363 * the pmap. there shouldn't be any pages at the
2364 * specified offset in the kernel object [but it
2365 * doesn't hurt to call uvm_km_pgremove just to be
2366 * safe?]
2367 *
2368 * uvm_km_pgremove currently does the following:
2369 * for pages in the kernel object in range:
2370 * - drops the swap slot
2371 * - uvm_pagefree the page
2372 */
2373
2374 /*
2375 * remove mappings from pmap and drop the pages
2376 * from the object. offsets are always relative
2377 * to vm_map_min(kernel_map).
2378 */
2379
2380 pmap_remove(pmap_kernel(), entry->start,
2381 entry->start + len);
2382 uvm_km_pgremove(entry->start, entry->end);
2383
2384 /*
2385 * null out kernel_object reference, we've just
2386 * dropped it
2387 */
2388
2389 entry->etype &= ~UVM_ET_OBJ;
2390 entry->object.uvm_obj = NULL;
2391 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2392
2393 /*
2394 * remove mappings the standard way.
2395 */
2396
2397 pmap_remove(map->pmap, entry->start, entry->end);
2398 }
2399
2400 #if defined(DEBUG)
2401 if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2402
2403 /*
2404 * check for any remaining mappings,
2405 * which would indicate a bug in the caller.
2406 */
2407
2408 vaddr_t va;
2409 for (va = entry->start; va < entry->end;
2410 va += PAGE_SIZE) {
2411 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2412 panic("uvm_unmap_remove: has mapping");
2413 }
2414 }
2415
2416 if (VM_MAP_IS_KERNEL(map)) {
2417 uvm_km_check_empty(map, entry->start,
2418 entry->end);
2419 }
2420 }
2421 #endif /* defined(DEBUG) */
2422
2423 /*
2424 * remove entry from map and put it on our list of entries
2425 * that we've nuked. then go to next entry.
2426 */
2427
2428 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
2429
2430 /* critical! prevents stale hint */
2431 SAVE_HINT(map, entry, entry->prev);
2432
2433 uvm_map_entry_unlink(map, entry);
2434 KASSERT(map->size >= len);
2435 map->size -= len;
2436 entry->prev = NULL;
2437 entry->next = first_entry;
2438 first_entry = entry;
2439 entry = next;
2440 }
2441 if ((map->flags & VM_MAP_DYING) == 0) {
2442 pmap_update(vm_map_pmap(map));
2443 }
2444
2445 uvm_map_check(map, "unmap_remove leave");
2446
2447 /*
2448 * now we've cleaned up the map and are ready for the caller to drop
2449 * references to the mapped objects.
2450 */
2451
2452 *entry_list = first_entry;
2453 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2454
2455 if (map->flags & VM_MAP_WANTVA) {
2456 mutex_enter(&map->misc_lock);
2457 map->flags &= ~VM_MAP_WANTVA;
2458 cv_broadcast(&map->cv);
2459 mutex_exit(&map->misc_lock);
2460 }
2461 }
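
/*
 * The removal/detach split described above gives the usual unmap
 * sequence; this sketch is essentially what uvm_unmap() does:
 */
#if 0
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
	vm_map_unlock(map);		/* drop the map lock first ... */
	if (dead_entries != NULL)	/* ... then drop the references */
		uvm_unmap_detach(dead_entries, 0);
#endif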
2462
2463 /*
2464 * uvm_unmap_detach: drop references in a chain of map entries
2465 *
2466 * => we will free the map entries as we traverse the list.
2467 */
2468
2469 void
2470 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2471 {
2472 struct vm_map_entry *next_entry;
2473 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2474
2475 while (first_entry) {
2476 KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2477 UVMHIST_LOG(maphist,
2478 " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2479 first_entry, first_entry->aref.ar_amap,
2480 first_entry->object.uvm_obj,
2481 UVM_ET_ISSUBMAP(first_entry));
2482
2483 /*
2484 * drop reference to amap, if we've got one
2485 */
2486
2487 if (first_entry->aref.ar_amap)
2488 uvm_map_unreference_amap(first_entry, flags);
2489
2490 /*
2491 * drop reference to our backing object, if we've got one
2492 */
2493
2494 KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2495 if (UVM_ET_ISOBJ(first_entry) &&
2496 first_entry->object.uvm_obj->pgops->pgo_detach) {
2497 (*first_entry->object.uvm_obj->pgops->pgo_detach)
2498 (first_entry->object.uvm_obj);
2499 }
2500 next_entry = first_entry->next;
2501 uvm_mapent_free(first_entry);
2502 first_entry = next_entry;
2503 }
2504 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2505 }
2506
2507 /*
2508 * E X T R A C T I O N F U N C T I O N S
2509 */
2510
2511 /*
2512 * uvm_map_reserve: reserve space in a vm_map for future use.
2513 *
2514 * => we reserve space in a map by putting a dummy map entry in the
2515 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2516 * => map should be unlocked (we will write lock it)
2517 * => we return true if we were able to reserve space
2518 * => XXXCDC: should be inline?
2519 */
2520
2521 int
2522 uvm_map_reserve(struct vm_map *map, vsize_t size,
2523 vaddr_t offset /* hint for pmap_prefer */,
2524 vsize_t align /* alignment */,
2525 vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2526 uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
2527 {
2528 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2529
2530 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
2531 map,size,offset,raddr);
2532
2533 size = round_page(size);
2534
2535 /*
2536 * reserve some virtual space.
2537 */
2538
2539 if (uvm_map(map, raddr, size, NULL, offset, align,
2540 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2541 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2542 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2543 return (false);
2544 }
2545
2546 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
2547 return (true);
2548 }
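
/*
 * For example (sketch, error handling trimmed):
 */
#if 0
	vaddr_t va = 0;			/* IN: hint, OUT: reserved VA */

	if (!uvm_map_reserve(kernel_map, size, 0 /* offset */,
	    0 /* align */, &va, 0 /* flags */))
		return ENOMEM;
	/* [va, va + round_page(size)) now holds a blank PROT_NONE entry */
#endif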
2549
2550 /*
2551 * uvm_map_replace: replace a reserved (blank) area of memory with
2552 * real mappings.
2553 *
2554 * => caller must WRITE-LOCK the map
2555 * => we return true if replacement was a success
2556 * => we expect the newents chain to have nnewents entries on it and
2557 * we expect newents->prev to point to the last entry on the list
2558 * => note newents is allowed to be NULL
2559 */
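
/*
 * For example, a three entry chain e1..e3 (nnewents == 3) must be
 * linked as sketched below; note the head's "prev" pointing at the
 * tail (uvm_map_extract() below builds chains in this format):
 */
#if 0
	e1->next = e2;	e2->next = e3;	e3->next = NULL;
	e3->prev = e2;	e2->prev = e1;	e1->prev = e3;
#endif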
2560
2561 static int
2562 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2563 struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2564 struct vm_map_entry **oldentryp)
2565 {
2566 struct vm_map_entry *oldent, *last;
2567
2568 uvm_map_check(map, "map_replace entry");
2569
2570 /*
2571 * first find the blank map entry at the specified address
2572 */
2573
2574 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2575 return (false);
2576 }
2577
2578 /*
2579 * check to make sure we have a proper blank entry
2580 */
2581
2582 if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2583 UVM_MAP_CLIP_END(map, oldent, end, NULL);
2584 }
2585 if (oldent->start != start || oldent->end != end ||
2586 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2587 return (false);
2588 }
2589
2590 #ifdef DIAGNOSTIC
2591
2592 /*
2593 * sanity check the newents chain
2594 */
2595
2596 {
2597 struct vm_map_entry *tmpent = newents;
2598 int nent = 0;
2599 vsize_t sz = 0;
2600 vaddr_t cur = start;
2601
2602 while (tmpent) {
2603 nent++;
2604 sz += tmpent->end - tmpent->start;
2605 if (tmpent->start < cur)
2606 panic("uvm_map_replace1");
2607 if (tmpent->start >= tmpent->end || tmpent->end > end) {
2608 panic("uvm_map_replace2: "
2609 "tmpent->start=0x%"PRIxVADDR
2610 ", tmpent->end=0x%"PRIxVADDR
2611 ", end=0x%"PRIxVADDR,
2612 tmpent->start, tmpent->end, end);
2613 }
2614 cur = tmpent->end;
2615 if (tmpent->next) {
2616 if (tmpent->next->prev != tmpent)
2617 panic("uvm_map_replace3");
2618 } else {
2619 if (newents->prev != tmpent)
2620 panic("uvm_map_replace4");
2621 }
2622 tmpent = tmpent->next;
2623 }
2624 if (nent != nnewents)
2625 panic("uvm_map_replace5");
2626 if (sz != nsize)
2627 panic("uvm_map_replace6");
2628 }
2629 #endif
2630
2631 /*
2632 * map entry is a valid blank! replace it. (this does all the
2633 * work of map entry link/unlink...).
2634 */
2635
2636 if (newents) {
2637 last = newents->prev;
2638
2639 /* critical: flush stale hints out of map */
2640 SAVE_HINT(map, map->hint, newents);
2641 if (map->first_free == oldent)
2642 map->first_free = last;
2643
2644 last->next = oldent->next;
2645 last->next->prev = last;
2646
2647 /* Fix RB tree */
2648 uvm_rb_remove(map, oldent);
2649
2650 newents->prev = oldent->prev;
2651 newents->prev->next = newents;
2652 map->nentries = map->nentries + (nnewents - 1);
2653
2654 /* Fixup the RB tree */
2655 {
2656 int i;
2657 struct vm_map_entry *tmp;
2658
2659 tmp = newents;
2660 for (i = 0; i < nnewents && tmp; i++) {
2661 uvm_rb_insert(map, tmp);
2662 tmp = tmp->next;
2663 }
2664 }
2665 } else {
2666 /* NULL list of new entries: just remove the old one */
2667 clear_hints(map, oldent);
2668 uvm_map_entry_unlink(map, oldent);
2669 }
2670 map->size -= end - start - nsize;
2671
2672 uvm_map_check(map, "map_replace leave");
2673
2674 /*
2675 * now we can free the old blank entry and return.
2676 */
2677
2678 *oldentryp = oldent;
2679 return (true);
2680 }
2681
2682 /*
2683 * uvm_map_extract: extract a mapping from a map and put it somewhere
2684 * (maybe removing the old mapping)
2685 *
2686 * => maps should be unlocked (we will write lock them)
2687 * => returns 0 on success, error code otherwise
2688 * => start must be page aligned
2689 * => len must be page sized
2690 * => flags:
2691 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2692 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2693 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2694 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2695 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2696 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2697 * be used from within the kernel in a kernel level map <<<
2698 */
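
/*
 * For example, this sketch is roughly how uvm_io() accesses another
 * process' address space ("p", "uva" and "chunksz" are placeholders;
 * error handling trimmed).  Note the QREF-style teardown at the end:
 */
#if 0
	struct vm_map_entry *dead_entries;
	vaddr_t kva;
	int error;

	error = uvm_map_extract(&p->p_vmspace->vm_map, uva, chunksz,
	    kernel_map, &kva,
	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
	if (error == 0) {
		/* ... copy data through kva ... */
		vm_map_lock(kernel_map);
		uvm_unmap_remove(kernel_map, kva, kva + chunksz,
		    &dead_entries, NULL, 0);
		vm_map_unlock(kernel_map);
		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, AMAP_REFALL);
	}
#endif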
2699
2700 int
2701 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2702 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2703 {
2704 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2705 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2706 *deadentry, *oldentry;
2707 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2708 vsize_t elen;
2709 int nchain, error, copy_ok;
2710 vsize_t nsize;
2711 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2712
2713 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2714 len,0);
2715 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2716
2717 /*
2718 * step 0: sanity check: start must be on a page boundary, length
2719 * must be page sized. can't ask for CONTIG/QREF if you asked for
2720 * REMOVE.
2721 */
2722
2723 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2724 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2725 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2726
2727 /*
2728 * step 1: reserve space in the target map for the extracted area
2729 */
2730
2731 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2732 dstaddr = vm_map_min(dstmap);
2733 if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2734 return (ENOMEM);
2735 *dstaddrp = dstaddr; /* pass address back to caller */
2736 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2737 } else {
2738 dstaddr = *dstaddrp;
2739 }
2740
2741 /*
2742 * step 2: setup for the extraction process loop by init'ing the
2743 * map entry chain, locking src map, and looking up the first useful
2744 * entry in the map.
2745 */
2746
2747 end = start + len;
2748 newend = dstaddr + len;
2749 chain = endchain = NULL;
2750 nchain = 0;
2751 nsize = 0;
2752 vm_map_lock(srcmap);
2753
2754 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2755
2756 /* "start" is within an entry */
2757 if (flags & UVM_EXTRACT_QREF) {
2758
2759 /*
2760 * for quick references we don't clip the entry, so
2761 * the entry may map space "before" the starting
2762 * virtual address... this is the "fudge" factor
2763 * (which can be non-zero only the first time
2764 * through the "while" loop in step 3).
2765 */
2766
2767 fudge = start - entry->start;
2768 } else {
2769
2770 /*
2771 * normal reference: we clip the map to fit (thus
2772 * fudge is zero)
2773 */
2774
2775 UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
2776 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2777 fudge = 0;
2778 }
2779 } else {
2780
2781 /* "start" is not within an entry ... skip to next entry */
2782 if (flags & UVM_EXTRACT_CONTIG) {
2783 error = EINVAL;
2784 goto bad; /* definite hole here ... */
2785 }
2786
2787 entry = entry->next;
2788 fudge = 0;
2789 }
2790
2791 /* save values from srcmap for step 6 */
2792 orig_entry = entry;
2793 orig_fudge = fudge;
2794
2795 /*
2796 * step 3: now start looping through the map entries, extracting
2797 * as we go.
2798 */
2799
2800 while (entry->start < end && entry != &srcmap->header) {
2801
2802 /* if we are not doing a quick reference, clip it */
2803 if ((flags & UVM_EXTRACT_QREF) == 0)
2804 UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
2805
2806 /* clear needs_copy (allow chunking) */
2807 if (UVM_ET_ISNEEDSCOPY(entry)) {
2808 amap_copy(srcmap, entry,
2809 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2810 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2811 error = ENOMEM;
2812 goto bad;
2813 }
2814
2815 /* amap_copy could clip (during chunk)! update fudge */
2816 if (fudge) {
2817 fudge = start - entry->start;
2818 orig_fudge = fudge;
2819 }
2820 }
2821
2822 /* calculate the offset of this from "start" */
2823 oldoffset = (entry->start + fudge) - start;
2824
2825 /* allocate a new map entry */
2826 newentry = uvm_mapent_alloc(dstmap, 0);
2827 if (newentry == NULL) {
2828 error = ENOMEM;
2829 goto bad;
2830 }
2831
2832 /* set up new map entry */
2833 newentry->next = NULL;
2834 newentry->prev = endchain;
2835 newentry->start = dstaddr + oldoffset;
2836 newentry->end =
2837 newentry->start + (entry->end - (entry->start + fudge));
2838 if (newentry->end > newend || newentry->end < newentry->start)
2839 newentry->end = newend;
2840 newentry->object.uvm_obj = entry->object.uvm_obj;
2841 if (newentry->object.uvm_obj) {
2842 if (newentry->object.uvm_obj->pgops->pgo_reference)
2843 newentry->object.uvm_obj->pgops->
2844 pgo_reference(newentry->object.uvm_obj);
2845 newentry->offset = entry->offset + fudge;
2846 } else {
2847 newentry->offset = 0;
2848 }
2849 newentry->etype = entry->etype;
2850 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2851 entry->max_protection : entry->protection;
2852 newentry->max_protection = entry->max_protection;
2853 newentry->inheritance = entry->inheritance;
2854 newentry->wired_count = 0;
2855 newentry->aref.ar_amap = entry->aref.ar_amap;
2856 if (newentry->aref.ar_amap) {
2857 newentry->aref.ar_pageoff =
2858 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2859 uvm_map_reference_amap(newentry, AMAP_SHARED |
2860 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2861 } else {
2862 newentry->aref.ar_pageoff = 0;
2863 }
2864 newentry->advice = entry->advice;
2865 if ((flags & UVM_EXTRACT_QREF) != 0) {
2866 newentry->flags |= UVM_MAP_NOMERGE;
2867 }
2868
2869 /* now link it on the chain */
2870 nchain++;
2871 nsize += newentry->end - newentry->start;
2872 if (endchain == NULL) {
2873 chain = endchain = newentry;
2874 } else {
2875 endchain->next = newentry;
2876 endchain = newentry;
2877 }
2878
2879 /* end of 'while' loop! */
2880 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2881 (entry->next == &srcmap->header ||
2882 entry->next->start != entry->end)) {
2883 error = EINVAL;
2884 goto bad;
2885 }
2886 entry = entry->next;
2887 fudge = 0;
2888 }
2889
2890 /*
2891 * step 4: close off chain (in format expected by uvm_map_replace)
2892 */
2893
2894 if (chain)
2895 chain->prev = endchain;
2896
2897 /*
2898 * step 5: attempt to lock the dest map so we can pmap_copy.
2899 * note usage of copy_ok:
2900 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2901 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2902 */
2903
2904 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2905 copy_ok = 1;
2906 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2907 nchain, nsize, &resentry)) {
2908 if (srcmap != dstmap)
2909 vm_map_unlock(dstmap);
2910 error = EIO;
2911 goto bad;
2912 }
2913 } else {
2914 copy_ok = 0;
2915 /* replacement deferred until step 7 */
2916 }
2917
2918 /*
2919 * step 6: traverse the srcmap a second time to do the following:
2920 * - if we got a lock on the dstmap do pmap_copy
2921 * - if UVM_EXTRACT_REMOVE remove the entries
2922 * we make use of orig_entry and orig_fudge (saved in step 2)
2923 */
2924
2925 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2926
2927 /* purge possible stale hints from srcmap */
2928 if (flags & UVM_EXTRACT_REMOVE) {
2929 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2930 if (srcmap->first_free != &srcmap->header &&
2931 srcmap->first_free->start >= start)
2932 srcmap->first_free = orig_entry->prev;
2933 }
2934
2935 entry = orig_entry;
2936 fudge = orig_fudge;
2937 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2938
2939 while (entry->start < end && entry != &srcmap->header) {
2940 if (copy_ok) {
2941 oldoffset = (entry->start + fudge) - start;
2942 elen = MIN(end, entry->end) -
2943 (entry->start + fudge);
2944 pmap_copy(dstmap->pmap, srcmap->pmap,
2945 dstaddr + oldoffset, elen,
2946 entry->start + fudge);
2947 }
2948
2949 /* we advance "entry" in the following if statement */
2950 if (flags & UVM_EXTRACT_REMOVE) {
2951 pmap_remove(srcmap->pmap, entry->start,
2952 entry->end);
2953 oldentry = entry; /* save entry */
2954 entry = entry->next; /* advance */
2955 uvm_map_entry_unlink(srcmap, oldentry);
2956 /* add to dead list */
2957 oldentry->next = deadentry;
2958 deadentry = oldentry;
2959 } else {
2960 entry = entry->next; /* advance */
2961 }
2962
2963 /* end of 'while' loop */
2964 fudge = 0;
2965 }
2966 pmap_update(srcmap->pmap);
2967
2968 /*
2969 * unlock dstmap. we will dispose of deadentry in
2970 * step 7 if needed
2971 */
2972
2973 if (copy_ok && srcmap != dstmap)
2974 vm_map_unlock(dstmap);
2975
2976 } else {
2977 deadentry = NULL;
2978 }
2979
2980 /*
2981 * step 7: we are done with the source map, unlock. if copy_ok
2982 * is 0 then we have not replaced the dummy mapping in dstmap yet
2983 * and we need to do so now.
2984 */
2985
2986 vm_map_unlock(srcmap);
2987 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2988 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2989
2990 /* now do the replacement if we didn't do it in step 5 */
2991 if (copy_ok == 0) {
2992 vm_map_lock(dstmap);
2993 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2994 nchain, nsize, &resentry);
2995 vm_map_unlock(dstmap);
2996
2997 if (error == false) {
2998 error = EIO;
2999 goto bad2;
3000 }
3001 }
3002
3003 if (resentry != NULL)
3004 uvm_mapent_free(resentry);
3005
3006 return (0);
3007
3008 /*
3009 * bad: failure recovery
3010 */
3011 bad:
3012 vm_map_unlock(srcmap);
3013 bad2: /* src already unlocked */
3014 if (chain)
3015 uvm_unmap_detach(chain,
3016 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3017
3018 if (resentry != NULL)
3019 uvm_mapent_free(resentry);
3020
3021 if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3022 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
3023 }
3024 return (error);
3025 }
3026
3027 /* end of extraction functions */
3028
3029 /*
3030 * uvm_map_submap: punch down part of a map into a submap
3031 *
3032 * => only the kernel_map is allowed to be submapped
3033 * => the purpose of submapping is to break up the locking granularity
3034 * of a larger map
3035 * => the range specified must have been mapped previously with a uvm_map()
3036 * call [with uobj==NULL] to create a blank map entry in the main map.
3037 * [And it had better still be blank!]
3038 * => maps which contain submaps should never be copied or forked.
3039 * => to remove a submap, use uvm_unmap() on the main map
3040 * and then uvm_map_deallocate() the submap.
3041 * => main map must be unlocked.
3042 * => submap must have been init'd and have a zero reference count.
3043 * [need not be locked as we don't actually reference it]
3044 */
3045
3046 int
3047 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3048 struct vm_map *submap)
3049 {
3050 struct vm_map_entry *entry;
3051 struct uvm_mapent_reservation umr;
3052 int error;
3053
3054 uvm_mapent_reserve(map, &umr, 2, 0);
3055
3056 vm_map_lock(map);
3057 VM_MAP_RANGE_CHECK(map, start, end);
3058
3059 if (uvm_map_lookup_entry(map, start, &entry)) {
3060 UVM_MAP_CLIP_START(map, entry, start, &umr);
3061 UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
3062 } else {
3063 entry = NULL;
3064 }
3065
3066 if (entry != NULL &&
3067 entry->start == start && entry->end == end &&
3068 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3069 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3070 entry->etype |= UVM_ET_SUBMAP;
3071 entry->object.sub_map = submap;
3072 entry->offset = 0;
3073 uvm_map_reference(submap);
3074 error = 0;
3075 } else {
3076 error = EINVAL;
3077 }
3078 vm_map_unlock(map);
3079
3080 uvm_mapent_unreserve(map, &umr);
3081
3082 return error;
3083 }
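
/*
 * Putting the pieces together, this sketch is roughly what
 * uvm_km_suballoc() does to carve a submap out of the kernel map
 * ("submap" and "size" are assumed to be supplied by the caller):
 */
#if 0
	struct vm_map_kernel *submap;
	vaddr_t va = vm_map_min(kernel_map);

	/* 1: create a blank (uobj == NULL) entry in the parent map */
	if (uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != 0)
		panic("suballoc: no space in parent map");

	/* 2: init the submap over the same range and punch it in */
	uvm_map_setup_kernel(submap, va, va + size, VM_MAP_PAGEABLE);
	submap->vmk_map.pmap = vm_map_pmap(kernel_map);
	if (uvm_map_submap(kernel_map, va, va + size, &submap->vmk_map))
		panic("suballoc: submap install failed");
#endif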
3084
3085 /*
3086 * uvm_map_setup_kernel: init in-kernel map
3087 *
3088 * => map must not be in service yet.
3089 */
3090
3091 void
3092 uvm_map_setup_kernel(struct vm_map_kernel *map,
3093 vaddr_t vmin, vaddr_t vmax, int flags)
3094 {
3095
3096 uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
3097 callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
3098 LIST_INIT(&map->vmk_kentry_free);
3099 map->vmk_merged_entries = NULL;
3100 }
3101
3102
3103 /*
3104 * uvm_map_protect: change map protection
3105 *
3106 * => set_max means set max_protection.
3107 * => map must be unlocked.
3108 */
3109
3110 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3111 ~VM_PROT_WRITE : VM_PROT_ALL)
3112
3113 int
3114 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3115 vm_prot_t new_prot, bool set_max)
3116 {
3117 struct vm_map_entry *current, *entry;
3118 int error = 0;
3119 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
3120 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
3121 map, start, end, new_prot);
3122
3123 vm_map_lock(map);
3124 VM_MAP_RANGE_CHECK(map, start, end);
3125 if (uvm_map_lookup_entry(map, start, &entry)) {
3126 UVM_MAP_CLIP_START(map, entry, start, NULL);
3127 } else {
3128 entry = entry->next;
3129 }
3130
3131 /*
3132 * make a first pass to check for protection violations.
3133 */
3134
3135 current = entry;
3136 while ((current != &map->header) && (current->start < end)) {
3137 if (UVM_ET_ISSUBMAP(current)) {
3138 error = EINVAL;
3139 goto out;
3140 }
3141 if ((new_prot & current->max_protection) != new_prot) {
3142 error = EACCES;
3143 goto out;
3144 }
3145 /*
3146 * Don't allow VM_PROT_EXECUTE to be set on entries that
3147 * point to vnodes that are associated with a NOEXEC file
3148 * system.
3149 */
3150 if (UVM_ET_ISOBJ(current) &&
3151 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3152 struct vnode *vp =
3153 (struct vnode *) current->object.uvm_obj;
3154
3155 if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3156 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3157 error = EACCES;
3158 goto out;
3159 }
3160 }
3161
3162 current = current->next;
3163 }
3164
3165 /* go back and fix up protections (no need to clip this time). */
3166
3167 current = entry;
3168 while ((current != &map->header) && (current->start < end)) {
3169 vm_prot_t old_prot;
3170
3171 UVM_MAP_CLIP_END(map, current, end, NULL);
3172 old_prot = current->protection;
3173 if (set_max)
3174 current->protection =
3175 (current->max_protection = new_prot) & old_prot;
3176 else
3177 current->protection = new_prot;
3178
3179 /*
3180 * update physical map if necessary. worry about copy-on-write
3181 * here -- CHECK THIS XXX
3182 */
3183
3184 if (current->protection != old_prot) {
3185 /* update pmap! */
3186 pmap_protect(map->pmap, current->start, current->end,
3187 current->protection & MASK(current));
3188
3189 /*
3190 * If this entry points at a vnode, and the
3191 * protection includes VM_PROT_EXECUTE, mark
3192 * the vnode as VEXECMAP.
3193 */
3194 if (UVM_ET_ISOBJ(current)) {
3195 struct uvm_object *uobj =
3196 current->object.uvm_obj;
3197
3198 if (UVM_OBJ_IS_VNODE(uobj) &&
3199 (current->protection & VM_PROT_EXECUTE)) {
3200 vn_markexec((struct vnode *) uobj);
3201 }
3202 }
3203 }
3204
3205 /*
3206 * If the map is configured to lock any future mappings,
3207 * wire this entry now if the old protection was VM_PROT_NONE
3208 * and the new protection is not VM_PROT_NONE.
3209 */
3210
3211 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3212 VM_MAPENT_ISWIRED(current) == 0 &&
3213 old_prot == VM_PROT_NONE &&
3214 new_prot != VM_PROT_NONE) {
3215 if (uvm_map_pageable(map, current->start,
3216 current->end, false,
3217 UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3218
3219 /*
3220 * If locking the entry fails, remember the
3221 * error if it's the first one. Note we
3222 * still continue setting the protection in
3223 * the map, but will return the error
3224 * condition regardless.
3225 *
3226 * XXX Ignore what the actual error is,
3227 * XXX just call it a resource shortage
3228 * XXX so that it doesn't get confused
3229 * XXX what uvm_map_protect() itself would
3230 * XXX normally return.
3231 */
3232
3233 error = ENOMEM;
3234 }
3235 }
3236 current = current->next;
3237 }
3238 pmap_update(map->pmap);
3239
3240 out:
3241 vm_map_unlock(map);
3242
3243 UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3244 return error;
3245 }
3246
3247 #undef MASK
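
/*
 * For example (sketch; page rounding and argument checks trimmed),
 * sys_mprotect() boils down to:
 */
#if 0
	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
	    prot & VM_PROT_ALL, false /* set protection, not max */);
#endif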
3248
3249 /*
3250 * uvm_map_inherit: set inheritance code for range of addrs in map.
3251 *
3252 * => map must be unlocked
3253 * => note that the inherit code is used during a "fork". see fork
3254 * code for details.
3255 */
3256
3257 int
3258 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3259 vm_inherit_t new_inheritance)
3260 {
3261 struct vm_map_entry *entry, *temp_entry;
3262 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3263 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3264 map, start, end, new_inheritance);
3265
3266 switch (new_inheritance) {
3267 case MAP_INHERIT_NONE:
3268 case MAP_INHERIT_COPY:
3269 case MAP_INHERIT_SHARE:
3270 break;
3271 default:
3272 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3273 return EINVAL;
3274 }
3275
3276 vm_map_lock(map);
3277 VM_MAP_RANGE_CHECK(map, start, end);
3278 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3279 entry = temp_entry;
3280 UVM_MAP_CLIP_START(map, entry, start, NULL);
3281 } else {
3282 entry = temp_entry->next;
3283 }
3284 while ((entry != &map->header) && (entry->start < end)) {
3285 UVM_MAP_CLIP_END(map, entry, end, NULL);
3286 entry->inheritance = new_inheritance;
3287 entry = entry->next;
3288 }
3289 vm_map_unlock(map);
3290 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3291 return 0;
3292 }
3293
3294 /*
3295 * uvm_map_advice: set advice code for range of addrs in map.
3296 *
3297 * => map must be unlocked
3298 */
3299
3300 int
3301 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3302 {
3303 struct vm_map_entry *entry, *temp_entry;
3304 UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3305 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3306 map, start, end, new_advice);
3307
3308 vm_map_lock(map);
3309 VM_MAP_RANGE_CHECK(map, start, end);
3310 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3311 entry = temp_entry;
3312 UVM_MAP_CLIP_START(map, entry, start, NULL);
3313 } else {
3314 entry = temp_entry->next;
3315 }
3316
3317 /*
3318 * XXXJRT: disallow holes?
3319 */
3320
3321 while ((entry != &map->header) && (entry->start < end)) {
3322 UVM_MAP_CLIP_END(map, entry, end, NULL);
3323
3324 switch (new_advice) {
3325 case MADV_NORMAL:
3326 case MADV_RANDOM:
3327 case MADV_SEQUENTIAL:
3328 /* nothing special here */
3329 break;
3330
3331 default:
3332 vm_map_unlock(map);
3333 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3334 return EINVAL;
3335 }
3336 entry->advice = new_advice;
3337 entry = entry->next;
3338 }
3339
3340 vm_map_unlock(map);
3341 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3342 return 0;
3343 }
3344
3345 /*
3346 * uvm_map_willneed: apply MADV_WILLNEED
3347 */
3348
3349 int
3350 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3351 {
3352 struct vm_map_entry *entry;
3353 UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3354 UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
3355 map, start, end, 0);
3356
3357 vm_map_lock_read(map);
3358 VM_MAP_RANGE_CHECK(map, start, end);
3359 if (!uvm_map_lookup_entry(map, start, &entry)) {
3360 entry = entry->next;
3361 }
3362 while (entry->start < end) {
3363 struct vm_amap * const amap = entry->aref.ar_amap;
3364 struct uvm_object * const uobj = entry->object.uvm_obj;
3365
3366 KASSERT(entry != &map->header);
3367 KASSERT(start < entry->end);
3368 /*
3369 * XXX IMPLEMENT ME.
3370 * Should invent a "weak" mode for uvm_fault()
3371 * which would only do the PGO_LOCKED pgo_get().
3372 *
3373 * for now, we handle only the easy but common case.
3374 */
3375 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3376 off_t offset;
3377 off_t size;
3378
3379 offset = entry->offset;
3380 if (entry->start < start) {
3381 offset += start - entry->start;
3382 }
3383 size = entry->end - MAX(start, entry->start);
3384 if (end < entry->end) {
3385 size -= entry->end - end;
3386 }
3387 uvm_readahead(uobj, offset, size);
3388 }
3389 entry = entry->next;
3390 }
3391 vm_map_unlock_read(map);
3392 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3393 return 0;
3394 }
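
/*
 * A sketch of how sys_madvise() dispatches to the two functions above
 * (simplified; the remaining advice values are omitted here):
 */
#if 0
	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(map, addr, addr + size, advice);
		break;
	case MADV_WILLNEED:
		error = uvm_map_willneed(map, addr, addr + size);
		break;
	default:
		error = EINVAL;
		break;
	}
#endif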
3395
3396 /*
3397 * uvm_map_pageable: sets the pageability of a range in a map.
3398 *
3399 * => wires map entries. should not be used for transient page locking.
3400 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3401 * => regions specified as not pageable require lock-down (wired) memory
3402 * and page tables.
3403 * => map must never be read-locked
3404 * => if islocked is true, map is already write-locked
3405 * => we always unlock the map, since we must downgrade to a read-lock
3406 * to call uvm_fault_wire()
3407 * => XXXCDC: check this and try and clean it up.
3408 */
3409
3410 int
3411 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3412 bool new_pageable, int lockflags)
3413 {
3414 struct vm_map_entry *entry, *start_entry, *failed_entry;
3415 int rv;
3416 #ifdef DIAGNOSTIC
3417 u_int timestamp_save;
3418 #endif
3419 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3420 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
3421 map, start, end, new_pageable);
3422 KASSERT(map->flags & VM_MAP_PAGEABLE);
3423
3424 if ((lockflags & UVM_LK_ENTER) == 0)
3425 vm_map_lock(map);
3426 VM_MAP_RANGE_CHECK(map, start, end);
3427
3428 /*
3429 * only one pageability change may take place at one time, since
3430 * uvm_fault_wire assumes it will be called only once for each
3431 * wiring/unwiring. therefore, we have to make sure we're actually
3432 * changing the pageability for the entire region. we do so before
3433 * making any changes.
3434 */
3435
3436 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3437 if ((lockflags & UVM_LK_EXIT) == 0)
3438 vm_map_unlock(map);
3439
3440 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3441 return EFAULT;
3442 }
3443 entry = start_entry;
3444
3445 /*
3446 * handle wiring and unwiring separately.
3447 */
3448
3449 if (new_pageable) { /* unwire */
3450 UVM_MAP_CLIP_START(map, entry, start, NULL);
3451
3452 /*
3453 * unwiring. first ensure that the range to be unwired is
3454 * really wired down and that there are no holes.
3455 */
3456
3457 while ((entry != &map->header) && (entry->start < end)) {
3458 if (entry->wired_count == 0 ||
3459 (entry->end < end &&
3460 (entry->next == &map->header ||
3461 entry->next->start > entry->end))) {
3462 if ((lockflags & UVM_LK_EXIT) == 0)
3463 vm_map_unlock(map);
3464 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3465 return EINVAL;
3466 }
3467 entry = entry->next;
3468 }
3469
3470 /*
3471 * POSIX 1003.1b - a single munlock call unlocks a region,
3472 * regardless of the number of mlock calls made on that
3473 * region.
3474 */
3475
3476 entry = start_entry;
3477 while ((entry != &map->header) && (entry->start < end)) {
3478 UVM_MAP_CLIP_END(map, entry, end, NULL);
3479 if (VM_MAPENT_ISWIRED(entry))
3480 uvm_map_entry_unwire(map, entry);
3481 entry = entry->next;
3482 }
3483 if ((lockflags & UVM_LK_EXIT) == 0)
3484 vm_map_unlock(map);
3485 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3486 return 0;
3487 }
3488
3489 /*
3490 * wire case: in two passes [XXXCDC: ugly block of code here]
3491 *
3492 * 1: holding the write lock, we create any anonymous maps that need
3493 * to be created. then we clip each map entry to the region to
3494 * be wired and increment its wiring count.
3495 *
3496 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3497 * in the pages for any newly wired area (wired_count == 1).
3498 *
3499 * downgrading to a read lock for uvm_fault_wire avoids a possible
3500 * deadlock with another thread that may have faulted on one of
3501 * the pages to be wired (it would mark the page busy, blocking
3502 * us, then in turn block on the map lock that we hold). because
3503 * of problems in the recursive lock package, we cannot upgrade
3504 * to a write lock in vm_map_lookup. thus, any actions that
3505 * require the write lock must be done beforehand. because we
3506 * keep the read lock on the map, the copy-on-write status of the
3507 * entries we modify here cannot change.
3508 */
3509
3510 while ((entry != &map->header) && (entry->start < end)) {
3511 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3512
3513 /*
3514 * perform actions of vm_map_lookup that need the
3515 * write lock on the map: create an anonymous map
3516 * for a copy-on-write region, or an anonymous map
3517 * for a zero-fill region. (XXXCDC: submap case
3518 * ok?)
3519 */
3520
3521 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3522 if (UVM_ET_ISNEEDSCOPY(entry) &&
3523 ((entry->max_protection & VM_PROT_WRITE) ||
3524 (entry->object.uvm_obj == NULL))) {
3525 amap_copy(map, entry, 0, start, end);
3526 /* XXXCDC: wait OK? */
3527 }
3528 }
3529 }
3530 UVM_MAP_CLIP_START(map, entry, start, NULL);
3531 UVM_MAP_CLIP_END(map, entry, end, NULL);
3532 entry->wired_count++;
3533
3534 /*
3535 * Check for holes
3536 */
3537
3538 if (entry->protection == VM_PROT_NONE ||
3539 (entry->end < end &&
3540 (entry->next == &map->header ||
3541 entry->next->start > entry->end))) {
3542
3543 /*
3544 * found one. amap creation actions do not need to
3545 * be undone, but the wired counts need to be restored.
3546 */
3547
3548 while (entry != &map->header && entry->end > start) {
3549 entry->wired_count--;
3550 entry = entry->prev;
3551 }
3552 if ((lockflags & UVM_LK_EXIT) == 0)
3553 vm_map_unlock(map);
3554 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3555 return EINVAL;
3556 }
3557 entry = entry->next;
3558 }
3559
3560 /*
3561 * Pass 2.
3562 */
3563
3564 #ifdef DIAGNOSTIC
3565 timestamp_save = map->timestamp;
3566 #endif
3567 vm_map_busy(map);
3568 vm_map_unlock(map);
3569
3570 rv = 0;
3571 entry = start_entry;
3572 while (entry != &map->header && entry->start < end) {
3573 if (entry->wired_count == 1) {
3574 rv = uvm_fault_wire(map, entry->start, entry->end,
3575 entry->max_protection, 1);
3576 if (rv) {
3577
3578 /*
3579 * wiring failed. break out of the loop.
3580 * we'll clean up the map below, once we
3581 * have a write lock again.
3582 */
3583
3584 break;
3585 }
3586 }
3587 entry = entry->next;
3588 }
3589
3590 if (rv) { /* failed? */
3591
3592 /*
3593 * Get back to an exclusive (write) lock.
3594 */
3595
3596 vm_map_lock(map);
3597 vm_map_unbusy(map);
3598
3599 #ifdef DIAGNOSTIC
3600 if (timestamp_save + 1 != map->timestamp)
3601 panic("uvm_map_pageable: stale map");
3602 #endif
3603
3604 /*
3605 * first drop the wiring count on all the entries
3606 * which haven't actually been wired yet.
3607 */
3608
3609 failed_entry = entry;
3610 while (entry != &map->header && entry->start < end) {
3611 entry->wired_count--;
3612 entry = entry->next;
3613 }
3614
3615 /*
3616 * now, unwire all the entries that were successfully
3617 * wired above.
3618 */
3619
3620 entry = start_entry;
3621 while (entry != failed_entry) {
3622 entry->wired_count--;
3623 if (VM_MAPENT_ISWIRED(entry) == 0)
3624 uvm_map_entry_unwire(map, entry);
3625 entry = entry->next;
3626 }
3627 if ((lockflags & UVM_LK_EXIT) == 0)
3628 vm_map_unlock(map);
3629 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3630 return (rv);
3631 }
3632
3633 if ((lockflags & UVM_LK_EXIT) == 0) {
3634 vm_map_unbusy(map);
3635 } else {
3636
3637 /*
3638 * Get back to an exclusive (write) lock.
3639 */
3640
3641 vm_map_lock(map);
3642 vm_map_unbusy(map);
3643 }
3644
3645 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3646 return 0;
3647 }
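
/*
 * For example (sketch; alignment and resource-limit checks trimmed),
 * sys_mlock() wires a range down with:
 */
#if 0
	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size,
	    false /* new_pageable: wire it */, 0);
#endif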
3648
3649 /*
3650 * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3651 * all mapped regions.
3652 *
3653 * => map must not be locked.
3654 * => if no flags are specified, all regions are unwired.
3655 * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3656 */
3657
3658 int
3659 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3660 {
3661 struct vm_map_entry *entry, *failed_entry;
3662 vsize_t size;
3663 int rv;
3664 #ifdef DIAGNOSTIC
3665 u_int timestamp_save;
3666 #endif
3667 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3668 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3669
3670 KASSERT(map->flags & VM_MAP_PAGEABLE);
3671
3672 vm_map_lock(map);
3673
3674 /*
3675 * handle wiring and unwiring separately.
3676 */
3677
3678 if (flags == 0) { /* unwire */
3679
3680 /*
3681 * POSIX 1003.1b -- munlockall unlocks all regions,
3682 * regardless of how many times mlockall has been called.
3683 */
3684
3685 for (entry = map->header.next; entry != &map->header;
3686 entry = entry->next) {
3687 if (VM_MAPENT_ISWIRED(entry))
3688 uvm_map_entry_unwire(map, entry);
3689 }
3690 map->flags &= ~VM_MAP_WIREFUTURE;
3691 vm_map_unlock(map);
3692 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3693 return 0;
3694 }
3695
3696 if (flags & MCL_FUTURE) {
3697
3698 /*
3699 * must wire all future mappings; remember this.
3700 */
3701
3702 map->flags |= VM_MAP_WIREFUTURE;
3703 }
3704
3705 if ((flags & MCL_CURRENT) == 0) {
3706
3707 /*
3708 * no more work to do!
3709 */
3710
3711 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3712 vm_map_unlock(map);
3713 return 0;
3714 }
3715
3716 /*
3717 * wire case: in three passes [XXXCDC: ugly block of code here]
3718 *
3719 * 1: holding the write lock, count all pages mapped by non-wired
3720 * entries. if this would cause us to go over our limit, we fail.
3721 *
3722 * 2: still holding the write lock, we create any anonymous maps that
3723 * need to be created. then we increment each entry's wiring count.
3724 *
3725 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3726 * in the pages for any newly wired area (wired_count == 1).
3727 *
3728 * downgrading to a read lock for uvm_fault_wire avoids a possible
3729 * deadlock with another thread that may have faulted on one of
3730 * the pages to be wired (it would mark the page busy, blocking
3731 * us, then in turn block on the map lock that we hold). because
3732 * of problems in the recursive lock package, we cannot upgrade
3733 * to a write lock in vm_map_lookup. thus, any actions that
3734 * require the write lock must be done beforehand. because we
3735 * keep the read lock on the map, the copy-on-write status of the
3736 * entries we modify here cannot change.
3737 */
3738
3739 for (size = 0, entry = map->header.next; entry != &map->header;
3740 entry = entry->next) {
3741 if (entry->protection != VM_PROT_NONE &&
3742 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3743 size += entry->end - entry->start;
3744 }
3745 }
3746
3747 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3748 vm_map_unlock(map);
3749 return ENOMEM;
3750 }
3751
3752 if (limit != 0 &&
3753 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3754 vm_map_unlock(map);
3755 return ENOMEM;
3756 }
3757
3758 /*
3759 * Pass 2.
3760 */
3761
3762 for (entry = map->header.next; entry != &map->header;
3763 entry = entry->next) {
3764 if (entry->protection == VM_PROT_NONE)
3765 continue;
3766 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3767
3768 /*
3769 * perform actions of vm_map_lookup that need the
3770 * write lock on the map: create an anonymous map
3771 * for a copy-on-write region, or an anonymous map
3772 * for a zero-fill region. (XXXCDC: submap case
3773 * ok?)
3774 */
3775
3776 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3777 if (UVM_ET_ISNEEDSCOPY(entry) &&
3778 ((entry->max_protection & VM_PROT_WRITE) ||
3779 (entry->object.uvm_obj == NULL))) {
3780 amap_copy(map, entry, 0, entry->start,
3781 entry->end);
3782 /* XXXCDC: wait OK? */
3783 }
3784 }
3785 }
3786 entry->wired_count++;
3787 }
3788
3789 /*
3790 * Pass 3.
3791 */
3792
3793 #ifdef DIAGNOSTIC
3794 timestamp_save = map->timestamp;
3795 #endif
3796 vm_map_busy(map);
3797 vm_map_unlock(map);
3798
3799 rv = 0;
3800 for (entry = map->header.next; entry != &map->header;
3801 entry = entry->next) {
3802 if (entry->wired_count == 1) {
3803 rv = uvm_fault_wire(map, entry->start, entry->end,
3804 entry->max_protection, 1);
3805 if (rv) {
3806
3807 /*
3808 * wiring failed. break out of the loop.
3809 * we'll clean up the map below, once we
3810 * have a write lock again.
3811 */
3812
3813 break;
3814 }
3815 }
3816 }
3817
3818 if (rv) {
3819
3820 /*
3821 * Get back to an exclusive (write) lock.
3822 */
3823
3824 vm_map_lock(map);
3825 vm_map_unbusy(map);
3826
3827 #ifdef DIAGNOSTIC
3828 if (timestamp_save + 1 != map->timestamp)
3829 panic("uvm_map_pageable_all: stale map");
3830 #endif
3831
3832 /*
3833 * first drop the wiring count on all the entries
3834 * which haven't actually been wired yet.
3835 *
3836 * Skip VM_PROT_NONE entries like we did above.
3837 */
3838
3839 failed_entry = entry;
3840 for (/* nothing */; entry != &map->header;
3841 entry = entry->next) {
3842 if (entry->protection == VM_PROT_NONE)
3843 continue;
3844 entry->wired_count--;
3845 }
3846
3847 /*
3848 * now, unwire all the entries that were successfully
3849 * wired above.
3850 *
3851 * Skip VM_PROT_NONE entries like we did above.
3852 */
3853
3854 for (entry = map->header.next; entry != failed_entry;
3855 entry = entry->next) {
3856 if (entry->protection == VM_PROT_NONE)
3857 continue;
3858 entry->wired_count--;
3859 if (VM_MAPENT_ISWIRED(entry))
3860 uvm_map_entry_unwire(map, entry);
3861 }
3862 vm_map_unlock(map);
3863 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3864 return (rv);
3865 }
3866
3867 vm_map_unbusy(map);
3868
3869 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3870 return 0;
3871 }
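
/*
 * For example (sketch), sys_mlockall() is little more than:
 */
#if 0
	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
	    flags /* MCL_CURRENT and/or MCL_FUTURE */,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif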
3872
3873 /*
3874 * uvm_map_clean: clean out a map range
3875 *
3876 * => valid flags:
3877 * if (flags & PGO_CLEANIT): dirty pages are cleaned first
3878 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3879 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3880 * if (flags & PGO_FREE): any cached pages are freed after clean
3881 * => returns an error if any part of the specified range isn't mapped
3882 * => never a need to flush amap layer since the anonymous memory has
3883 * no permanent home, but may deactivate pages there
3884 * => called from sys_msync() and sys_madvise()
3885 * => caller must not write-lock map (read OK).
3886 * => we may sleep while cleaning if SYNCIO [with map read-locked]
3887 */
3888
3889 int
3890 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3891 {
3892 struct vm_map_entry *current, *entry;
3893 struct uvm_object *uobj;
3894 struct vm_amap *amap;
3895 struct vm_anon *anon;
3896 struct vm_page *pg;
3897 vaddr_t offset;
3898 vsize_t size;
3899 voff_t uoff;
3900 int error, refs;
3901 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3902
3903 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
3904 map, start, end, flags);
3905 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3906 (PGO_FREE|PGO_DEACTIVATE));
3907
3908 vm_map_lock_read(map);
3909 VM_MAP_RANGE_CHECK(map, start, end);
3910 if (uvm_map_lookup_entry(map, start, &entry) == false) {
3911 vm_map_unlock_read(map);
3912 return EFAULT;
3913 }
3914
3915 /*
3916 * Make a first pass to check for holes and wiring problems.
3917 */
3918
3919 for (current = entry; current->start < end; current = current->next) {
3920 if (UVM_ET_ISSUBMAP(current)) {
3921 vm_map_unlock_read(map);
3922 return EINVAL;
3923 }
3924 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3925 vm_map_unlock_read(map);
3926 return EBUSY;
3927 }
3928 if (end <= current->end) {
3929 break;
3930 }
3931 if (current->end != current->next->start) {
3932 vm_map_unlock_read(map);
3933 return EFAULT;
3934 }
3935 }
3936
3937 error = 0;
3938 for (current = entry; start < end; current = current->next) {
3939 amap = current->aref.ar_amap; /* upper layer */
3940 uobj = current->object.uvm_obj; /* lower layer */
3941 KASSERT(start >= current->start);
3942
3943 /*
3944 * No amap cleaning necessary if:
3945 *
3946 * (1) There's no amap.
3947 *
3948 * (2) We're not deactivating or freeing pages.
3949 */
3950
3951 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3952 goto flush_object;
3953
3954 amap_lock(amap);
3955 offset = start - current->start;
3956 size = MIN(end, current->end) - start;
3957 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3958 anon = amap_lookup(&current->aref, offset);
3959 if (anon == NULL)
3960 continue;
3961
3962 mutex_enter(&anon->an_lock);
3963 pg = anon->an_page;
3964 if (pg == NULL) {
3965 mutex_exit(&anon->an_lock);
3966 continue;
3967 }
3968
3969 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3970
3971 /*
3972 * In these first 3 cases, we just deactivate the page.
3973 */
3974
3975 case PGO_CLEANIT|PGO_FREE:
3976 case PGO_CLEANIT|PGO_DEACTIVATE:
3977 case PGO_DEACTIVATE:
3978 deactivate_it:
3979 /*
3980 * skip the page if it's loaned or wired,
3981 * since it shouldn't be on a paging queue
3982 * at all in these cases.
3983 */
3984
3985 mutex_enter(&uvm_pageqlock);
3986 if (pg->loan_count != 0 ||
3987 pg->wire_count != 0) {
3988 mutex_exit(&uvm_pageqlock);
3989 mutex_exit(&anon->an_lock);
3990 continue;
3991 }
3992 KASSERT(pg->uanon == anon);
3993 uvm_pagedeactivate(pg);
3994 mutex_exit(&uvm_pageqlock);
3995 mutex_exit(&anon->an_lock);
3996 continue;
3997
3998 case PGO_FREE:
3999
4000 /*
4001 * If there are multiple references to
4002 * the amap, just deactivate the page.
4003 */
4004
4005 if (amap_refs(amap) > 1)
4006 goto deactivate_it;
4007
4008 /* skip the page if it's wired */
4009 if (pg->wire_count != 0) {
4010 mutex_exit(&anon->an_lock);
4011 continue;
4012 }
4013 				amap_unadd(&current->aref, offset);
4014 refs = --anon->an_ref;
4015 mutex_exit(&anon->an_lock);
4016 if (refs == 0)
4017 uvm_anfree(anon);
4018 continue;
4019 }
4020 }
4021 amap_unlock(amap);
4022
4023 flush_object:
4024 /*
4025 * flush pages if we've got a valid backing object.
4026 * note that we must always clean object pages before
4027 * freeing them since otherwise we could reveal stale
4028 * data from files.
4029 */
4030
4031 uoff = current->offset + (start - current->start);
4032 size = MIN(end, current->end) - start;
4033 if (uobj != NULL) {
4034 mutex_enter(&uobj->vmobjlock);
4035 if (uobj->pgops->pgo_put != NULL)
4036 error = (uobj->pgops->pgo_put)(uobj, uoff,
4037 uoff + size, flags | PGO_CLEANIT);
4038 else
4039 error = 0;
4040 }
4041 start += size;
4042 }
4043 vm_map_unlock_read(map);
4044 return (error);
4045 }
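
/*
 * Example (editor's sketch, not part of the original source): how a
 * caller in the style of sys_msync() might drive uvm_map_clean().
 * "example_msync" and its arguments are hypothetical.
 */
#if 0
static int
example_msync(struct vm_map *map, vaddr_t addr, vsize_t len)
{

	/*
	 * flush dirty pages synchronously, as msync(2) with MS_SYNC
	 * would; an EFAULT return means part of the range was unmapped.
	 */
	return uvm_map_clean(map, trunc_page(addr), round_page(addr + len),
	    PGO_CLEANIT | PGO_SYNCIO);
}
#endif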
4046
4047
4048 /*
4049 * uvm_map_checkprot: check protection in map
4050 *
4051  * => the region must be fully allocated and allow the specified protection.
4052 * => map must be read or write locked by caller.
4053 */
4054
4055 bool
4056 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4057 vm_prot_t protection)
4058 {
4059 struct vm_map_entry *entry;
4060 struct vm_map_entry *tmp_entry;
4061
4062 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4063 return (false);
4064 }
4065 entry = tmp_entry;
4066 while (start < end) {
4067 if (entry == &map->header) {
4068 return (false);
4069 }
4070
4071 /*
4072 * no holes allowed
4073 */
4074
4075 if (start < entry->start) {
4076 return (false);
4077 }
4078
4079 /*
4080 * check protection associated with entry
4081 */
4082
4083 if ((entry->protection & protection) != protection) {
4084 return (false);
4085 }
4086 start = entry->end;
4087 entry = entry->next;
4088 }
4089 return (true);
4090 }
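
/*
 * Example (editor's sketch, not part of the original source): verifying
 * that a range is fully mapped with write permission before touching it.
 * Note the read lock: uvm_map_checkprot() requires the caller to hold
 * the map locked.  "example_check_writable" is hypothetical.
 */
#if 0
static int
example_check_writable(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	bool ok;

	vm_map_lock_read(map);
	ok = uvm_map_checkprot(map, start, end, VM_PROT_READ | VM_PROT_WRITE);
	vm_map_unlock_read(map);
	return ok ? 0 : EFAULT;	/* hole or insufficient protection */
}
#endif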
4091
4092 /*
4093 * uvmspace_alloc: allocate a vmspace structure.
4094 *
4095 * - structure includes vm_map and pmap
4096 * - XXX: no locking on this structure
4097 * - refcnt set to 1, rest must be init'd by caller
4098 */
4099 struct vmspace *
4100 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
4101 {
4102 struct vmspace *vm;
4103 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
4104
4105 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
4106 uvmspace_init(vm, NULL, vmin, vmax);
4107 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
4108 return (vm);
4109 }
4110
4111 /*
4112 * uvmspace_init: initialize a vmspace structure.
4113 *
4114 * - XXX: no locking on this structure
4115 * - refcnt set to 1, rest must be init'd by caller
4116 */
4117 void
4118 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
4119 {
4120 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
4121
4122 memset(vm, 0, sizeof(*vm));
4123 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4124 #ifdef __USING_TOPDOWN_VM
4125 | VM_MAP_TOPDOWN
4126 #endif
4127 );
4128 if (pmap)
4129 pmap_reference(pmap);
4130 else
4131 pmap = pmap_create();
4132 vm->vm_map.pmap = pmap;
4133 vm->vm_refcnt = 1;
4134 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4135 }
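
/*
 * Example (editor's sketch, not part of the original source): obtaining
 * an initialized vmspace.  uvmspace_alloc() draws storage from the pool
 * and lets uvmspace_init() create a fresh pmap; passing an existing
 * pmap to uvmspace_init() shares it instead.  VM_MIN_ADDRESS and
 * VM_MAXUSER_ADDRESS are assumed to be the usual machine-dependent
 * user VA bounds.
 */
#if 0
static struct vmspace *
example_new_vmspace(void)
{
	struct vmspace *vm;

	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	/* ... populate and use &vm->vm_map ... */
	return vm;	/* caller drops the reference with uvmspace_free() */
}
#endif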
4136
4137 /*
4138 * uvmspace_share: share a vmspace between two processes
4139 *
4140 * - used for vfork, threads(?)
4141 */
4142
4143 void
4144 uvmspace_share(struct proc *p1, struct proc *p2)
4145 {
4146
4147 uvmspace_addref(p1->p_vmspace);
4148 p2->p_vmspace = p1->p_vmspace;
4149 }
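
/*
 * Example (editor's sketch, not part of the original source): the
 * fork-time choice between sharing and copying an address space.
 * "shared" corresponds to vfork(2) semantics; the helper and its
 * arguments are hypothetical.
 */
#if 0
static void
example_fork_vm(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared)
		uvmspace_share(p1, p2);		/* vfork: one shared vmspace */
	else
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
}
#endif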
4150
4151 #if 0
4152
4153 /*
4154 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4155 *
4156 * - XXX: no locking on vmspace
4157 */
4158
4159 void
4160 uvmspace_unshare(struct lwp *l)
4161 {
4162 struct proc *p = l->l_proc;
4163 struct vmspace *nvm, *ovm = p->p_vmspace;
4164
4165 if (ovm->vm_refcnt == 1)
4166 /* nothing to do: vmspace isn't shared in the first place */
4167 return;
4168
4169 /* make a new vmspace, still holding old one */
4170 nvm = uvmspace_fork(ovm);
4171
4172 kpreempt_disable();
4173 pmap_deactivate(l); /* unbind old vmspace */
4174 p->p_vmspace = nvm;
4175 pmap_activate(l); /* switch to new vmspace */
4176 kpreempt_enable();
4177
4178 uvmspace_free(ovm); /* drop reference to old vmspace */
4179 }
4180
4181 #endif
4182
4183 /*
4184 * uvmspace_exec: the process wants to exec a new program
4185 */
4186
4187 void
4188 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
4189 {
4190 struct proc *p = l->l_proc;
4191 struct vmspace *nvm, *ovm = p->p_vmspace;
4192 struct vm_map *map = &ovm->vm_map;
4193
4194 #ifdef __sparc__
4195 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
4196 kill_user_windows(l); /* before stack addresses go away */
4197 #endif
4198
4199 /*
4200 * see if more than one process is using this vmspace...
4201 */
4202
4203 if (ovm->vm_refcnt == 1) {
4204
4205 /*
4206 * if p is the only process using its vmspace then we can safely
4207 * recycle that vmspace for the program that is being exec'd.
4208 */
4209
4210 #ifdef SYSVSHM
4211 /*
4212 * SYSV SHM semantics require us to kill all segments on an exec
4213 */
4214
4215 if (ovm->vm_shm)
4216 shmexit(ovm);
4217 #endif
4218
4219 /*
4220 * POSIX 1003.1b -- "lock future mappings" is revoked
4221 * when a process execs another program image.
4222 */
4223
4224 map->flags &= ~VM_MAP_WIREFUTURE;
4225
4226 /*
4227 * now unmap the old program
4228 */
4229
4230 pmap_remove_all(map->pmap);
4231 uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4232 KASSERT(map->header.prev == &map->header);
4233 KASSERT(map->nentries == 0);
4234
4235 /*
4236 * resize the map
4237 */
4238
4239 vm_map_setmin(map, start);
4240 vm_map_setmax(map, end);
4241 } else {
4242
4243 /*
4244 		 * p's vmspace is being shared, so we can't reuse it for p since
4245 		 * it is still being used by the others.  allocate a new vmspace
4246 		 * for p
4247 */
4248
4249 nvm = uvmspace_alloc(start, end);
4250
4251 /*
4252 * install new vmspace and drop our ref to the old one.
4253 */
4254
4255 kpreempt_disable();
4256 pmap_deactivate(l);
4257 p->p_vmspace = nvm;
4258 pmap_activate(l);
4259 kpreempt_enable();
4260
4261 uvmspace_free(ovm);
4262 }
4263 }
4264
4265 /*
4266  * uvmspace_addref: add a reference to a vmspace.
4267 */
4268
4269 void
4270 uvmspace_addref(struct vmspace *vm)
4271 {
4272 struct vm_map *map = &vm->vm_map;
4273
4274 KASSERT((map->flags & VM_MAP_DYING) == 0);
4275
4276 mutex_enter(&map->misc_lock);
4277 KASSERT(vm->vm_refcnt > 0);
4278 vm->vm_refcnt++;
4279 mutex_exit(&map->misc_lock);
4280 }
4281
4282 /*
4283 * uvmspace_free: free a vmspace data structure
4284 */
4285
4286 void
4287 uvmspace_free(struct vmspace *vm)
4288 {
4289 struct vm_map_entry *dead_entries;
4290 struct vm_map *map = &vm->vm_map;
4291 int n;
4292
4293 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4294
4295 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
4296 mutex_enter(&map->misc_lock);
4297 n = --vm->vm_refcnt;
4298 mutex_exit(&map->misc_lock);
4299 if (n > 0)
4300 return;
4301
4302 /*
4303 * at this point, there should be no other references to the map.
4304 * delete all of the mappings, then destroy the pmap.
4305 */
4306
4307 map->flags |= VM_MAP_DYING;
4308 pmap_remove_all(map->pmap);
4309 #ifdef SYSVSHM
4310 /* Get rid of any SYSV shared memory segments. */
4311 if (vm->vm_shm != NULL)
4312 shmexit(vm);
4313 #endif
4314 if (map->nentries) {
4315 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4316 &dead_entries, NULL, 0);
4317 if (dead_entries != NULL)
4318 uvm_unmap_detach(dead_entries, 0);
4319 }
4320 KASSERT(map->nentries == 0);
4321 KASSERT(map->size == 0);
4322 mutex_destroy(&map->misc_lock);
4323 mutex_destroy(&map->mutex);
4324 rw_destroy(&map->lock);
4325 cv_destroy(&map->cv);
4326 pmap_destroy(map->pmap);
4327 pool_cache_put(&uvm_vmspace_cache, vm);
4328 }
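
/*
 * Example (editor's sketch, not part of the original source): pinning a
 * vmspace across a borrow.  A reference taken with uvmspace_addref()
 * keeps the structure and its map alive; uvmspace_free() drops it and
 * only tears the map down when the last reference goes away.  The
 * helper is hypothetical.
 */
#if 0
static void
example_borrow_vmspace(struct proc *p)
{
	struct vmspace *vm = p->p_vmspace;

	uvmspace_addref(vm);
	/* ... &vm->vm_map stays valid even if the owner exits ... */
	uvmspace_free(vm);
}
#endif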
4329
4330 /*
4331 * F O R K - m a i n e n t r y p o i n t
4332 */
4333 /*
4334 * uvmspace_fork: fork a process' main map
4335 *
4336 * => create a new vmspace for child process from parent.
4337 * => parent's map must not be locked.
4338 */
4339
4340 struct vmspace *
4341 uvmspace_fork(struct vmspace *vm1)
4342 {
4343 struct vmspace *vm2;
4344 struct vm_map *old_map = &vm1->vm_map;
4345 struct vm_map *new_map;
4346 struct vm_map_entry *old_entry;
4347 struct vm_map_entry *new_entry;
4348 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4349
4350 vm_map_lock(old_map);
4351
4352 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
4353 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4354 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4355 new_map = &vm2->vm_map; /* XXX */
4356
4357 old_entry = old_map->header.next;
4358 new_map->size = old_map->size;
4359
4360 /*
4361 * go entry-by-entry
4362 */
4363
4364 while (old_entry != &old_map->header) {
4365
4366 /*
4367 * first, some sanity checks on the old entry
4368 */
4369
4370 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4371 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4372 !UVM_ET_ISNEEDSCOPY(old_entry));
4373
4374 switch (old_entry->inheritance) {
4375 case MAP_INHERIT_NONE:
4376
4377 /*
4378 * drop the mapping, modify size
4379 */
4380 new_map->size -= old_entry->end - old_entry->start;
4381 break;
4382
4383 case MAP_INHERIT_SHARE:
4384
4385 /*
4386 * share the mapping: this means we want the old and
4387 * new entries to share amaps and backing objects.
4388 */
4389 /*
4390 * if the old_entry needs a new amap (due to prev fork)
4391 * then we need to allocate it now so that we have
4392 * something we own to share with the new_entry. [in
4393 * other words, we need to clear needs_copy]
4394 */
4395
4396 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4397 /* get our own amap, clears needs_copy */
4398 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4399 0, 0);
4400 /* XXXCDC: WAITOK??? */
4401 }
4402
4403 new_entry = uvm_mapent_alloc(new_map, 0);
4404 /* old_entry -> new_entry */
4405 uvm_mapent_copy(old_entry, new_entry);
4406
4407 /* new pmap has nothing wired in it */
4408 new_entry->wired_count = 0;
4409
4410 /*
4411 * gain reference to object backing the map (can't
4412 * be a submap, already checked this case).
4413 */
4414
4415 if (new_entry->aref.ar_amap)
4416 uvm_map_reference_amap(new_entry, AMAP_SHARED);
4417
4418 if (new_entry->object.uvm_obj &&
4419 new_entry->object.uvm_obj->pgops->pgo_reference)
4420 new_entry->object.uvm_obj->
4421 pgops->pgo_reference(
4422 new_entry->object.uvm_obj);
4423
4424 /* insert entry at end of new_map's entry list */
4425 uvm_map_entry_link(new_map, new_map->header.prev,
4426 new_entry);
4427
4428 break;
4429
4430 case MAP_INHERIT_COPY:
4431
4432 /*
4433 * copy-on-write the mapping (using mmap's
4434 * MAP_PRIVATE semantics)
4435 *
4436 * allocate new_entry, adjust reference counts.
4437 * (note that new references are read-only).
4438 */
4439
4440 new_entry = uvm_mapent_alloc(new_map, 0);
4441 /* old_entry -> new_entry */
4442 uvm_mapent_copy(old_entry, new_entry);
4443
4444 if (new_entry->aref.ar_amap)
4445 uvm_map_reference_amap(new_entry, 0);
4446
4447 if (new_entry->object.uvm_obj &&
4448 new_entry->object.uvm_obj->pgops->pgo_reference)
4449 new_entry->object.uvm_obj->pgops->pgo_reference
4450 (new_entry->object.uvm_obj);
4451
4452 /* new pmap has nothing wired in it */
4453 new_entry->wired_count = 0;
4454
4455 new_entry->etype |=
4456 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4457 uvm_map_entry_link(new_map, new_map->header.prev,
4458 new_entry);
4459
4460 /*
4461 * the new entry will need an amap. it will either
4462 * need to be copied from the old entry or created
4463 * from scratch (if the old entry does not have an
4464 * amap). can we defer this process until later
4465 * (by setting "needs_copy") or do we need to copy
4466 * the amap now?
4467 *
4468 * we must copy the amap now if any of the following
4469 * conditions hold:
4470 * 1. the old entry has an amap and that amap is
4471 * being shared. this means that the old (parent)
4472 * process is sharing the amap with another
4473 * process. if we do not clear needs_copy here
4474 * we will end up in a situation where both the
4475 			 * parent and child process are referring to the
4476 * same amap with "needs_copy" set. if the
4477 * parent write-faults, the fault routine will
4478 * clear "needs_copy" in the parent by allocating
4479 * a new amap. this is wrong because the
4480 * parent is supposed to be sharing the old amap
4481 * and the new amap will break that.
4482 *
4483 * 2. if the old entry has an amap and a non-zero
4484 * wire count then we are going to have to call
4485 * amap_cow_now to avoid page faults in the
4486 * parent process. since amap_cow_now requires
4487 * "needs_copy" to be clear we might as well
4488 * clear it here as well.
4489 *
4490 */
4491
4492 if (old_entry->aref.ar_amap != NULL) {
4493 if ((amap_flags(old_entry->aref.ar_amap) &
4494 AMAP_SHARED) != 0 ||
4495 VM_MAPENT_ISWIRED(old_entry)) {
4496
4497 amap_copy(new_map, new_entry,
4498 AMAP_COPY_NOCHUNK, 0, 0);
4499 /* XXXCDC: M_WAITOK ... ok? */
4500 }
4501 }
4502
4503 /*
4504 * if the parent's entry is wired down, then the
4505 * parent process does not want page faults on
4506 * access to that memory. this means that we
4507 * cannot do copy-on-write because we can't write
4508 * protect the old entry. in this case we
4509 * resolve all copy-on-write faults now, using
4510 * amap_cow_now. note that we have already
4511 * allocated any needed amap (above).
4512 */
4513
4514 if (VM_MAPENT_ISWIRED(old_entry)) {
4515
4516 /*
4517 * resolve all copy-on-write faults now
4518 * (note that there is nothing to do if
4519 * the old mapping does not have an amap).
4520 */
4521 if (old_entry->aref.ar_amap)
4522 amap_cow_now(new_map, new_entry);
4523
4524 } else {
4525
4526 /*
4527 			 * set up mappings to trigger copy-on-write faults.
4528 * we must write-protect the parent if it has
4529 * an amap and it is not already "needs_copy"...
4530 * if it is already "needs_copy" then the parent
4531 * has already been write-protected by a previous
4532 * fork operation.
4533 */
4534
4535 if (old_entry->aref.ar_amap &&
4536 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4537 if (old_entry->max_protection & VM_PROT_WRITE) {
4538 pmap_protect(old_map->pmap,
4539 old_entry->start,
4540 old_entry->end,
4541 old_entry->protection &
4542 ~VM_PROT_WRITE);
4543 }
4544 old_entry->etype |= UVM_ET_NEEDSCOPY;
4545 }
4546 }
4547 break;
4548 } /* end of switch statement */
4549 old_entry = old_entry->next;
4550 }
4551
4552 pmap_update(old_map->pmap);
4553 vm_map_unlock(old_map);
4554
4555 #ifdef SYSVSHM
4556 if (vm1->vm_shm)
4557 shmfork(vm1, vm2);
4558 #endif
4559
4560 #ifdef PMAP_FORK
4561 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4562 #endif
4563
4564 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4565 return (vm2);
4566 }
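
/*
 * Example (editor's sketch, not part of the original source): the
 * per-entry inheritance codes that drive the switch above can be set
 * with uvm_map_inherit(), defined earlier in this file (minherit(2)
 * ends up there).  Marking a range MAP_INHERIT_SHARE makes
 * uvmspace_fork() share the amap and backing object with the child
 * instead of marking them copy-on-write.  The helper is hypothetical.
 */
#if 0
static int
example_share_on_fork(struct proc *p, vaddr_t start, vaddr_t end)
{

	return uvm_map_inherit(&p->p_vmspace->vm_map, start, end,
	    MAP_INHERIT_SHARE);
}
#endif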
4567
4568
4569 /*
4570 * in-kernel map entry allocation.
4571 */
4572
4573 struct uvm_kmapent_hdr {
4574 LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4575 int ukh_nused;
4576 struct vm_map_entry *ukh_freelist;
4577 struct vm_map *ukh_map;
4578 struct vm_map_entry ukh_entries[0];
4579 };
4580
4581 #define UVM_KMAPENT_CHUNK \
4582 ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4583 / sizeof(struct vm_map_entry))
4584
4585 #define UVM_KHDR_FIND(entry) \
4586 ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
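
/*
 * Worked example (editor's note; the sizes are illustrative, not
 * authoritative): with 4096-byte pages, a header of roughly 40 bytes
 * and a vm_map_entry of roughly 80 bytes, UVM_KMAPENT_CHUNK comes to
 * (4096 - 40) / 80 = 50 entries per page.  UVM_KHDR_FIND simply masks
 * an entry pointer down to the start of its page, where the header
 * lives.
 */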
4587
4588
4589 #ifdef DIAGNOSTIC
4590 static struct vm_map *
4591 uvm_kmapent_map(struct vm_map_entry *entry)
4592 {
4593 const struct uvm_kmapent_hdr *ukh;
4594
4595 ukh = UVM_KHDR_FIND(entry);
4596 return ukh->ukh_map;
4597 }
4598 #endif
4599
4600 static inline struct vm_map_entry *
4601 uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4602 {
4603 struct vm_map_entry *entry;
4604
4605 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4606 KASSERT(ukh->ukh_nused >= 0);
4607
4608 entry = ukh->ukh_freelist;
4609 if (entry) {
4610 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4611 == UVM_MAP_KERNEL);
4612 ukh->ukh_freelist = entry->next;
4613 ukh->ukh_nused++;
4614 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4615 } else {
4616 KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4617 }
4618
4619 return entry;
4620 }
4621
4622 static inline void
4623 uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4624 {
4625
4626 KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4627 == UVM_MAP_KERNEL);
4628 KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4629 KASSERT(ukh->ukh_nused > 0);
4630 KASSERT(ukh->ukh_freelist != NULL ||
4631 ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4632 KASSERT(ukh->ukh_freelist == NULL ||
4633 ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4634
4635 ukh->ukh_nused--;
4636 entry->next = ukh->ukh_freelist;
4637 ukh->ukh_freelist = entry;
4638 }
4639
4640 /*
4641 * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4642 */
4643
4644 static struct vm_map_entry *
4645 uvm_kmapent_alloc(struct vm_map *map, int flags)
4646 {
4647 struct vm_page *pg;
4648 struct uvm_kmapent_hdr *ukh;
4649 struct vm_map_entry *entry;
4650 #ifndef PMAP_MAP_POOLPAGE
4651 struct uvm_map_args args;
4652 uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4653 UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4654 int error;
4655 #endif
4656 vaddr_t va;
4657 int i;
4658
4659 KDASSERT(UVM_KMAPENT_CHUNK > 2);
4660 KDASSERT(kernel_map != NULL);
4661 KASSERT(vm_map_pmap(map) == pmap_kernel());
4662
4663 UVMMAP_EVCNT_INCR(uke_alloc);
4664 entry = NULL;
4665 again:
4666 /*
4667 * try to grab an entry from freelist.
4668 */
4669 mutex_spin_enter(&uvm_kentry_lock);
4670 ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
4671 if (ukh) {
4672 entry = uvm_kmapent_get(ukh);
4673 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4674 LIST_REMOVE(ukh, ukh_listq);
4675 }
4676 mutex_spin_exit(&uvm_kentry_lock);
4677
4678 if (entry)
4679 return entry;
4680
4681 /*
4682 	 * there's no free entry for this vm_map.
4683 	 * now we need to allocate some vm_map_entries.
4684 	 * for simplicity, always allocate a one-page chunk of them at once.
4685 */
4686
4687 pg = uvm_pagealloc(NULL, 0, NULL,
4688 (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
4689 if (__predict_false(pg == NULL)) {
4690 if (flags & UVM_FLAG_NOWAIT)
4691 return NULL;
4692 uvm_wait("kme_alloc");
4693 goto again;
4694 }
4695
4696 #ifdef PMAP_MAP_POOLPAGE
4697 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
4698 KASSERT(va != 0);
4699 #else
4700 error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4701 0, mapflags, &args);
4702 if (error) {
4703 uvm_pagefree(pg);
4704 return NULL;
4705 }
4706
4707 va = args.uma_start;
4708
4709 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
4710 VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE, 0);
4711 pmap_update(vm_map_pmap(map));
4712
4713 #endif
4714 ukh = (void *)va;
4715
4716 /*
4717 	 * use the last entry for ukh itself.
4718 */
4719
4720 i = UVM_KMAPENT_CHUNK - 1;
4721 #ifndef PMAP_MAP_POOLPAGE
4722 entry = &ukh->ukh_entries[i--];
4723 entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4724 error = uvm_map_enter(map, &args, entry);
4725 KASSERT(error == 0);
4726 #endif
4727
4728 ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4729 ukh->ukh_map = map;
4730 ukh->ukh_freelist = NULL;
4731 for (; i >= 1; i--) {
4732 struct vm_map_entry *xentry = &ukh->ukh_entries[i];
4733
4734 xentry->flags = UVM_MAP_KERNEL;
4735 uvm_kmapent_put(ukh, xentry);
4736 }
4737 #ifdef PMAP_MAP_POOLPAGE
4738 KASSERT(ukh->ukh_nused == 1);
4739 #else
4740 KASSERT(ukh->ukh_nused == 2);
4741 #endif
4742
4743 mutex_spin_enter(&uvm_kentry_lock);
4744 LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4745 ukh, ukh_listq);
4746 mutex_spin_exit(&uvm_kentry_lock);
4747
4748 /*
4749 * return first entry.
4750 */
4751
4752 entry = &ukh->ukh_entries[0];
4753 entry->flags = UVM_MAP_KERNEL;
4754 UVMMAP_EVCNT_INCR(ukh_alloc);
4755
4756 return entry;
4757 }
4758
4759 /*
4760  * uvm_kmapent_free: free a map entry for an in-kernel map
4761 */
4762
4763 static void
4764 uvm_kmapent_free(struct vm_map_entry *entry)
4765 {
4766 struct uvm_kmapent_hdr *ukh;
4767 struct vm_page *pg;
4768 struct vm_map *map;
4769 #ifndef PMAP_UNMAP_POOLPAGE
4770 struct pmap *pmap;
4771 struct vm_map_entry *deadentry;
4772 #endif
4773 vaddr_t va;
4774 paddr_t pa;
4775
4776 UVMMAP_EVCNT_INCR(uke_free);
4777 ukh = UVM_KHDR_FIND(entry);
4778 map = ukh->ukh_map;
4779
4780 mutex_spin_enter(&uvm_kentry_lock);
4781 uvm_kmapent_put(ukh, entry);
4782 #ifdef PMAP_UNMAP_POOLPAGE
4783 if (ukh->ukh_nused > 0) {
4784 #else
4785 if (ukh->ukh_nused > 1) {
4786 #endif
4787 if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
4788 LIST_INSERT_HEAD(
4789 &vm_map_to_kernel(map)->vmk_kentry_free,
4790 ukh, ukh_listq);
4791 mutex_spin_exit(&uvm_kentry_lock);
4792 return;
4793 }
4794
4795 /*
4796 * now we can free this ukh.
4797 *
4798 * however, keep an empty ukh to avoid ping-pong.
4799 */
4800
4801 if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
4802 LIST_NEXT(ukh, ukh_listq) == NULL) {
4803 mutex_spin_exit(&uvm_kentry_lock);
4804 return;
4805 }
4806 LIST_REMOVE(ukh, ukh_listq);
4807 mutex_spin_exit(&uvm_kentry_lock);
4808
4809 va = (vaddr_t)ukh;
4810
4811 #ifdef PMAP_UNMAP_POOLPAGE
4812 KASSERT(ukh->ukh_nused == 0);
4813 pa = PMAP_UNMAP_POOLPAGE(va);
4814 KASSERT(pa != 0);
4815 #else
4816 KASSERT(ukh->ukh_nused == 1);
4817
4818 /*
4819 	 * remove the map entry for ukh itself.
4820 */
4821
4822 KASSERT((va & PAGE_MASK) == 0);
4823 vm_map_lock(map);
4824 uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
4825 KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4826 KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4827 KASSERT(deadentry->next == NULL);
4828 KASSERT(deadentry == &ukh->ukh_entries[UVM_KMAPENT_CHUNK - 1]);
4829
4830 /*
4831 * unmap the page from pmap and free it.
4832 */
4833
4834 pmap = vm_map_pmap(map);
4835 KASSERT(pmap == pmap_kernel());
4836 if (!pmap_extract(pmap, va, &pa))
4837 panic("%s: no mapping", __func__);
4838 pmap_kremove(va, PAGE_SIZE);
4839 pmap_update(vm_map_pmap(map));
4840 vm_map_unlock(map);
4841 #endif /* !PMAP_UNMAP_POOLPAGE */
4842 pg = PHYS_TO_VM_PAGE(pa);
4843 uvm_pagefree(pg);
4844 UVMMAP_EVCNT_INCR(ukh_free);
4845 }
4846
4847 static vsize_t
4848 uvm_kmapent_overhead(vsize_t size)
4849 {
4850
4851 /*
4852 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4853 * as the min allocation unit is PAGE_SIZE.
4854 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4855 	 * one of them is used to map the page itself.
4856 */
4857
4858 return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4859 PAGE_SIZE;
4860 }
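
/*
 * Worked example (editor's note, assuming 4096-byte pages and
 * UVM_KMAPENT_CHUNK == 50): a 1 MB mapping can fragment into at most
 * howmany(1048576, 4096) = 256 entries, which fit in
 * howmany(256, 50 - 1) = 6 entry pages, i.e. 24 kB of overhead.
 */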
4861
4862 /*
4863 * map entry reservation
4864 */
4865
4866 /*
4867 * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4868 *
4869 * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4870 * => caller shouldn't hold map locked.
4871 */
4872 int
4873 uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4874 int nentries, int flags)
4875 {
4876
4877 umr->umr_nentries = 0;
4878
4879 if ((flags & UVM_FLAG_QUANTUM) != 0)
4880 return 0;
4881
4882 if (!VM_MAP_USE_KMAPENT(map))
4883 return 0;
4884
4885 while (nentries--) {
4886 struct vm_map_entry *ent;
4887 ent = uvm_kmapent_alloc(map, flags);
4888 if (!ent) {
4889 uvm_mapent_unreserve(map, umr);
4890 return ENOMEM;
4891 }
4892 UMR_PUTENTRY(umr, ent);
4893 }
4894
4895 return 0;
4896 }
4897
4898 /*
4899  * uvm_mapent_unreserve: release any entries still held in a reservation.
4900  *
4901  * => caller shouldn't hold map locked.
4902  * => never fails or sleeps.
4903 */
4904 void
4905 uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4906 {
4907
4908 while (!UMR_EMPTY(umr))
4909 uvm_kmapent_free(UMR_GETENTRY(umr));
4910 }
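
/*
 * Example (editor's sketch, not part of the original source): the
 * reserve/lock/unreserve pattern, exactly as uvm_unmap1() uses it
 * below.  Entries needed for clipping are allocated up front so that
 * nothing has to sleep while the map is locked.
 */
#if 0
static void
example_clip_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct uvm_mapent_reservation umr;

	if (uvm_mapent_reserve(map, &umr, 2, 0) != 0)
		return;
	vm_map_lock(map);
	/* ... clip entries, consuming reserved entries from "umr" ... */
	vm_map_unlock(map);
	uvm_mapent_unreserve(map, &umr);
}
#endif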
4911
4912 /*
4913 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4914 *
4915 * => called with map locked.
4916  * => returns non-zero if successfully merged.
4917 */
4918
4919 int
4920 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4921 {
4922 struct uvm_object *uobj;
4923 struct vm_map_entry *next;
4924 struct vm_map_entry *prev;
4925 vsize_t size;
4926 int merged = 0;
4927 bool copying;
4928 int newetype;
4929
4930 if (VM_MAP_USE_KMAPENT(map)) {
4931 return 0;
4932 }
4933 if (entry->aref.ar_amap != NULL) {
4934 return 0;
4935 }
4936 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4937 return 0;
4938 }
4939
4940 uobj = entry->object.uvm_obj;
4941 size = entry->end - entry->start;
4942 copying = (flags & UVM_MERGE_COPYING) != 0;
4943 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4944
4945 next = entry->next;
4946 if (next != &map->header &&
4947 next->start == entry->end &&
4948 ((copying && next->aref.ar_amap != NULL &&
4949 amap_refs(next->aref.ar_amap) == 1) ||
4950 (!copying && next->aref.ar_amap == NULL)) &&
4951 UVM_ET_ISCOMPATIBLE(next, newetype,
4952 uobj, entry->flags, entry->protection,
4953 entry->max_protection, entry->inheritance, entry->advice,
4954 entry->wired_count) &&
4955 (uobj == NULL || entry->offset + size == next->offset)) {
4956 int error;
4957
4958 if (copying) {
4959 error = amap_extend(next, size,
4960 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4961 } else {
4962 error = 0;
4963 }
4964 if (error == 0) {
4965 if (uobj) {
4966 if (uobj->pgops->pgo_detach) {
4967 uobj->pgops->pgo_detach(uobj);
4968 }
4969 }
4970
4971 entry->end = next->end;
4972 clear_hints(map, next);
4973 uvm_map_entry_unlink(map, next);
4974 if (copying) {
4975 entry->aref = next->aref;
4976 entry->etype &= ~UVM_ET_NEEDSCOPY;
4977 }
4978 uvm_map_check(map, "trymerge forwardmerge");
4979 uvm_mapent_free_merged(map, next);
4980 merged++;
4981 }
4982 }
4983
4984 prev = entry->prev;
4985 if (prev != &map->header &&
4986 prev->end == entry->start &&
4987 ((copying && !merged && prev->aref.ar_amap != NULL &&
4988 amap_refs(prev->aref.ar_amap) == 1) ||
4989 (!copying && prev->aref.ar_amap == NULL)) &&
4990 UVM_ET_ISCOMPATIBLE(prev, newetype,
4991 uobj, entry->flags, entry->protection,
4992 entry->max_protection, entry->inheritance, entry->advice,
4993 entry->wired_count) &&
4994 (uobj == NULL ||
4995 prev->offset + prev->end - prev->start == entry->offset)) {
4996 int error;
4997
4998 if (copying) {
4999 error = amap_extend(prev, size,
5000 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
5001 } else {
5002 error = 0;
5003 }
5004 if (error == 0) {
5005 if (uobj) {
5006 if (uobj->pgops->pgo_detach) {
5007 uobj->pgops->pgo_detach(uobj);
5008 }
5009 entry->offset = prev->offset;
5010 }
5011
5012 entry->start = prev->start;
5013 clear_hints(map, prev);
5014 uvm_map_entry_unlink(map, prev);
5015 if (copying) {
5016 entry->aref = prev->aref;
5017 entry->etype &= ~UVM_ET_NEEDSCOPY;
5018 }
5019 uvm_map_check(map, "trymerge backmerge");
5020 uvm_mapent_free_merged(map, prev);
5021 merged++;
5022 }
5023 }
5024
5025 return merged;
5026 }
5027
5028 /*
5029 * uvm_map_create: create map
5030 */
5031
5032 struct vm_map *
5033 uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5034 {
5035 struct vm_map *result;
5036
5037 result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
5038 uvm_map_setup(result, vmin, vmax, flags);
5039 result->pmap = pmap;
5040 return(result);
5041 }
5042
5043 /*
5044 * uvm_map_setup: init map
5045 *
5046 * => map must not be in service yet.
5047 */
5048
5049 void
5050 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5051 {
5052 int ipl;
5053
5054 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
5055 map->header.next = map->header.prev = &map->header;
5056 map->nentries = 0;
5057 map->size = 0;
5058 map->ref_count = 1;
5059 vm_map_setmin(map, vmin);
5060 vm_map_setmax(map, vmax);
5061 map->flags = flags;
5062 map->first_free = &map->header;
5063 map->hint = &map->header;
5064 map->timestamp = 0;
5065 map->busy = NULL;
5066
5067 if ((flags & VM_MAP_INTRSAFE) != 0) {
5068 ipl = IPL_VM;
5069 } else {
5070 ipl = IPL_NONE;
5071 }
5072
5073 rw_init(&map->lock);
5074 cv_init(&map->cv, "vm_map");
5075 mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5076 mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
5077 }
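
/*
 * Example (editor's sketch, not part of the original source): creating
 * a standalone pageable map over the kernel pmap.  The address range
 * is hypothetical; real callers usually carve submaps out of
 * kernel_map with uvm_km_suballoc() instead.
 */
#if 0
static struct vm_map *
example_make_map(vaddr_t vstart, vaddr_t vend)
{

	return uvm_map_create(pmap_kernel(), vstart, vend, VM_MAP_PAGEABLE);
}
#endif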
5078
5079
5080 /*
5081 * U N M A P - m a i n e n t r y p o i n t
5082 */
5083
5084 /*
5085  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
5086 *
5087 * => caller must check alignment and size
5088 * => map must be unlocked (we will lock it)
5089 * => flags is UVM_FLAG_QUANTUM or 0.
5090 */
5091
5092 void
5093 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5094 {
5095 struct vm_map_entry *dead_entries;
5096 struct uvm_mapent_reservation umr;
5097 UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5098
5099 UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5100 map, start, end, 0);
5101 if (map == kernel_map) {
5102 LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5103 }
5104 /*
5105 	 * the work is done by helper functions.  wipe the pmap mappings
5106 	 * and then detach from the dead entries...
5107 */
5108 uvm_mapent_reserve(map, &umr, 2, flags);
5109 vm_map_lock(map);
5110 uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5111 vm_map_unlock(map);
5112 uvm_mapent_unreserve(map, &umr);
5113
5114 if (dead_entries != NULL)
5115 uvm_unmap_detach(dead_entries, 0);
5116
5117 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5118 }
5119
5120
5121 /*
5122 * uvm_map_reference: add reference to a map
5123 *
5124 * => map need not be locked (we use misc_lock).
5125 */
5126
5127 void
5128 uvm_map_reference(struct vm_map *map)
5129 {
5130 mutex_enter(&map->misc_lock);
5131 map->ref_count++;
5132 mutex_exit(&map->misc_lock);
5133 }
5134
5135 struct vm_map_kernel *
5136 vm_map_to_kernel(struct vm_map *map)
5137 {
5138
5139 KASSERT(VM_MAP_IS_KERNEL(map));
5140
5141 return (struct vm_map_kernel *)map;
5142 }
5143
5144 bool
5145 vm_map_starved_p(struct vm_map *map)
5146 {
5147
5148 if ((map->flags & VM_MAP_WANTVA) != 0) {
5149 return true;
5150 }
5151 	/* XXX crude: starved when more than 15/16 of the VA range is in use */
5152 if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
5153 return true;
5154 }
5155 return false;
5156 }
5157
5158 #if defined(DDB) || defined(DEBUGPRINT)
5159
5160 /*
5161 * uvm_map_printit: actually prints the map
5162 */
5163
5164 void
5165 uvm_map_printit(struct vm_map *map, bool full,
5166 void (*pr)(const char *, ...))
5167 {
5168 struct vm_map_entry *entry;
5169
5170 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
5171 vm_map_max(map));
5172 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
5173 map->nentries, map->size, map->ref_count, map->timestamp,
5174 map->flags);
5175 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5176 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5177 if (!full)
5178 return;
5179 for (entry = map->header.next; entry != &map->header;
5180 entry = entry->next) {
5181 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
5182 entry, entry->start, entry->end, entry->object.uvm_obj,
5183 (long long)entry->offset, entry->aref.ar_amap,
5184 entry->aref.ar_pageoff);
5185 (*pr)(
5186 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5187 "wc=%d, adv=%d\n",
5188 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5189 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5190 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5191 entry->protection, entry->max_protection,
5192 entry->inheritance, entry->wired_count, entry->advice);
5193 }
5194 }
5195
5196 void
5197 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5198 {
5199 struct vm_map *map;
5200
5201 for (map = kernel_map;;) {
5202 struct vm_map_entry *entry;
5203
5204 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5205 break;
5206 }
5207 (*pr)("%p is %p+%zu from VMMAP %p\n",
5208 (void *)addr, (void *)entry->start,
5209 (size_t)(addr - (uintptr_t)entry->start), map);
5210 if (!UVM_ET_ISSUBMAP(entry)) {
5211 break;
5212 }
5213 map = entry->object.sub_map;
5214 }
5215 }
5216
5217 #endif /* DDB || DEBUGPRINT */
5218