1 /* $NetBSD: uvm_map.c,v 1.20 1998/05/22 02:01:54 chuck Exp $ */
2
3 /*
4 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
5 * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
6 */
7 /*
8 * Copyright (c) 1997 Charles D. Cranor and Washington University.
9 * Copyright (c) 1991, 1993, The Regents of the University of California.
10 *
11 * All rights reserved.
12 *
13 * This code is derived from software contributed to Berkeley by
14 * The Mach Operating System project at Carnegie-Mellon University.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. All advertising materials mentioning features or use of this software
25 * must display the following acknowledgement:
26 * This product includes software developed by Charles D. Cranor,
27 * Washington University, the University of California, Berkeley and
28 * its contributors.
29 * 4. Neither the name of the University nor the names of its contributors
30 * may be used to endorse or promote products derived from this software
31 * without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 * SUCH DAMAGE.
44 *
45 * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
46 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
47 *
48 *
49 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
50 * All rights reserved.
51 *
52 * Permission to use, copy, modify and distribute this software and
53 * its documentation is hereby granted, provided that both the copyright
54 * notice and this permission notice appear in all copies of the
55 * software, derivative works or modified versions, and any portions
56 * thereof, and that both notices appear in supporting documentation.
57 *
58 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61 *
62 * Carnegie Mellon requests users of this software to return to
63 *
64 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
65 * School of Computer Science
66 * Carnegie Mellon University
67 * Pittsburgh PA 15213-3890
68 *
69 * any improvements or extensions that they make and grant Carnegie the
70 * rights to redistribute these changes.
71 */
72
73 #include "opt_uvmhist.h"
74 #include "opt_pmap_new.h"
75
76 /*
77 * uvm_map.c: uvm map operations
78 */
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/mman.h>
83 #include <sys/proc.h>
84 #include <sys/malloc.h>
85
86 #ifdef SYSVSHM
87 #include <sys/shm.h>
88 #endif
89
90 #include <vm/vm.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_kern.h>
93
94 #define UVM_MAP
95 #include <uvm/uvm.h>
96
97 struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
98 struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;
99
100 /*
101 * macros
102 */
103
104 /*
105 * uvm_map_entry_link: insert entry into a map
106 *
107 * => map must be locked
108 */
109 #define uvm_map_entry_link(map, after_where, entry) do { \
110 (map)->nentries++; \
111 (entry)->prev = (after_where); \
112 (entry)->next = (after_where)->next; \
113 (entry)->prev->next = (entry); \
114 (entry)->next->prev = (entry); \
115 } while (0)
116
117 /*
118 * uvm_map_entry_unlink: remove entry from a map
119 *
120 * => map must be locked
121 */
122 #define uvm_map_entry_unlink(map, entry) do { \
123 (map)->nentries--; \
124 (entry)->next->prev = (entry)->prev; \
125 (entry)->prev->next = (entry)->next; \
126 } while (0)
127
128 /*
129 * SAVE_HINT: saves the specified entry as the hint for future lookups.
130 *
131 * => map need not be locked (protected by hint_lock).
132 */
133 #define SAVE_HINT(map,value) do { \
134 simple_lock(&(map)->hint_lock); \
135 (map)->hint = (value); \
136 simple_unlock(&(map)->hint_lock); \
137 } while (0)
138
139 /*
140 * VM_MAP_RANGE_CHECK: check and correct range
141 *
142 * => map must at least be read locked
143 */
144
145 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
146 if (start < vm_map_min(map)) \
147 start = vm_map_min(map); \
148 if (end > vm_map_max(map)) \
149 end = vm_map_max(map); \
150 if (start > end) \
151 start = end; \
152 } while (0)
153
154 /*
155 * local prototypes
156 */
157
158 static vm_map_entry_t uvm_mapent_alloc __P((vm_map_t));
159 static void uvm_mapent_copy __P((vm_map_entry_t,vm_map_entry_t));
160 static void uvm_mapent_free __P((vm_map_entry_t));
161 static void uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
162
163 /*
164 * local inlines
165 */
166
167 /*
168 * uvm_mapent_alloc: allocate a map entry
169 *
170 * => XXX: static pool for kernel map?
171 */
172
173 static __inline vm_map_entry_t
174 uvm_mapent_alloc(map)
175 vm_map_t map;
176 {
177 vm_map_entry_t me;
178 int s;
179 UVMHIST_FUNC("uvm_mapent_alloc");
180 UVMHIST_CALLED(maphist);
181
182 if (map->entries_pageable) {
183 MALLOC(me, vm_map_entry_t, sizeof(struct vm_map_entry),
184 M_VMMAPENT, M_WAITOK);
185 me->flags = 0;
186 /* me can't be null, wait ok */
187
188 } else {
189 s = splimp(); /* protect kentry_free list with splimp */
190 simple_lock(&uvm.kentry_lock);
191 me = uvm.kentry_free;
192 if (me) uvm.kentry_free = me->next;
193 simple_unlock(&uvm.kentry_lock);
194 splx(s);
195 if (!me)
196 panic("mapent_alloc: out of kernel map entries, check MAX_KMAPENT");
197 me->flags = UVM_MAP_STATIC;
198 }
199
200 UVMHIST_LOG(maphist, "<- new entry=0x%x [pageable=%d]",
201 me, map->entries_pageable, 0, 0);
202 return(me);
203
204 }
205
206 /*
207 * uvm_mapent_free: free map entry
208 *
209 * => XXX: static pool for kernel map?
210 */
211
212 static __inline void
213 uvm_mapent_free(me)
214 vm_map_entry_t me;
215 {
216 int s;
217 UVMHIST_FUNC("uvm_mapent_free");
218 UVMHIST_CALLED(maphist);
219 UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
220 me, me->flags, 0, 0);
221 if ((me->flags & UVM_MAP_STATIC) == 0) {
222 FREE(me, M_VMMAPENT);
223 } else {
224 s = splimp(); /* protect kentry_free list with splimp */
225 simple_lock(&uvm.kentry_lock);
226 me->next = uvm.kentry_free;
227 uvm.kentry_free = me;
228 simple_unlock(&uvm.kentry_lock);
229 splx(s);
230 }
231 }
232
233 /*
234 * uvm_mapent_copy: copy a map entry, preserving flags
235 */
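/*
 * note (added for clarity): the bcopy below deliberately stops at the
 * uvm_map_entry_stop_copy marker in struct vm_map_entry, so the
 * destination entry keeps its own "flags" word (e.g. UVM_MAP_STATIC for
 * entries from the static kernel pool) -- that is what "preserving
 * flags" means here.
 */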
236
237 static __inline void
238 uvm_mapent_copy(src, dst)
239 vm_map_entry_t src;
240 vm_map_entry_t dst;
241 {
242
243 bcopy(src, dst, ((char *)&src->uvm_map_entry_stop_copy) - ((char*)src));
244 }
245
246 /*
247 * uvm_map_entry_unwire: unwire a map entry
248 *
249 * => map should be locked by caller
250 */
251
252 static __inline void
253 uvm_map_entry_unwire(map, entry)
254 vm_map_t map;
255 vm_map_entry_t entry;
256 {
257
258 uvm_fault_unwire(map->pmap, entry->start, entry->end);
259 entry->wired_count = 0;
260 }
261
262 /*
263 * uvm_map_init: init mapping system at boot time. note that we allocate
264 * and init the static pool of vm_map_entry_t's for the kernel here.
265 */
266
267 void
268 uvm_map_init()
269 {
270 static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
271 #if defined(UVMHIST)
272 static struct uvm_history_ent maphistbuf[100];
273 static struct uvm_history_ent pdhistbuf[100];
274 #endif
275 int lcv;
276
277 /*
278 * first, init logging system.
279 */
280
281 UVMHIST_FUNC("uvm_map_init");
282 UVMHIST_INIT_STATIC(maphist, maphistbuf);
283 UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
284 UVMHIST_CALLED(maphist);
285 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
286 UVMCNT_INIT(uvm_map_call, UVMCNT_CNT, 0,
287 "# uvm_map() successful calls", 0);
288 UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
289 UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward",
290 0);
291 UVMCNT_INIT(uvm_mlk_call, UVMCNT_CNT, 0, "# map lookup calls", 0);
292 UVMCNT_INIT(uvm_mlk_hint, UVMCNT_CNT, 0, "# map lookup hint hits", 0);
293
294 /*
295 * now set up static pool of kernel map entries ...
296 */
297
298 simple_lock_init(&uvm.kentry_lock);
299 uvm.kentry_free = NULL;
300 for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
301 kernel_map_entry[lcv].next = uvm.kentry_free;
302 uvm.kentry_free = &kernel_map_entry[lcv];
303 }
304
305 }
306
307 /*
308 * clippers
309 */
310
311 /*
312 * uvm_map_clip_start: ensure that the entry begins at or after
313 * the starting address, if it doesn't we split the entry.
314 *
315 * => caller should use UVM_MAP_CLIP_START macro rather than calling
316 * this directly
317 * => map must be locked by caller
318 */
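/*
 * for reference, the UVM_MAP_CLIP_START macro (uvm_map.h) is just a
 * cheap inline guard around this function; roughly (a sketch, not the
 * verbatim macro):
 *
 *	if (VA > ENTRY->start)
 *		uvm_map_clip_start(MAP, ENTRY, VA);
 *
 * so callers only pay for a split when one is actually needed.
 */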
319
320 void
321 uvm_map_clip_start(map, entry, start)
322 register vm_map_t map;
323 register vm_map_entry_t entry;
324 register vm_offset_t start;
325 {
327 register vm_map_entry_t new_entry;
328 vm_offset_t new_adj;
329
330 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
331
332 /*
333 * Split off the front portion. note that we must insert the new
334 * entry BEFORE this one, so that this entry has the specified
335 * starting address.
336 */
337
338 new_entry = uvm_mapent_alloc(map);
339 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
340
341 new_entry->end = start;
342 new_adj = start - new_entry->start;
343 if (entry->object.uvm_obj)
344 entry->offset += new_adj; /* shift start over */
345 entry->start = start;
346
347 if (new_entry->aref.ar_amap) {
348 amap_splitref(&new_entry->aref, &entry->aref, new_adj);
349 }
350
351 uvm_map_entry_link(map, entry->prev, new_entry);
352
353 if (UVM_ET_ISMAP(entry)) {
354 uvm_map_reference(new_entry->object.share_map);
355 } else {
356 if (UVM_ET_ISOBJ(entry) &&
357 entry->object.uvm_obj->pgops &&
358 entry->object.uvm_obj->pgops->pgo_reference)
359 entry->object.uvm_obj->pgops->pgo_reference(
360 entry->object.uvm_obj);
361 }
362 }
363
364 /*
365 * uvm_map_clip_end: ensure that the entry ends at or before
366 * the ending address, if it doesn't we split the entry
367 *
368 * => caller should use UVM_MAP_CLIP_END macro rather than calling
369 * this directly
370 * => map must be locked by caller
371 */
372
373 void
374 uvm_map_clip_end(map, entry, end)
375 vm_map_t map;
376 vm_map_entry_t entry;
377 vm_offset_t end;
378 {
379 vm_map_entry_t new_entry;
380 vm_offset_t new_adj; /* #bytes we move start forward */
381
382 /*
383 * Create a new entry and insert it
384 * AFTER the specified entry
385 */
386
387 new_entry = uvm_mapent_alloc(map);
388 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
389
390 new_entry->start = entry->end = end;
391 new_adj = end - entry->start;
392 if (new_entry->object.uvm_obj)
393 new_entry->offset += new_adj;
394
395 if (entry->aref.ar_amap)
396 amap_splitref(&entry->aref, &new_entry->aref, new_adj);
397
398 uvm_map_entry_link(map, entry, new_entry);
399
400 if (UVM_ET_ISMAP(entry)) {
401 uvm_map_reference(new_entry->object.share_map);
402 } else {
403 if (UVM_ET_ISOBJ(entry) &&
404 entry->object.uvm_obj->pgops &&
405 entry->object.uvm_obj->pgops->pgo_reference)
406 entry->object.uvm_obj->pgops->pgo_reference(
407 entry->object.uvm_obj);
408 }
409 }
410
411
412 /*
413 * M A P - m a i n e n t r y p o i n t
414 */
415 /*
416 * uvm_map: establish a valid mapping in a map
417 *
418 * => assume startp is page aligned.
419 * => assume size is a multiple of PAGE_SIZE.
420 * => assume sys_mmap provides enough of a "hint" to have us skip
421 * over text/data/bss area.
422 * => map must be unlocked (we will lock it)
423 * => <uobj,uoffset> value meanings (4 cases):
424 * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
425 * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
426 * [3] <uobj,uoffset> == normal mapping
427 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
428 *
429 * case [4] is for kernel mappings where we don't know the offset until
430 * we've found a virtual address. note that kernel object offsets are
431 * always relative to vm_map_min(kernel_map).
432 * => XXXCDC: need way to map in external amap?
433 */
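/*
 * illustrative call (modelled on uvm_map_reserve() below, which maps a
 * blank region with no backing object; "offset" here is only a
 * PMAP_PREFER hint, case [1] above):
 *
 *	if (uvm_map(map, &va, size, NULL, offset,
 *	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS)
 *		... handle failure (e.g. KERN_NO_SPACE) ...
 */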
434
435 int
436 uvm_map(map, startp, size, uobj, uoffset, flags)
437 vm_map_t map;
438 vm_offset_t *startp; /* IN/OUT */
439 vm_size_t size;
440 struct uvm_object *uobj;
441 vm_offset_t uoffset;
442 uvm_flag_t flags;
443 {
444 vm_map_entry_t prev_entry, new_entry;
445 vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
446 UVM_MAXPROTECTION(flags);
447 vm_inherit_t inherit = UVM_INHERIT(flags);
448 int advice = UVM_ADVICE(flags);
449 UVMHIST_FUNC("uvm_map");
450 UVMHIST_CALLED(maphist);
451
452 UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
453 map, *startp, size, flags);
454 UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
455
456 /*
457 * step 0: sanity check of protection code
458 */
459
460 if ((prot & maxprot) != prot) {
461 UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
462 prot, maxprot,0,0);
463 return(KERN_PROTECTION_FAILURE);
464 }
465
466 /*
467 * step 1: figure out where to put new VM range
468 */
469
470 if (vm_map_lock_try(map) == FALSE) {
471 if (flags & UVM_FLAG_TRYLOCK)
472 return(KERN_FAILURE);
473 vm_map_lock(map); /* could sleep here */
474 }
475 if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
476 uobj, uoffset, flags & UVM_FLAG_FIXED)) == NULL) {
477 UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
478 vm_map_unlock(map);
479 return (KERN_NO_SPACE);
480 }
481
482 #if defined(PMAP_GROWKERNEL) /* hack */
483 {
484 /* locked by kernel_map lock */
485 static vm_offset_t maxkaddr = 0;
486
487 /*
488 * hack: grow kernel PTPs in advance.
489 */
490 if (map == kernel_map && maxkaddr < (*startp + size)) {
491 pmap_growkernel(*startp + size);
492 maxkaddr = *startp + size;
493 }
494 }
495 #endif
496
497 UVMCNT_INCR(uvm_map_call);
498
499 /*
500 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
501 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
502 * either case we want to zero it before storing it in the map entry
503 * (because it looks strange and confusing when debugging...)
504 *
505 * if uobj is not null
506 * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
507 * and we do not need to change uoffset.
508 * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
509 * now (based on the starting address of the map). this case is
510 * for kernel object mappings where we don't know the offset until
511 * the virtual address is found (with uvm_map_findspace). the
512 * offset is the distance we are from the start of the map.
513 */
514
515 if (uobj == NULL) {
516 uoffset = 0;
517 } else {
518 if (uoffset == UVM_UNKNOWN_OFFSET) {
519 #ifdef DIAGNOSTIC
520 if (uobj->uo_refs != UVM_OBJ_KERN)
521 panic("uvm_map: unknown offset with non-kernel object");
522 #endif
523 uoffset = *startp - vm_map_min(kernel_map);
524 }
525 }
526
527 /*
528 * step 2: try and insert in map by extending previous entry, if
529 * possible
530 * XXX: we don't try and pull back the next entry. might be useful
531 * for a stack, but we are currently allocating our stack in advance.
532 */
533
534 if ((flags & UVM_FLAG_NOMERGE) == 0 &&
535 prev_entry->end == *startp && prev_entry != &map->header &&
536 prev_entry->object.uvm_obj == uobj) {
537
538 if (uobj && prev_entry->offset +
539 (prev_entry->end - prev_entry->start) != uoffset)
540 goto step3;
541
542 if (UVM_ET_ISMAP(prev_entry))
543 goto step3;
544
545 if (prev_entry->protection != prot ||
546 prev_entry->max_protection != maxprot)
547 goto step3;
548
549 if (prev_entry->inheritance != inherit ||
550 prev_entry->advice != advice)
551 goto step3;
552
553 /* wired_count's must match (new area is unwired) */
554 if (prev_entry->wired_count)
555 goto step3;
556
557 /*
558 * can't extend a shared amap. note: no need to lock amap to
559 * look at am_ref since we don't care about its exact value.
560 * if it is one (i.e. we have the only reference) it will stay there
561 */
562
563 if (prev_entry->aref.ar_amap &&
564 prev_entry->aref.ar_amap->am_ref != 1) {
565 goto step3;
566 }
567
568 /* got it! */
569
570 UVMCNT_INCR(map_backmerge);
571 UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
572
573 /*
574 * drop our reference to uobj since we are extending a reference
575 * that we already have (the ref count can not drop to zero).
576 */
577 if (uobj && uobj->pgops->pgo_detach)
578 uobj->pgops->pgo_detach(uobj);
579
580 if (prev_entry->aref.ar_amap) {
581 amap_extend(prev_entry, size);
582 }
583
584 prev_entry->end += size;
585 map->size += size;
586
587 UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
588 vm_map_unlock(map);
589 return (KERN_SUCCESS);
590
591 }
592 step3:
593 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
594
595 /*
596 * check for possible forward merge (which we don't do) and count
597 * the number of times we missed a *possible* chance to merge more
598 */
599
600 if ((flags & UVM_FLAG_NOMERGE) == 0 &&
601 prev_entry->next != &map->header &&
602 prev_entry->next->start == (*startp + size))
603 UVMCNT_INCR(map_forwmerge);
604
605 /*
606 * step 3: allocate new entry and link it in
607 */
608
609 new_entry = uvm_mapent_alloc(map);
610 new_entry->start = *startp;
611 new_entry->end = new_entry->start + size;
612 new_entry->object.uvm_obj = uobj;
613 new_entry->offset = uoffset;
614
615 if (uobj)
616 new_entry->etype = UVM_ET_OBJ;
617 else
618 new_entry->etype = 0;
619
620 if (flags & UVM_FLAG_COPYONW) {
621 new_entry->etype |= UVM_ET_COPYONWRITE;
622 if ((flags & UVM_FLAG_OVERLAY) == 0)
623 new_entry->etype |= UVM_ET_NEEDSCOPY;
624 }
625
626 new_entry->protection = prot;
627 new_entry->max_protection = maxprot;
628 new_entry->inheritance = inherit;
629 new_entry->wired_count = 0;
630 new_entry->advice = advice;
631 if (flags & UVM_FLAG_OVERLAY) {
632 /*
633 * to_add: for BSS we overallocate a little since we
634 * are likely to extend
635 */
636 vm_offset_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
637 UVM_AMAP_CHUNK * PAGE_SIZE : 0;
638 struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
639 new_entry->aref.ar_slotoff = 0;
640 new_entry->aref.ar_amap = amap;
641 } else {
642 new_entry->aref.ar_amap = NULL;
643 }
644
645 uvm_map_entry_link(map, prev_entry, new_entry);
646
647 map->size += size;
648
649 /*
650 * Update the free space hint
651 */
652
653 if ((map->first_free == prev_entry) &&
654 (prev_entry->end >= new_entry->start))
655 map->first_free = new_entry;
656
657 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
658 vm_map_unlock(map);
659 return(KERN_SUCCESS);
660 }
661
662 /*
663 * uvm_map_lookup_entry: find map entry at or before an address
664 *
665 * => map must at least be read-locked by caller
666 * => entry is returned in "entry"
667 * => return value is true if address is in the returned entry
668 */
669
670 boolean_t
671 uvm_map_lookup_entry(map, address, entry)
672 register vm_map_t map;
673 register vm_offset_t address;
674 vm_map_entry_t *entry; /* OUT */
675 {
676 register vm_map_entry_t cur;
677 register vm_map_entry_t last;
678 UVMHIST_FUNC("uvm_map_lookup_entry");
679 UVMHIST_CALLED(maphist);
680
681 UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
682 map, address, entry, 0);
683
684 /*
685 * start looking either from the head of the
686 * list, or from the hint.
687 */
688
689 simple_lock(&map->hint_lock);
690 cur = map->hint;
691 simple_unlock(&map->hint_lock);
692
693 if (cur == &map->header)
694 cur = cur->next;
695
696 UVMCNT_INCR(uvm_mlk_call);
697 if (address >= cur->start) {
698 /*
699 * go from hint to end of list.
700 *
701 * but first, make a quick check to see if
702 * we are already looking at the entry we
703 * want (which is usually the case).
704 * note also that we don't need to save the hint
705 * here... it is the same hint (unless we are
706 * at the header, in which case the hint didn't
707 * buy us anything anyway).
708 */
709 last = &map->header;
710 if ((cur != last) && (cur->end > address)) {
711 UVMCNT_INCR(uvm_mlk_hint);
712 *entry = cur;
713 UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
714 cur, 0, 0, 0);
715 return (TRUE);
716 }
717 } else {
718 /*
719 * go from start to hint, *inclusively*
720 */
721 last = cur->next;
722 cur = map->header.next;
723 }
724
725 /*
726 * search linearly
727 */
728
729 while (cur != last) {
730 if (cur->end > address) {
731 if (address >= cur->start) {
732 /*
733 * save this lookup for future
734 * hints, and return
735 */
736
737 *entry = cur;
738 SAVE_HINT(map, cur);
739 UVMHIST_LOG(maphist,"<- search got it (0x%x)",
740 cur, 0, 0, 0);
741 return (TRUE);
742 }
743 break;
744 }
745 cur = cur->next;
746 }
747 *entry = cur->prev;
748 SAVE_HINT(map, *entry);
749 UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
750 return (FALSE);
751 }
752
753
754 /*
755 * uvm_map_findspace: find "length" sized space in "map".
756 *
757 * => "hint" is a hint about where we want it, unless fixed is true
758 * (in which case we insist on using "hint").
759 * => "result" is VA returned
760 * => uobj/uoffset are to be used to handle VAC alignment, if required
761 * => caller must at least have read-locked map
762 * => returns NULL on failure, or pointer to prev. map entry if success
763 * => note this is a cross between the old vm_map_findspace and vm_map_find
764 */
765
766 vm_map_entry_t
767 uvm_map_findspace(map, hint, length, result, uobj, uoffset, fixed)
768 vm_map_t map;
769 vm_offset_t hint;
770 vm_size_t length;
771 vm_offset_t *result; /* OUT */
772 struct uvm_object *uobj;
773 vm_offset_t uoffset;
774 boolean_t fixed;
775 {
776 vm_map_entry_t entry, next, tmp;
777 vm_offset_t end;
778 UVMHIST_FUNC("uvm_map_findspace");
779 UVMHIST_CALLED(maphist);
780
781 UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, fixed=%d)",
782 map, hint, length, fixed);
783
784 if (hint < map->min_offset) { /* check ranges ... */
785 if (fixed) {
786 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
787 return(NULL);
788 }
789 hint = map->min_offset;
790 }
791 if (hint > map->max_offset) {
792 UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
793 hint, map->min_offset, map->max_offset, 0);
794 return(NULL);
795 }
796
797 /*
798 * Look for the first possible address; if there's already
799 * something at this address, we have to start after it.
800 */
801
802 if (!fixed && hint == map->min_offset) {
803 if ((entry = map->first_free) != &map->header)
804 hint = entry->end;
805 } else {
806 if (uvm_map_lookup_entry(map, hint, &tmp)) {
807 /* "hint" address already in use ... */
808 if (fixed) {
809 UVMHIST_LOG(maphist,"<- fixed & VA in use",
810 0, 0, 0, 0);
811 return(NULL);
812 }
813 hint = tmp->end;
814 }
815 entry = tmp;
816 }
817
818 /*
819 * Look through the rest of the map, trying to fit a new region in
820 * the gap between existing regions, or after the very last region.
821 * note: entry->end = base VA of current gap,
822 * next->start = VA of end of current gap
823 */
824 for (;; hint = (entry = next)->end) {
825 /*
826 * Find the end of the proposed new region. Be sure we didn't
827 * go beyond the end of the map, or wrap around the address;
828 * if so, we lose. Otherwise, if this is the last entry, or
829 * if the proposed new region fits before the next entry, we
830 * win.
831 */
832
833 #ifdef PMAP_PREFER
834 /*
835 * push hint forward as needed to avoid VAC alias problems.
836 * we only do this if a valid offset is specified.
837 */
838 if (!fixed && uoffset != UVM_UNKNOWN_OFFSET)
839 PMAP_PREFER(uoffset, &hint);
840 #endif
841 end = hint + length;
842 if (end > map->max_offset || end < hint) {
843 UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
844 return (NULL);
845 }
846 next = entry->next;
847 if (next == &map->header || next->start >= end)
848 break;
849 if (fixed) {
850 UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
851 return(NULL); /* only one shot at it ... */
852 }
853 }
854 SAVE_HINT(map, entry);
855 *result = hint;
856 UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
857 return (entry);
858 }
859
860 /*
861 * U N M A P - m a i n h e l p e r f u n c t i o n s
862 */
863
864 /*
865 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
866 *
867 * => caller must check alignment and size
868 * => map must be locked by caller
869 * => if the "start"/"end" range lies within a mapping of a share map,
870 * then the unmap takes place within the context of that share map
871 * rather than in the main map, unless the "mainonly" flag is set.
872 * (e.g. the "exit" system call would want to set "mainonly").
873 * => we return a list of map entries that we've removed from the map
874 * in "entry_list"
875 */
876
877 int
878 uvm_unmap_remove(map, start, end, mainonly, entry_list)
879 vm_map_t map;
880 vm_offset_t start,end;
881 boolean_t mainonly;
882 vm_map_entry_t *entry_list; /* OUT */
883 {
884 int result, refs;
885 vm_map_entry_t entry, first_entry, next;
886 vm_offset_t len;
887 boolean_t already_removed;
888 struct uvm_object *uobj;
889 UVMHIST_FUNC("uvm_unmap_remove");
890 UVMHIST_CALLED(maphist);
891
892 UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
893 map, start, end, 0);
894
895 VM_MAP_RANGE_CHECK(map, start, end);
896
897 /*
898 * find first entry
899 */
900 if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
901 /*
902 * start lies within a mapped region. first check to see if
903 * it is within a sharemap (in which case we recurse and unmap
904 * within the context of the share map).
905 */
906 if (UVM_ET_ISMAP(first_entry) &&
907 !UVM_ET_ISSUBMAP(first_entry) &&
908 mainonly == 0 && end <= first_entry->end) {
909 /*
910 * is a share map and in range ...
911 * XXX: do address transforms if share VA's != main VA's
912 * note: main map kept locked during share map unlock
913 */
914 result = uvm_unmap(first_entry->object.share_map,
915 start, end, 0);
916 *entry_list = NULL;
917 return(result);
918 }
919 /* non-share map: clip and go... */
920 entry = first_entry;
921 UVM_MAP_CLIP_START(map, entry, start);
922 /* critical! prevents stale hint */
923 SAVE_HINT(map, entry->prev);
924
925 } else {
926 entry = first_entry->next;
927 }
928
929 /*
930 * Save the free space hint
931 */
932
933 if (map->first_free->start >= start)
934 map->first_free = entry->prev;
935
936 /*
937 * note: we now re-use first_entry for a different task. we remove
938 * a number of map entries from the map and save them in a linked
939 * list headed by "first_entry". once we remove them from the map
940 * the caller should unlock the map and drop the references to the
941 * backing objects [c.f. uvm_unmap_detach]. the object is to
942 * separate unmapping from reference dropping. why?
943 * [1] the map has to be locked for unmapping
944 * [2] the map need not be locked for reference dropping
945 * [3] dropping references may trigger pager I/O, and if we hit
946 * a pager that does synchronous I/O we may have to wait for it.
947 * [4] we would like all waiting for I/O to occur with maps unlocked
948 * so that we don't block other threads.
949 */
950 first_entry = NULL;
951 *entry_list = NULL; /* to be safe */
952
953 /*
954 * break up the area into map entry sized regions and unmap. note
955 * that all mappings have to be removed before we can even consider
956 * dropping references to amaps or VM objects (otherwise we could end
957 * up with a mapping to a page on the free list which would be very bad)
958 */
959
960 while ((entry != &map->header) && (entry->start < end)) {
961
962 UVM_MAP_CLIP_END(map, entry, end);
963 next = entry->next;
964 len = entry->end - entry->start;
965
966 /*
967 * unwire before removing addresses from the pmap; otherwise
968 * unwiring will put the entries back into the pmap (XXX).
969 */
970
971 if (entry->wired_count)
972 uvm_map_entry_unwire(map, entry);
973
974 /*
975 * special case: handle mappings to anonymous kernel objects.
976 * we want to free these pages right away...
977 */
978 if (UVM_ET_ISOBJ(entry) &&
979 entry->object.uvm_obj->uo_refs == UVM_OBJ_KERN) {
980
981 #ifdef DIAGNOSTIC
982 if (vm_map_pmap(map) != pmap_kernel())
983 panic("uvm_unmap_remove: kernel object mapped by non-kernel map");
984 #endif
985
986 /*
987 * note: kernel object mappings are currently used in
988 * two ways:
989 * [1] "normal" mappings of pages in the kernel object
990 * [2] uvm_km_valloc'd allocations in which we
991 * pmap_enter in some non-kernel-object page
992 * (e.g. vmapbuf).
993 *
994 * for case [1], we need to remove the mapping from
995 * the pmap and then remove the page from the kernel
996 * object (because, once pages in a kernel object are
997 * unmapped they are no longer needed, unlike, say,
998 * a vnode where you might want the data to persist
999 * until flushed out of a queue).
1000 *
1001 * for case [2], we need to remove the mapping from
1002 * the pmap. there shouldn't be any pages at the
1003 * specified offset in the kernel object [but it
1004 * doesn't hurt to call uvm_km_pgremove just to be
1005 * safe?]
1006 *
1007 * uvm_km_pgremove currently does the following:
1008 * for pages in the kernel object in range:
1009 * - pmap_page_protect them out of all pmaps
1010 * - uvm_pagefree the page
1011 *
1012 * note that in case [1] the pmap_page_protect call
1013 * in uvm_km_pgremove may very well be redundant
1014 * because we have already removed the mappings
1015 * beforehand with pmap_remove (or pmap_kremove).
1016 * in the PMAP_NEW case, the pmap_page_protect call
1017 * may not do anything, since PMAP_NEW allows the
1018 * kernel to enter/remove kernel mappings without
1019 * bothering to keep track of the mappings (e.g. via
1020 * pv_entry lists). XXX: because of this, we
1021 * should consider removing the
1022 * pmap_page_protect call from uvm_km_pgremove
1023 * at some point in the future.
1024 */
1025
1026 /*
1027 * remove mappings from pmap
1028 */
1029 #if defined(PMAP_NEW)
1030 pmap_kremove(entry->start, len);
1031 #else
1032 pmap_remove(pmap_kernel(), entry->start,
1033 entry->start+len);
1034 #endif
1035
1036 /*
1037 * remove pages from a kernel object (offsets are
1038 * always relative to vm_map_min(kernel_map)).
1039 */
1040 uvm_km_pgremove(entry->object.uvm_obj,
1041 entry->start - vm_map_min(kernel_map),
1042 entry->end - vm_map_min(kernel_map));
1043
1044 already_removed = TRUE;
1045
1046 /*
1047 * null out kernel_object reference, we've just
1048 * dropped it
1049 */
1050 entry->etype &= ~UVM_ET_OBJ;
1051 entry->object.uvm_obj = NULL; /* to be safe */
1052
1053 } else
1054 already_removed = FALSE;
1055
1056 /*
1057 * remove mappings now. for sharemaps, check to see if
1058 * the reference count is one (i.e. not being shared right
1059 * now). if so, use the cheaper pmap_remove() rather than
1060 * the more expensive share_protect functions.
1061 */
1062
1063 if (!map->is_main_map) {
1064 simple_lock(&map->ref_lock);
1065 refs = map->ref_count;
1066 simple_unlock(&map->ref_lock);
1067 }
1068 #if defined(sparc)
1069 else
1070 refs = 0; /* XXX: gcc */
1071 #endif
1072
1073 if (map->is_main_map || (!map->is_main_map && refs == 1)) {
1074 if (!already_removed)
1075 pmap_remove(map->pmap, entry->start,
1076 entry->end);
1077 } else {
1078 /* share map... must remove all mappings */
1079 if (entry->aref.ar_amap) {
1080 simple_lock(&entry->aref.ar_amap->am_l);
1081 amap_share_protect(entry, VM_PROT_NONE);
1082 simple_unlock(&entry->aref.ar_amap->am_l);
1083 }
1084 if (UVM_ET_ISOBJ(entry)) {
1085 uobj = entry->object.uvm_obj;
1086 simple_lock(&uobj->vmobjlock);
1087 uobj->pgops->pgo_shareprot(entry, VM_PROT_NONE);
1088 simple_unlock(&uobj->vmobjlock);
1089 }
1090 }
1091
1092 /*
1093 * remove from map and put it on our list of entries that
1094 * we've nuked. then go do next entry.
1095 */
1096 UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
1097 uvm_map_entry_unlink(map, entry);
1098 map->size -= len;
1099 entry->next = first_entry;
1100 first_entry = entry;
1101 entry = next; /* next entry, please */
1102 }
1103
1104 /*
1105 * now we've cleaned up the map and are ready for the caller to drop
1106 * references to the mapped objects.
1107 */
1108
1109 *entry_list = first_entry;
1110 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1111 return(KERN_SUCCESS);
1112 }
1113
1114 /*
1115 * uvm_unmap_detach: drop references in a chain of map entries
1116 *
1117 * => we will free the map entries as we traverse the list.
1118 */
1119
1120 void
1121 uvm_unmap_detach(first_entry, amap_unref_flags)
1122 vm_map_entry_t first_entry;
1123 int amap_unref_flags;
1124 {
1125 vm_map_entry_t next_entry;
1126 UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
1127
1128 while (first_entry) {
1129
1130 #ifdef DIAGNOSTIC
1131 /*
1132 * sanity check
1133 */
1134 /* was part of vm_map_entry_delete() */
1135 if (first_entry->wired_count)
1136 panic("unmap: still wired!");
1137 #endif
1138
1139 UVMHIST_LOG(maphist,
1140 " detach 0x%x: amap=0x%x, obj=0x%x, map?=%d", first_entry,
1141 first_entry->aref.ar_amap, first_entry->object.uvm_obj,
1142 UVM_ET_ISMAP(first_entry));
1143
1144 /*
1145 * drop reference to amap, if we've got one
1146 */
1147
1148 if (first_entry->aref.ar_amap)
1149 amap_unref(first_entry, amap_unref_flags);
1150
1151 /*
1152 * drop reference to our backing object, if we've got one
1153 */
1154
1155 if (UVM_ET_ISMAP(first_entry)) {
1156 uvm_map_deallocate(first_entry->object.share_map);
1157 } else {
1158 if (UVM_ET_ISOBJ(first_entry) &&
1159 first_entry->object.uvm_obj->pgops->pgo_detach)
1160 first_entry->object.uvm_obj->pgops->
1161 pgo_detach(first_entry->object.uvm_obj);
1162 }
1163
1164 /*
1165 * next entry
1166 */
1167 next_entry = first_entry->next;
1168 uvm_mapent_free(first_entry);
1169 first_entry = next_entry;
1170 }
1171
1172 /*
1173 * done!
1174 */
1175 UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
1176 return;
1177 }
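/*
 * for reference, the usual way the two helpers above are driven (this
 * is essentially what uvm_unmap() does; a sketch only):
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, mainonly, &dead_entries);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 *
 * i.e. the map is held locked only across the unmapping step, and the
 * reference dropping (which may sleep on pager I/O) happens unlocked,
 * as described in uvm_unmap_remove() above.
 */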
1178
1179 /*
1180 * E X T R A C T I O N F U N C T I O N S
1181 */
1182
1183 /*
1184 * uvm_map_reserve: reserve space in a vm_map for future use.
1185 *
1186 * => we reserve space in a map by putting a dummy map entry in the
1187 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
1188 * => map should be unlocked (we will write lock it)
1189 * => we return true if we were able to reserve space
1190 * => XXXCDC: should be inline?
1191 */
1192
1193 int
1194 uvm_map_reserve(map, size, offset, raddr)
1195 vm_map_t map;
1196 vm_size_t size;
1197 vm_offset_t offset; /* hint for pmap_prefer */
1198 vm_offset_t *raddr; /* OUT: reserved VA */
1199 {
1200 UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
1201
1202 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
1203 map,size,offset,raddr);
1204
1205 size = round_page(size);
1206 if (*raddr < vm_map_min(map))
1207 *raddr = vm_map_min(map); /* hint */
1208
1209 /*
1210 * reserve some virtual space.
1211 */
1212
1213 if (uvm_map(map, raddr, size, NULL, offset,
1214 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
1215 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
1216 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
1217 return (FALSE);
1218 }
1219
1220 UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
1221 return (TRUE);
1222 }
1223
1224 /*
1225 * uvm_map_replace: replace a reserved (blank) area of memory with
1226 * real mappings.
1227 *
1228 * => caller must WRITE-LOCK the map
1229 * => we return TRUE if replacement was a success
1230 * => we expect the newents chain to have nnewents entries on it and
1231 * we expect newents->prev to point to the last entry on the list
1232 * => note newents is allowed to be NULL
1233 */
1234
1235 int
1236 uvm_map_replace(map, start, end, newents, nnewents)
1237 struct vm_map *map;
1238 vm_offset_t start, end;
1239 vm_map_entry_t newents;
1240 int nnewents;
1241 {
1242 vm_map_entry_t oldent, last;
1243 UVMHIST_FUNC("uvm_map_replace");
1244 UVMHIST_CALLED(maphist);
1245
1246 /*
1247 * first find the blank map entry at the specified address
1248 */
1249
1250 if (!uvm_map_lookup_entry(map, start, &oldent)) {
1251 return(FALSE);
1252 }
1253
1254 /*
1255 * check to make sure we have a proper blank entry
1256 */
1257
1258 if (oldent->start != start || oldent->end != end ||
1259 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
1260 return (FALSE);
1261 }
1262
1263 #ifdef DIAGNOSTIC
1264 /*
1265 * sanity check the newents chain
1266 */
1267 {
1268 vm_map_entry_t tmpent = newents;
1269 int nent = 0;
1270 vm_offset_t cur = start;
1271
1272 while (tmpent) {
1273 nent++;
1274 if (tmpent->start < cur)
1275 panic("uvm_map_replace1");
1276 if (tmpent->start > tmpent->end || tmpent->end > end) {
1277 printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
1278 tmpent->start, tmpent->end, end);
1279 panic("uvm_map_replace2");
1280 }
1281 cur = tmpent->end;
1282 if (tmpent->next) {
1283 if (tmpent->next->prev != tmpent)
1284 panic("uvm_map_replace3");
1285 } else {
1286 if (newents->prev != tmpent)
1287 panic("uvm_map_replace4");
1288 }
1289 tmpent = tmpent->next;
1290 }
1291 if (nent != nnewents)
1292 panic("uvm_map_replace5");
1293 }
1294 #endif
1295
1296 /*
1297 * map entry is a valid blank! replace it. (this does all the
1298 * work of map entry link/unlink...).
1299 */
1300
1301 if (newents) {
1302
1303 last = newents->prev; /* we expect this */
1304
1305 /* critical: flush stale hints out of map */
1306 SAVE_HINT(map, newents);
1307 if (map->first_free == oldent)
1308 map->first_free = last;
1309
1310 last->next = oldent->next;
1311 last->next->prev = last;
1312 newents->prev = oldent->prev;
1313 newents->prev->next = newents;
1314 map->nentries = map->nentries + (nnewents - 1);
1315
1316 } else {
1317
1318 /* critical: flush stale hints out of map */
1319 SAVE_HINT(map, oldent->prev);
1320 if (map->first_free == oldent)
1321 map->first_free = oldent->prev;
1322
1323 /* NULL list of new entries: just remove the old one */
1324 uvm_map_entry_unlink(map, oldent);
1325 }
1326
1327
1328 /*
1329 * now we can free the old blank entry, unlock the map and return.
1330 */
1331
1332 uvm_mapent_free(oldent);
1333 return(TRUE);
1334 }
1335
1336 /*
1337 * uvm_map_extract: extract a mapping from a map and put it somewhere
1338 * (maybe removing the old mapping)
1339 *
1340 * => maps should be unlocked (we will write lock them)
1341 * => returns 0 on success, error code otherwise
1342 * => start must be page aligned
1343 * => len must be page sized
1344 * => flags:
1345 * UVM_EXTRACT_REMOVE: remove mappings from srcmap
1346 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
1347 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
1348 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
1349 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
1350 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
1351 * be used from within the kernel in a kernel level map <<<
1352 */
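/*
 * illustrative use (a sketch; "uva"/"kva" are hypothetical names): a
 * kernel routine that needs a temporary kernel-addressable view of
 * part of another map might do something like
 *
 *	error = uvm_map_extract(srcmap, uva, len, kernel_map, &kva,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
 *
 * and later unmap kva through the QREF path when done (per the notes
 * above, QREF extractions should only be used from kernel-level maps).
 */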
1353
1354 int
1355 uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
1356 vm_map_t srcmap, dstmap;
1357 vm_offset_t start, *dstaddrp;
1358 vm_size_t len;
1359 int flags;
1360 {
1361 vm_offset_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
1362 oldstart;
1363 vm_map_entry_t chain, endchain, entry, orig_entry, newentry, deadentry;
1364 vm_map_entry_t oldentry;
1365 vm_size_t elen;
1366 int nchain, error, copy_ok;
1367 UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
1368 UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
1369 len,0);
1370 UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
1371
1372 #ifdef DIAGNOSTIC
1373 /*
1374 * step 0: sanity check: start must be on a page boundary, length
1375 * must be page sized. can't ask for CONTIG/QREF if you asked for
1376 * REMOVE.
1377 */
1378 if ((start & PAGE_MASK) || (len & PAGE_MASK))
1379 panic("uvm_map_extract1");
1380 if (flags & UVM_EXTRACT_REMOVE)
1381 if (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF))
1382 panic("uvm_map_extract2");
1383 #endif
1384
1385
1386 /*
1387 * step 1: reserve space in the target map for the extracted area
1388 */
1389
1390 dstaddr = *dstaddrp;
1391 if (uvm_map_reserve(dstmap, len, start, &dstaddr) == FALSE)
1392 return(ENOMEM);
1393 *dstaddrp = dstaddr; /* pass address back to caller */
1394 UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
1395
1396
1397 /*
1398 * step 2: setup for the extraction process loop by init'ing the
1399 * map entry chain, locking src map, and looking up the first useful
1400 * entry in the map.
1401 */
1402
1403 end = start + len;
1404 newend = dstaddr + len;
1405 chain = endchain = NULL;
1406 nchain = 0;
1407 vm_map_lock(srcmap);
1408
1409 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
1410
1411 /* "start" is within an entry */
1412 if (flags & UVM_EXTRACT_QREF) {
1413 /*
1414 * for quick references we don't clip the entry, so
1415 * the entry may map space "before" the starting
1416 * virtual address... this is the "fudge" factor
1417 * (which can be non-zero only the first time
1418 * through the "while" loop in step 3).
1419 */
1420 fudge = start - entry->start;
1421 } else {
1422 /*
1423 * normal reference: we clip the map to fit (thus
1424 * fudge is zero)
1425 */
1426 UVM_MAP_CLIP_START(srcmap, entry, start);
1427 SAVE_HINT(srcmap, entry->prev);
1428 fudge = 0;
1429 }
1430
1431 } else {
1432
1433 /* "start" is not within an entry ... skip to next entry */
1434 if (flags & UVM_EXTRACT_CONTIG) {
1435 error = EINVAL;
1436 goto bad; /* definite hole here ... */
1437 }
1438
1439 entry = entry->next;
1440 fudge = 0;
1441 }
1442 /* save values from srcmap for step 6 */
1443 orig_entry = entry;
1444 orig_fudge = fudge;
1445
1446
1447 /*
1448 * step 3: now start looping through the map entries, extracting
1449 * as we go.
1450 */
1451
1452 while (entry->start < end && entry != &srcmap->header) {
1453
1454 /* if we are not doing a quick reference, clip it */
1455 if ((flags & UVM_EXTRACT_QREF) == 0)
1456 UVM_MAP_CLIP_END(srcmap, entry, end);
1457
1458 /* clear needs_copy (allow chunking) */
1459 if (UVM_ET_ISNEEDSCOPY(entry)) {
1460 if (fudge)
1461 oldstart = entry->start;
1462 else
1463 oldstart = 0; /* XXX: gcc */
1464 amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
1465 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
1466 error = ENOMEM;
1467 goto bad;
1468 }
1469 /* amap_copy could clip (during chunk)! update fudge */
1470 if (fudge) {
1471 fudge = fudge - (entry->start - oldstart);
1472 orig_fudge = fudge;
1473 }
1474 }
1475
1476 /* calculate the offset of this from "start" */
1477 oldoffset = (entry->start + fudge) - start;
1478
1479 /* allocate a new map entry */
1480 newentry = uvm_mapent_alloc(dstmap);
1481 if (newentry == NULL) {
1482 error = ENOMEM;
1483 goto bad;
1484 }
1485
1486 /* set up new map entry */
1487 newentry->next = NULL;
1488 newentry->prev = endchain;
1489 newentry->start = dstaddr + oldoffset;
1490 newentry->end =
1491 newentry->start + (entry->end - (entry->start + fudge));
1492 if (newentry->end > newend)
1493 newentry->end = newend;
1494 newentry->object.uvm_obj = entry->object.uvm_obj;
1495 if (newentry->object.uvm_obj) {
1496 if (newentry->object.uvm_obj->pgops->pgo_reference)
1497 newentry->object.uvm_obj->pgops->
1498 pgo_reference(newentry->object.uvm_obj);
1499 newentry->offset = entry->offset + fudge;
1500 } else {
1501 newentry->offset = 0;
1502 }
1503 newentry->etype = entry->etype;
1504 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
1505 entry->max_protection : entry->protection;
1506 newentry->max_protection = entry->max_protection;
1507 newentry->inheritance = entry->inheritance;
1508 newentry->wired_count = 0;
1509 newentry->aref.ar_amap = entry->aref.ar_amap;
1510 if (newentry->aref.ar_amap) {
1511 newentry->aref.ar_slotoff =
1512 entry->aref.ar_slotoff + (fudge / PAGE_SIZE);
1513 amap_ref(newentry, AMAP_SHARED |
1514 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
1515 } else {
1516 newentry->aref.ar_slotoff = 0;
1517 }
1518 newentry->advice = entry->advice;
1519
1520 /* now link it on the chain */
1521 nchain++;
1522 if (endchain == NULL) {
1523 chain = endchain = newentry;
1524 } else {
1525 endchain->next = newentry;
1526 endchain = newentry;
1527 }
1528
1529 /* end of 'while' loop! */
1530 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
1531 (entry->next == &srcmap->header ||
1532 entry->next->start != entry->end)) {
1533 error = EINVAL;
1534 goto bad;
1535 }
1536 entry = entry->next;
1537 fudge = 0;
1538 }
1539
1540
1541 /*
1542 * step 4: close off chain (in format expected by uvm_map_replace)
1543 */
1544
1545 if (chain)
1546 chain->prev = endchain;
1547
1548
1549 /*
1550 * step 5: attempt to lock the dest map so we can pmap_copy.
1551 * note usage of copy_ok:
1552 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
1553 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
1554 */
1555
1556 if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
1557
1558 copy_ok = 1;
1559 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
1560 nchain)) {
1561 if (srcmap != dstmap)
1562 vm_map_unlock(dstmap);
1563 error = EIO;
1564 goto bad;
1565 }
1566
1567 } else {
1568
1569 copy_ok = 0;
1570 /* replace deferred until step 7 */
1571
1572 }
1573
1574
1575 /*
1576 * step 6: traverse the srcmap a second time to do the following:
1577 * - if we got a lock on the dstmap do pmap_copy
1578 * - if UVM_EXTRACT_REMOVE remove the entries
1579 * we make use of orig_entry and orig_fudge (saved in step 2)
1580 */
1581
1582 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
1583
1584 /* purge possible stale hints from srcmap */
1585 if (flags & UVM_EXTRACT_REMOVE) {
1586 SAVE_HINT(srcmap, orig_entry->prev);
1587 if (srcmap->first_free->start >= start)
1588 srcmap->first_free = orig_entry->prev;
1589 }
1590
1591 entry = orig_entry;
1592 fudge = orig_fudge;
1593 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
1594
1595 while (entry->start < end && entry != &srcmap->header) {
1596
1597 if (copy_ok) {
1598 oldoffset = (entry->start + fudge) - start;
1599 elen = min(end, entry->end) - (entry->start + fudge);
1600 pmap_copy(dstmap->pmap, srcmap->pmap, dstaddr + oldoffset,
1601 elen, entry->start + fudge);
1602 }
1603
1604 /* we advance "entry" in the following if statement */
1605 if (flags & UVM_EXTRACT_REMOVE) {
1606 pmap_remove(srcmap->pmap, entry->start,
1607 entry->end);
1608 oldentry = entry; /* save entry */
1609 entry = entry->next; /* advance */
1610 uvm_map_entry_unlink(srcmap, oldentry);
1611 /* add to dead list */
1612 oldentry->next = deadentry;
1613 deadentry = oldentry;
1614 } else {
1615 entry = entry->next; /* advance */
1616 }
1617
1618 /* end of 'while' loop */
1619 fudge = 0;
1620 }
1621
1622 /*
1623 * unlock dstmap. we will dispose of deadentry in
1624 * step 7 if needed
1625 */
1626 if (copy_ok && srcmap != dstmap)
1627 vm_map_unlock(dstmap);
1628
1629 }
1630 else
1631 deadentry = NULL; /* XXX: gcc */
1632
1633 /*
1634 * step 7: we are done with the source map, unlock. if copy_ok
1635 * is 0 then we have not replaced the dummy mapping in dstmap yet
1636 * and we need to do so now.
1637 */
1638
1639 vm_map_unlock(srcmap);
1640 if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
1641 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
1642
1643 /* now do the replacement if we didn't do it in step 5 */
1644 if (copy_ok == 0) {
1645 vm_map_lock(dstmap);
1646 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
1647 nchain);
1648 vm_map_unlock(dstmap);
1649
1650 if (error == FALSE) {
1651 error = EIO;
1652 goto bad2;
1653 }
1654 }
1655
1656 /*
1657 * done!
1658 */
1659 return(0);
1660
1661 /*
1662 * bad: failure recovery
1663 */
1664 bad:
1665 vm_map_unlock(srcmap);
1666 bad2: /* src already unlocked */
1667 if (chain)
1668 uvm_unmap_detach(chain,
1669 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
1670 uvm_unmap(dstmap, dstaddr, dstaddr+len, 1); /* ??? */
1671 return(error);
1672 }
1673
1674 /* end of extraction functions */
1675
1676 /*
1677 * uvm_map_submap: punch down part of a map into a submap
1678 *
1679 * => only the kernel_map is allowed to be submapped
1680 * => the purpose of submapping is to break up the locking granularity
1681 * of a larger map
1682 * => the range specified must have been mapped previously with a uvm_map()
1683 * call [with uobj==NULL] to create a blank map entry in the main map.
1684 * [And it had better still be blank!]
1685 * => maps which contain submaps should never be copied or forked.
1686 * => to remove a submap, use uvm_unmap() on the main map
1687 * and then uvm_map_deallocate() the submap.
1688 * => main map must be unlocked.
1689 * => submap must have been init'd and have a zero reference count.
1690 * [need not be locked as we don't actually reference it]
1691 */
1692
1693 int
1694 uvm_map_submap(map, start, end, submap)
1695 vm_map_t map, submap;
1696 vm_offset_t start, end;
1697 {
1698 vm_map_entry_t entry;
1699 int result;
1700 UVMHIST_FUNC("uvm_map_submap"); UVMHIST_CALLED(maphist);
1701
1702 vm_map_lock(map);
1703
1704 VM_MAP_RANGE_CHECK(map, start, end);
1705
1706 if (uvm_map_lookup_entry(map, start, &entry)) {
1707 UVM_MAP_CLIP_START(map, entry, start);
1708 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
1709 }
1710 else {
1711 entry = NULL;
1712 }
1713
1714 if (entry != NULL &&
1715 entry->start == start && entry->end == end &&
1716 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
1717 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
1718
1719 /*
1720 * doit!
1721 */
1722 entry->etype |= (UVM_ET_MAP|UVM_ET_SUBMAP);
1723 entry->object.sub_map = submap;
1724 entry->offset = 0;
1725 uvm_map_reference(submap);
1726 result = KERN_SUCCESS;
1727 } else {
1728 result = KERN_INVALID_ARGUMENT;
1729 }
1730 vm_map_unlock(map);
1731
1732 return(result);
1733 }
1734
1735
1736 /*
1737 * uvm_map_protect: change map protection
1738 *
1739 * => set_max means set max_protection.
1740 * => map must be unlocked.
1741 * => XXXCDC: does not work properly with share maps. rethink.
1742 */
1743
1744 #define MASK(entry) ( UVM_ET_ISCOPYONWRITE(entry) ? \
1745 ~VM_PROT_WRITE : VM_PROT_ALL)
1746 #define max(a,b) ((a) > (b) ? (a) : (b))
1747
1748 int
1749 uvm_map_protect(map, start, end, new_prot, set_max)
1750 vm_map_t map;
1751 vm_offset_t start, end;
1752 vm_prot_t new_prot;
1753 boolean_t set_max;
1754 {
1755 vm_map_entry_t current, entry;
1756 UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
1757 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
1758 map, start, end, new_prot);
1759
1760 vm_map_lock(map);
1761
1762 VM_MAP_RANGE_CHECK(map, start, end);
1763
1764 if (uvm_map_lookup_entry(map, start, &entry)) {
1765 UVM_MAP_CLIP_START(map, entry, start);
1766 } else {
1767 entry = entry->next;
1768 }
1769
1770 /*
1771 * make a first pass to check for protection violations.
1772 */
1773
1774 current = entry;
1775 while ((current != &map->header) && (current->start < end)) {
1776 if (UVM_ET_ISSUBMAP(current)) {
1777 vm_map_unlock(map); return(KERN_INVALID_ARGUMENT); }
1778 if ((new_prot & current->max_protection) != new_prot) {
1779 vm_map_unlock(map);
1780 return(KERN_PROTECTION_FAILURE);
1781 }
1782 current = current->next;
1783 }
1784
1785 /* go back and fix up protections (clipping entries to the range as we go). */
1786
1787 current = entry;
1788
1789 while ((current != &map->header) && (current->start < end)) {
1790 vm_prot_t old_prot;
1791
1792 UVM_MAP_CLIP_END(map, current, end);
1793
1794 old_prot = current->protection;
1795 if (set_max)
1796 current->protection =
1797 (current->max_protection = new_prot) & old_prot;
1798 else
1799 current->protection = new_prot;
1800
1801 /*
1802 * update physical map if necessary. worry about copy-on-write
1803 * here -- CHECK THIS XXX
1804 */
1805
1806 if (current->protection != old_prot) {
1807 if (UVM_ET_ISMAP(current) &&
1808 !UVM_ET_ISSUBMAP(current)) {
1809 /* share map? gotta go down a level */
1810 vm_map_entry_t share_entry;
1811 vm_offset_t share_end;
1812
1813 /*
1814 * note: a share map has its own address
1815 * space (starting at zero). current->offset
1816 * is the offset into the share map our
1817 * mapping starts. the length of our
1818 * mapping is (current->end - current->start).
1819 * thus, our mapping goes from current->offset
1820 * to share_end (which is: current->offset +
1821 * length) in the share map's address space.
1822 *
1823 * thus for any share_entry we need to make
1824 * sure that the addresses we've got fall in
1825 * the range we want. we use:
1826 * max(any share_entry->start, current->offset)
1827 * min(any share_entry->end, share_end)
1828 *
1829 * of course to change our pmap we've got to
1830 * convert the share map address back to
1831 * our map's virtual address space using:
1832 * our_va = share_va -
1833 * current->offset + current->start
1834 *
1835 * XXXCDC: protection change in sharemap may
1836 * require use of pmap_page_protect. needs
1837 * a rethink.
1838 */
1839
1840 vm_map_lock(current->object.share_map);
1841 /*
1842 * note: current->offset is offset into
1843 * share map
1844 */
1845 (void)uvm_map_lookup_entry(
1846 current->object.share_map,
1847 current->offset, &share_entry);
1848 share_end = current->offset +
1849 (current->end - current->start);
1850 while ((share_entry !=
1851 &current->object.share_map->header) &&
1852 (share_entry->start < share_end)) {
1853
1854 pmap_protect(map->pmap,
1855 (max(share_entry->start,
1856 current->offset) -
1857 current->offset + current->start),
1858 min(share_entry->end, share_end) -
1859 current->offset + current->start,
1860 current->protection &
1861 MASK(share_entry));
1862
1863 share_entry = share_entry->next;
1864 }
1865 vm_map_unlock(current->object.share_map);
1866
1867 } else { /* not share map! */
1868
1869 pmap_protect(map->pmap, current->start,
1870 current->end,
1871 current->protection & MASK(entry));
1872
1873 }
1874 }
1875 current = current->next;
1876 }
1877
1878 vm_map_unlock(map);
1879 UVMHIST_LOG(maphist, "<- done",0,0,0,0);
1880 return(KERN_SUCCESS);
1881 }
1882
1883 #undef max
1884 #undef MASK
1885
1886 /*
1887 * uvm_map_inherit: set inheritance code for range of addrs in map.
1888 *
1889 * => map must be unlocked
1890 * => note that the inherit code is used during a "fork". see fork
1891 * code for details.
1892 * => XXXCDC: currently only works in main map. what about share map?
1893 */
1894
1895 int
1896 uvm_map_inherit(map, start, end, new_inheritance)
1897 vm_map_t map;
1898 vm_offset_t start;
1899 vm_offset_t end;
1900 vm_inherit_t new_inheritance;
1901 {
1902 vm_map_entry_t entry, temp_entry;
1903 UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
1904 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
1905 map, start, end, new_inheritance);
1906
1907 switch (new_inheritance) {
1908 case VM_INHERIT_NONE:
1909 case VM_INHERIT_COPY:
1910 case VM_INHERIT_SHARE:
1911 break;
1912 default:
1913 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
1914 return(KERN_INVALID_ARGUMENT);
1915 }
1916
1917 vm_map_lock(map);
1918
1919 VM_MAP_RANGE_CHECK(map, start, end);
1920
1921 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
1922 entry = temp_entry;
1923 UVM_MAP_CLIP_START(map, entry, start);
1924 } else {
1925 entry = temp_entry->next;
1926 }
1927
1928 while ((entry != &map->header) && (entry->start < end)) {
1929 UVM_MAP_CLIP_END(map, entry, end);
1930
1931 entry->inheritance = new_inheritance;
1932
1933 entry = entry->next;
1934 }
1935
1936 vm_map_unlock(map);
1937 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
1938 return(KERN_SUCCESS);
1939 }
1940
1941 /*
1942 * uvm_map_pageable: sets the pageability of a range in a map.
1943 *
1944 * => regions specified as not pageable require lock-down (wired) memory
1945 * and page tables.
1946 * => map must not be locked.
1947 * => XXXCDC: check this and try and clean it up.
1948 */
1949
1950 int
1951 uvm_map_pageable(map, start, end, new_pageable)
1952 vm_map_t map;
1953 vm_offset_t start, end;
1954 boolean_t new_pageable;
1955 {
1956 vm_map_entry_t entry, start_entry;
1957 vm_offset_t failed = 0;
1958 int rv;
1959 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
1960 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
1961 map, start, end, new_pageable);
1962
1963 vm_map_lock(map);
1964 VM_MAP_RANGE_CHECK(map, start, end);
1965
1966 /*
1967 * only one pageability change may take place at one time, since
1968 * uvm_fault_wire assumes it will be called only once for each
1969 * wiring/unwiring. therefore, we have to make sure we're actually
1970 * changing the pageability for the entire region. we do so before
1971 * making any changes.
1972 */
1973
1974 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1975 vm_map_unlock(map);
1976
1977 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
1978 return (KERN_INVALID_ADDRESS);
1979 }
1980 entry = start_entry;
1981
1982 /*
1983 	 * handle wiring and unwiring separately.
1984 */
1985
1986 if (new_pageable) { /* unwire */
1987
1988 UVM_MAP_CLIP_START(map, entry, start);
1989
1990 /*
1991 * unwiring. first ensure that the range to be unwired is
1992 * really wired down and that there are no holes.
1993 */
1994 while ((entry != &map->header) && (entry->start < end)) {
1995
1996 if (entry->wired_count == 0 ||
1997 (entry->end < end &&
1998 (entry->next == &map->header ||
1999 entry->next->start > entry->end))) {
2000 vm_map_unlock(map);
2001 UVMHIST_LOG(maphist,
2002 "<- done (INVALID UNWIRE ARG)",0,0,0,0);
2003 return (KERN_INVALID_ARGUMENT);
2004 }
2005 entry = entry->next;
2006 }
2007
2008 /*
2009 * now decrement the wiring count for each region. if a region
2010 * becomes completely unwired, unwire its physical pages and
2011 * mappings.
2012 */
2013 #if 0 /* not necessary: uvm_fault_unwire does not lock */
2014 lock_set_recursive(&map->lock);
2015 #endif /* XXXCDC */
2016
2017 entry = start_entry;
2018 while ((entry != &map->header) && (entry->start < end)) {
2019 UVM_MAP_CLIP_END(map, entry, end);
2020
2021 entry->wired_count--;
2022 if (entry->wired_count == 0)
2023 uvm_map_entry_unwire(map, entry);
2024
2025 entry = entry->next;
2026 }
2027 #if 0 /* XXXCDC: not necessary, see above */
2028 lock_clear_recursive(&map->lock);
2029 #endif
2030 vm_map_unlock(map);
2031 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2032 return(KERN_SUCCESS);
2033
2034 /*
2035 * end of unwire case!
2036 */
2037 }
2038
2039 /*
2040 * wire case: in two passes [XXXCDC: ugly block of code here]
2041 *
2042 * 1: holding the write lock, we create any anonymous maps that need
2043 * to be created. then we clip each map entry to the region to
2044 * be wired and increment its wiring count.
2045 *
2046 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
2047 * in the pages for any newly wired area (wired_count is 1).
2048 *
2049 * downgrading to a read lock for uvm_fault_wire avoids a possible
2050 * deadlock with another thread that may have faulted on one of
2051 * the pages to be wired (it would mark the page busy, blocking
2052 * us, then in turn block on the map lock that we hold). because
2053 * of problems in the recursive lock package, we cannot upgrade
2054 * to a write lock in vm_map_lookup. thus, any actions that
2055 * require the write lock must be done beforehand. because we
2056 * keep the read lock on the map, the copy-on-write status of the
2057 * entries we modify here cannot change.
2058 */
2059
2060 while ((entry != &map->header) && (entry->start < end)) {
2061
2062 if (entry->wired_count == 0) { /* not already wired? */
2063
2064 /*
2065 * perform actions of vm_map_lookup that need the
2066 * write lock on the map: create an anonymous map
2067 * for a copy-on-write region, or an anonymous map
2068 * for a zero-fill region.
2069 *
2070 * we don't have to do this for entries that point
2071 * to sharing maps, because we won't hold the lock
2072 * on the sharing map.
2073 */
2074
2075 if (!UVM_ET_ISMAP(entry)) { /* not sharing map */
2076 /*
2077 * XXXCDC: protection vs. max_protection??
2078 * (wirefault uses max?)
2079 * XXXCDC: used to do it always if
2080 * uvm_obj == NULL (wrong?)
2081 */
2082 if ( UVM_ET_ISNEEDSCOPY(entry) &&
2083 (entry->protection & VM_PROT_WRITE) != 0) {
2084 amap_copy(map, entry, M_WAITOK, TRUE,
2085 start, end);
2086 /* XXXCDC: wait OK? */
2087 }
2088 }
2089 } /* wired_count == 0 */
2090 UVM_MAP_CLIP_START(map, entry, start);
2091 UVM_MAP_CLIP_END(map, entry, end);
2092 entry->wired_count++;
2093
2094 /*
2095 * Check for holes
2096 */
2097 if (entry->end < end && (entry->next == &map->header ||
2098 entry->next->start > entry->end)) {
2099 /*
2100 * found one. amap creation actions do not need to
2101 * be undone, but the wired counts need to be restored.
2102 */
2103 while (entry != &map->header && entry->end > start) {
2104 entry->wired_count--;
2105 entry = entry->prev;
2106 }
2107 vm_map_unlock(map);
2108 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
2109 return(KERN_INVALID_ARGUMENT);
2110 }
2111 entry = entry->next;
2112 }
2113
2114 /*
2115 * Pass 2.
2116 */
2117 /*
2118 * HACK HACK HACK HACK
2119 *
2120 * if we are wiring in the kernel map or a submap of it, unlock the
2121 * map to avoid deadlocks. we trust that the kernel threads are
2122 * well-behaved, and therefore will not do anything destructive to
2123 * this region of the map while we have it unlocked. we cannot
2124 * trust user threads to do the same.
2125 *
2126 * HACK HACK HACK HACK
2127 */
2128 if (vm_map_pmap(map) == pmap_kernel()) {
2129 vm_map_unlock(map); /* trust me ... */
2130 } else {
2131 vm_map_set_recursive(&map->lock);
2132 lockmgr(&map->lock, LK_DOWNGRADE, (void *)0);
2133 }
2134
2135 rv = 0;
2136 entry = start_entry;
2137 while (entry != &map->header && entry->start < end) {
2138 /*
2139 * if uvm_fault_wire fails for any page we need to undo what has
2140 * been done. we decrement the wiring count for those pages
2141 * which have not yet been wired (now) and unwire those that
2142 		 * have (later).
2143 *
2144 * XXX this violates the locking protocol on the map, needs to
2145 * be fixed. [because we only have a read lock on map we
2146 * shouldn't be changing wired_count?]
2147 */
2148 if (rv) {
2149 entry->wired_count--;
2150 } else if (entry->wired_count == 1) {
2151 rv = uvm_fault_wire(map, entry->start, entry->end);
2152 if (rv) {
2153 failed = entry->start;
2154 entry->wired_count--;
2155 }
2156 }
2157 entry = entry->next;
2158 }
2159
2160 if (vm_map_pmap(map) == pmap_kernel()) {
2161 vm_map_lock(map); /* relock */
2162 } else {
2163 vm_map_clear_recursive(&map->lock);
2164 }
2165
2166 if (rv) { /* failed? */
2167 vm_map_unlock(map);
2168 (void) uvm_map_pageable(map, start, failed, TRUE);
2169 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
2170 return(rv);
2171 }
2172 vm_map_unlock(map);
2173
2174 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
2175 return(KERN_SUCCESS);
2176 }
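/*
 * example: wiring down a page of kernel virtual memory and later
 * unwiring it (a sketch; "va" is an assumed page-aligned address
 * already mapped in kernel_map, and the map is unlocked on entry;
 * FALSE = wire, TRUE = unwire):
 *
 *	rv = uvm_map_pageable(kernel_map, va, va + PAGE_SIZE, FALSE);
 *	if (rv != KERN_SUCCESS)
 *		panic("wire failed");
 *	... use the wired memory ...
 *	(void) uvm_map_pageable(kernel_map, va, va + PAGE_SIZE, TRUE);
 */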
2177
2178 /*
2179 * uvm_map_clean: push dirty pages off to backing store.
2180 *
2181 * => valid flags:
2182 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
2183 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
2184 * if (flags & PGO_FREE): any cached pages are freed after clean
2185 * => returns an error if any part of the specified range isn't mapped
2186 * => never a need to flush amap layer since the anonymous memory has
2187 * no permanent home...
2188 * => called from sys_msync()
2189 * => caller must not write-lock map (read OK).
2190 * => we may sleep while cleaning if SYNCIO [with map read-locked]
2191 * => XXX: does this handle share maps properly?
2192 */
2193
2194 int
2195 uvm_map_clean(map, start, end, flags)
2196 vm_map_t map;
2197 vm_offset_t start, end;
2198 int flags;
2199 {
2200 vm_map_entry_t current;
2201 vm_map_entry_t entry;
2202 vm_size_t size;
2203 struct uvm_object *object;
2204 vm_offset_t offset;
2205 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
2206 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
2207 map, start, end, flags);
2208
2209 vm_map_lock_read(map);
2210 VM_MAP_RANGE_CHECK(map, start, end);
2211 if (!uvm_map_lookup_entry(map, start, &entry)) {
2212 vm_map_unlock_read(map);
2213 return(KERN_INVALID_ADDRESS);
2214 }
2215
2216 /*
2217 * Make a first pass to check for holes.
2218 */
2219 for (current = entry; current->start < end; current = current->next) {
2220 if (UVM_ET_ISSUBMAP(current)) {
2221 vm_map_unlock_read(map);
2222 return(KERN_INVALID_ARGUMENT);
2223 }
2224 if (end > current->end && (current->next == &map->header ||
2225 current->end != current->next->start)) {
2226 vm_map_unlock_read(map);
2227 return(KERN_INVALID_ADDRESS);
2228 }
2229 }
2230
2231 /*
2232 * add "cleanit" flag to flags (for generic flush routine).
2233 * then make a second pass, cleaning/uncaching pages from
2234 * the indicated objects as we go.
2235 */
2236 flags = flags | PGO_CLEANIT;
2237 for (current = entry; current->start < end; current = current->next) {
2238 offset = current->offset + (start - current->start);
2239 size = (end <= current->end ? end : current->end) - start;
2240
2241 /*
2242 * get object/offset. special case to handle share maps.
2243 */
2244 if (UVM_ET_ISMAP(current)) { /* share map? */
2245 register vm_map_t smap;
2246 vm_map_entry_t tentry;
2247 vm_size_t tsize;
2248
2249 smap = current->object.share_map;
2250 vm_map_lock_read(smap);
2251 (void) uvm_map_lookup_entry(smap, offset, &tentry);
2252 tsize = tentry->end - offset;
2253 if (tsize < size)
2254 size = tsize;
2255 object = tentry->object.uvm_obj;
2256 offset = tentry->offset + (offset - tentry->start);
2257 simple_lock(&object->vmobjlock);
2258 vm_map_unlock_read(smap);
2259 } else {
2260 object = current->object.uvm_obj;
2261 simple_lock(&object->vmobjlock);
2262 }
2263
2264 /*
2265 * flush pages if writing is allowed. note that object is
2266 * locked.
2267 * XXX should we continue on an error?
2268 */
2269
2270 if (object && object->pgops &&
2271 (current->protection & VM_PROT_WRITE) != 0) {
2272 if (!object->pgops->pgo_flush(object, offset,
2273 offset+size, flags)) {
2274 simple_unlock(&object->vmobjlock);
2275 vm_map_unlock_read(map);
2276 return (KERN_FAILURE);
2277 }
2278 }
2279 simple_unlock(&object->vmobjlock);
2280 start += size;
2281 }
2282 vm_map_unlock_read(map);
2283 return(KERN_SUCCESS);
2284 }
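/*
 * example: an msync(2)-style flush that synchronously writes out dirty
 * pages and then frees the clean ones (a sketch; the range is assumed
 * page-aligned and fully mapped):
 *
 *	rv = uvm_map_clean(map, start, end, PGO_SYNCIO|PGO_FREE);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 */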
2285
2286
2287 /*
2288 * uvm_map_checkprot: check protection in map
2289 *
2290 * => must allow specified protection in a fully allocated region.
2291 * => map must be read or write locked by caller.
2292 */
2293
2294 boolean_t
2295 uvm_map_checkprot(map, start, end, protection)
2296 vm_map_t map;
2297 vm_offset_t start, end;
2298 vm_prot_t protection;
2299 {
2300 vm_map_entry_t entry;
2301 vm_map_entry_t tmp_entry;
2302
2303 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
2304 return(FALSE);
2305 }
2306
2307 entry = tmp_entry;
2308
2309 while (start < end) {
2310 if (entry == &map->header) {
2311 return(FALSE);
2312 }
2313
2314 /*
2315 * no holes allowed
2316 */
2317
2318 if (start < entry->start) {
2319 return(FALSE);
2320 }
2321
2322 /*
2323 * check protection associated with entry
2324 */
2325
2326 if ((entry->protection & protection) != protection) {
2327 return(FALSE);
2328 }
2329
2330 /* go to next entry */
2331
2332 start = entry->end;
2333 entry = entry->next;
2334 }
2335 return(TRUE);
2336 }
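/*
 * example: verifying that a buffer is readable before starting an
 * I/O operation (a sketch; note the read lock that this function
 * requires of its caller):
 *
 *	boolean_t rv;
 *
 *	vm_map_lock_read(map);
 *	rv = uvm_map_checkprot(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);
 *	if (!rv)
 *		return (EFAULT);
 */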
2337
2338 /*
2339 * uvmspace_alloc: allocate a vmspace structure.
2340 *
2341 * - structure includes vm_map and pmap
2342 * - XXX: no locking on this structure
2343 * - refcnt set to 1, rest must be init'd by caller
2344 */
2345 struct vmspace *
2346 uvmspace_alloc(min, max, pageable)
2347 vm_offset_t min, max;
2348 int pageable;
2349 {
2350 struct vmspace *vm;
2351 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
2352
2353 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
2354 uvmspace_init(vm, NULL, min, max, pageable);
2355 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
2356 return (vm);
2357 }
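/*
 * example: creating a fresh, pageable user address space (a sketch;
 * VM_MIN_ADDRESS and VM_MAXUSER_ADDRESS are the usual user VA bounds):
 *
 *	struct vmspace *vm;
 *
 *	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, TRUE);
 */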
2358
2359 /*
2360 * uvmspace_init: initialize a vmspace structure.
2361 *
2362 * - XXX: no locking on this structure
2363  * - refcnt set to 1, rest must be init'd by caller
2364 */
2365 void
2366 uvmspace_init(vm, pmap, min, max, pageable)
2367 struct vmspace *vm;
2368 struct pmap *pmap;
2369 vm_offset_t min, max;
2370 boolean_t pageable;
2371 {
2372 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
2373
2374 bzero(vm, sizeof(*vm));
2375
2376 uvm_map_setup(&vm->vm_map, min, max, pageable);
2377
2378 if (pmap)
2379 pmap_reference(pmap);
2380 else
2381 #if defined(PMAP_NEW)
2382 pmap = pmap_create();
2383 #else
2384 pmap = pmap_create(0);
2385 #endif
2386 vm->vm_map.pmap = pmap;
2387
2388 vm->vm_refcnt = 1;
2389 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
2390 }
2391
2392 /*
2393  * uvmspace_share: share a vmspace between two processes
2394 *
2395 * - XXX: no locking on vmspace
2396 * - used for vfork, threads(?)
2397 */
2398
2399 void
2400 uvmspace_share(p1, p2)
2401 struct proc *p1, *p2;
2402 {
2403 p2->p_vmspace = p1->p_vmspace;
2404 p1->p_vmspace->vm_refcnt++;
2405 }
2406
2407 /*
2408 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
2409 *
2410 * - XXX: no locking on vmspace
2411 */
2412
2413 void
2414 uvmspace_unshare(p)
2415 struct proc *p;
2416 {
2417 struct vmspace *nvm, *ovm = p->p_vmspace;
2418 int s;
2419
2420 if (ovm->vm_refcnt == 1)
2421 /* nothing to do: vmspace isn't shared in the first place */
2422 return;
2423
2424 /* make a new vmspace, still holding old one */
2425 nvm = uvmspace_fork(ovm);
2426
2427 s = splhigh(); /* make this `atomic' */
2428 pmap_deactivate(p); /* unbind old vmspace */
2429 p->p_vmspace = nvm;
2430 pmap_activate(p); /* switch to new vmspace */
2431 splx(s); /* end of critical section */
2432
2433 uvmspace_free(ovm); /* drop reference to old vmspace */
2434 }
2435
2436 /*
2437 * uvmspace_exec: the process wants to exec a new program
2438 *
2439 * - XXX: no locking on vmspace
2440 */
2441
2442 void
2443 uvmspace_exec(p)
2444 struct proc *p;
2445 {
2446 struct vmspace *nvm, *ovm = p->p_vmspace;
2447 vm_map_t map = &ovm->vm_map;
2448 int s;
2449
2450 #ifdef sparc
2451 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
2452 kill_user_windows(p); /* before stack addresses go away */
2453 #endif
2454
2455 /*
2456 * see if more than one process is using this vmspace...
2457 */
2458
2459 if (ovm->vm_refcnt == 1) {
2460
2461 /*
2462 * if p is the only process using its vmspace then we can safely
2463 * recycle that vmspace for the program that is being exec'd.
2464 */
2465
2466 #ifdef SYSVSHM
2467 /*
2468 * SYSV SHM semantics require us to kill all segments on an exec
2469 */
2470 if (ovm->vm_shm)
2471 shmexit(ovm);
2472 #endif
2473
2474 /*
2475 * now unmap the old program
2476 */
2477 uvm_unmap(map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, 0);
2478
2479 } else {
2480
2481 /*
2482 * p's vmspace is being shared, so we can't reuse it for p since
2483 * it is still being used for others. allocate a new vmspace
2484 * for p
2485 */
2486 nvm = uvmspace_alloc(map->min_offset, map->max_offset,
2487 map->entries_pageable);
2488
2489 #if (defined(i386) || defined(pc532)) && !defined(PMAP_NEW)
2490 /*
2491 * allocate zero fill area in the new vmspace's map for user
2492 * page tables for ports that have old style pmaps that keep
2493 * user page tables in the top part of the process' address
2494 * space.
2495 *
2496 * XXXCDC: this should go away once all pmaps are fixed
2497 */
2498 {
2499 vm_offset_t addr = VM_MAXUSER_ADDRESS;
2500 if (uvm_map(&nvm->vm_map, &addr, VM_MAX_ADDRESS - addr,
2501 NULL, UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
2502 UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_NORMAL,
2503 UVM_FLAG_FIXED|UVM_FLAG_COPYONW)) != KERN_SUCCESS)
2504 panic("vm_allocate of PT page area failed");
2505 }
2506 #endif
2507
2508 /*
2509 * install new vmspace and drop our ref to the old one.
2510 */
2511
2512 s = splhigh();
2513 pmap_deactivate(p);
2514 p->p_vmspace = nvm;
2515 pmap_activate(p);
2516 splx(s);
2517
2518 uvmspace_free(ovm);
2519 }
2520 }
2521
2522 /*
2523 * uvmspace_free: free a vmspace data structure
2524 *
2525 * - XXX: no locking on vmspace
2526 */
2527
2528 void
2529 uvmspace_free(vm)
2530 struct vmspace *vm;
2531 {
2532 vm_map_entry_t dead_entries;
2533 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
2534
2535 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
2536 if (--vm->vm_refcnt == 0) {
2537 /*
2538 * lock the map, to wait out all other references to it. delete
2539 * all of the mappings and pages they hold, then call the pmap
2540 * module to reclaim anything left.
2541 */
2542 vm_map_lock(&vm->vm_map);
2543 if (vm->vm_map.nentries) {
2544 (void)uvm_unmap_remove(&vm->vm_map,
2545 vm->vm_map.min_offset, vm->vm_map.max_offset,
2546 TRUE, &dead_entries);
2547 if (dead_entries != NULL)
2548 uvm_unmap_detach(dead_entries, 0);
2549 }
2550 pmap_destroy(vm->vm_map.pmap);
2551 vm->vm_map.pmap = NULL;
2552 FREE(vm, M_VMMAP);
2553 }
2554 UVMHIST_LOG(maphist,"<- done", 0,0,0,0);
2555 }
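/*
 * example: a process teardown path would drop its reference with
 * something like the following (a sketch; "p" is the exiting process):
 *
 *	uvmspace_free(p->p_vmspace);
 *	p->p_vmspace = NULL;
 */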
2556
2557 /*
2558 * F O R K - m a i n e n t r y p o i n t
2559 */
2560 /*
2561 * uvmspace_fork: fork a process' main map
2562 *
2563 * => create a new vmspace for child process from parent.
2564 * => parent's map must not be locked.
2565 */
2566
2567 struct vmspace *
2568 uvmspace_fork(vm1)
2569 struct vmspace *vm1;
2570 {
2571 struct vmspace *vm2;
2572 vm_map_t old_map = &vm1->vm_map;
2573 vm_map_t new_map;
2574 vm_map_entry_t old_entry;
2575 vm_map_entry_t new_entry;
2576 pmap_t new_pmap;
2577 boolean_t protect_child;
2578 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
2579
2580 #if (defined(i386) || defined(pc532)) && !defined(PMAP_NEW)
2581 /*
2582 * avoid copying any of the parent's pagetables or other per-process
2583 * objects that reside in the map by marking all of them non-inheritable
2584 * XXXCDC: should go away
2585 */
2586 (void) uvm_map_inherit(old_map, VM_MAXUSER_ADDRESS, VM_MAX_ADDRESS,
2587 VM_INHERIT_NONE);
2588 #endif
2589
2590 vm_map_lock(old_map);
2591
2592 vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
2593 old_map->entries_pageable);
2594 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2595 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2596 new_map = &vm2->vm_map; /* XXX */
2597 new_pmap = new_map->pmap;
2598
2599 old_entry = old_map->header.next;
2600
2601 /*
2602 * go entry-by-entry
2603 */
2604
2605 while (old_entry != &old_map->header) {
2606
2607 /*
2608 * first, some sanity checks on the old entry
2609 */
2610 if (UVM_ET_ISSUBMAP(old_entry))
2611 panic("fork: encountered a submap during fork (illegal)");
2612 else if (UVM_ET_ISMAP(old_entry)) {
2613 if (UVM_ET_ISNEEDSCOPY(old_entry))
2614 panic("fork: encountered a share map entry that needs_copy (illegal)");
2615 if (UVM_ET_ISCOPYONWRITE(old_entry))
2616 panic("fork: encountered a copy_on_write share map entry (illegal)");
2617 if (old_entry->aref.ar_amap)
2618 panic("fork: detected share map entry that has an amap (illegal)");
2619 } else {
2620 if (!UVM_ET_ISCOPYONWRITE(old_entry) &&
2621 UVM_ET_ISNEEDSCOPY(old_entry))
2622 panic("fork: non-copy_on_write map entry marked needs_copy (illegal)");
2623 }
2624
2625
2626 switch (old_entry->inheritance) {
2627 case VM_INHERIT_NONE:
2628 /*
2629 * drop the mapping
2630 */
2631 break;
2632
2633 case VM_INHERIT_SHARE:
2634 /*
2635 * share the mapping: this means we want the old and
2636 * new entries to share amaps and backing objects.
2637 */
2638
2639 /*
2640 * if the old_entry needs a new amap (due to prev fork)
2641 * then we need to allocate it now so that we have
2642 * something we own to share with the new_entry. [in
2643 * other words, we need to clear needs_copy]
2644 */
2645
2646 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
2647 /* get our own amap, clears needs_copy */
2648 amap_copy(old_map, old_entry, M_WAITOK, FALSE,
2649 0, 0);
2650 /* XXXCDC: WAITOK??? */
2651 }
2652
2653 new_entry = uvm_mapent_alloc(new_map);
2654 /* old_entry -> new_entry */
2655 uvm_mapent_copy(old_entry, new_entry);
2656
2657 /* new pmap has nothing wired in it */
2658 new_entry->wired_count = 0;
2659
2660 /*
2661 * gain reference to objects backing the map
2662 */
2663 if (UVM_ET_ISMAP(new_entry)) { /* share map? */
2664 uvm_map_reference(old_entry->object.share_map);
2665 } else {
2666 if (new_entry->aref.ar_amap)
2667 /* share reference */
2668 amap_ref(new_entry, AMAP_SHARED);
2669
2670 if (new_entry->object.uvm_obj &&
2671 new_entry->object.uvm_obj->pgops->pgo_reference)
2672 new_entry->object.uvm_obj->
2673 pgops->pgo_reference(
2674 new_entry->object.uvm_obj);
2675 }
2676
2677 /* insert entry at end of new_map's entry list */
2678 uvm_map_entry_link(new_map, new_map->header.prev,
2679 new_entry);
2680
2681 /*
2682 * pmap_copy the mappings: this routine is optional
2683 * but if it is there it will reduce the number of
2684 * page faults in the new proc.
2685 */
2686
2687 pmap_copy(new_pmap, old_map->pmap, new_entry->start,
2688 (old_entry->end - old_entry->start),
2689 old_entry->start);
2690
2691 break;
2692
2693 case VM_INHERIT_COPY:
2694
2695 /*
2696 * copy-on-write the mapping (using mmap's
2697 * MAP_PRIVATE semantics)
2698 */
2699
2700 /*
2701 * share maps: we special case it (handled by
2702 * uvm_map_sharemapcopy)
2703 */
2704
2705 if (UVM_ET_ISMAP(old_entry)) { /* share map? */
2706 uvm_map_sharemapcopy(old_map, old_entry,
2707 new_map);
2708 break;
2709 }
2710
2711 /*
2712 * not a share map. allocate new_entry, adjust
2713 * reference counts. (note that new references
2714 * are read-only).
2715 */
2716
2717 new_entry = uvm_mapent_alloc(new_map);
2718 /* old_entry -> new_entry */
2719 uvm_mapent_copy(old_entry, new_entry);
2720
2721 if (new_entry->aref.ar_amap)
2722 amap_ref(new_entry, 0);
2723
2724 if (new_entry->object.uvm_obj &&
2725 new_entry->object.uvm_obj->pgops->pgo_reference)
2726 new_entry->object.uvm_obj->pgops->pgo_reference
2727 (new_entry->object.uvm_obj);
2728
2729 /* new pmap has nothing wired in it */
2730 new_entry->wired_count = 0;
2731
2732 new_entry->etype |=
2733 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
2734 uvm_map_entry_link(new_map, new_map->header.prev,
2735 new_entry);
2736
2737 /*
2738 * the new entry will need an amap. it will either
2739 * need to be copied from the old entry or created
2740 * from scratch (if the old entry does not have an
2741 * amap). can we defer this process until later
2742 * (by setting "needs_copy") or do we need to copy
2743 * the amap now?
2744 *
2745 * we must copy the amap now if any of the following
2746 * conditions hold:
2747 * 1. the old entry has an amap and that amap is
2748 * being shared. this means that the old (parent)
2749 * process is sharing the amap with another
2750 * process. if we do not clear needs_copy here
2751 * we will end up in a situation where both the
2752 			 * parent and child process are referring to the
2753 * same amap with "needs_copy" set. if the
2754 * parent write-faults, the fault routine will
2755 * clear "needs_copy" in the parent by allocating
2756 * a new amap. this is wrong because the
2757 * parent is supposed to be sharing the old amap
2758 * and the new amap will break that.
2759 *
2760 * 2. if the old entry has an amap and a non-zero
2761 * wire count then we are going to have to call
2762 * amap_cow_now to avoid page faults in the
2763 * parent process. since amap_cow_now requires
2764 * "needs_copy" to be clear we might as well
2765 * clear it here as well.
2766 *
2767 */
2768
2769 if (old_entry->aref.ar_amap != NULL) {
2770
2771 if ((old_entry->aref.ar_amap->am_flags &
2772 AMAP_SHARED) != 0 ||
2773 old_entry->wired_count != 0) {
2774
2775 amap_copy(new_map, new_entry, M_WAITOK, FALSE,
2776 0, 0);
2777 /* XXXCDC: M_WAITOK ... ok? */
2778 }
2779 }
2780
2781 /*
2782 * if the parent's entry is wired down, then the
2783 * parent process does not want page faults on
2784 * access to that memory. this means that we
2785 * cannot do copy-on-write because we can't write
2786 * protect the old entry. in this case we
2787 * resolve all copy-on-write faults now, using
2788 * amap_cow_now. note that we have already
2789 * allocated any needed amap (above).
2790 */
2791
2792 if (old_entry->wired_count != 0) {
2793
2794 /*
2795 * resolve all copy-on-write faults now
2796 * (note that there is nothing to do if
2797 * the old mapping does not have an amap).
2798 * XXX: is it worthwhile to bother with pmap_copy
2799 * in this case?
2800 */
2801 if (old_entry->aref.ar_amap)
2802 amap_cow_now(new_map, new_entry);
2803
2804 } else {
2805
2806 /*
2807 				 * set up mappings to trigger copy-on-write faults.
2808 * we must write-protect the parent if it has
2809 * an amap and it is not already "needs_copy"...
2810 * if it is already "needs_copy" then the parent
2811 * has already been write-protected by a previous
2812 * fork operation.
2813 *
2814 * if we do not write-protect the parent, then
2815 * we must be sure to write-protect the child
2816 * after the pmap_copy() operation.
2817 *
2818 * XXX: pmap_copy should have some way of telling
2819 * us that it didn't do anything so we can avoid
2820 * calling pmap_protect needlessly.
2821 */
2822
2823 if (old_entry->aref.ar_amap) {
2824
2825 if (!UVM_ET_ISNEEDSCOPY(old_entry)) {
2826 if (old_entry->max_protection & VM_PROT_WRITE) {
2827 pmap_protect(old_map->pmap,
2828 old_entry->start,
2829 old_entry->end,
2830 old_entry->protection &
2831 ~VM_PROT_WRITE);
2832 }
2833 old_entry->etype |= UVM_ET_NEEDSCOPY;
2834 }
2835
2836 /*
2837 * parent must now be write-protected
2838 */
2839 protect_child = FALSE;
2840 } else {
2841
2842 /*
2843 * we only need to protect the child if the
2844 * parent has write access.
2845 */
2846 if (old_entry->max_protection & VM_PROT_WRITE)
2847 protect_child = TRUE;
2848 else
2849 protect_child = FALSE;
2850
2851 }
2852
2853 /*
2854 * copy the mappings
2855 * XXX: need a way to tell if this does anything
2856 */
2857
2858 pmap_copy(new_pmap, old_map->pmap,
2859 new_entry->start,
2860 (old_entry->end - old_entry->start),
2861 old_entry->start);
2862
2863 /*
2864 * protect the child's mappings if necessary
2865 */
2866 if (protect_child) {
2867 pmap_protect(new_pmap, new_entry->start,
2868 new_entry->end,
2869 new_entry->protection &
2870 ~VM_PROT_WRITE);
2871 }
2872
2873 }
2874 break;
2875 } /* end of switch statement */
2876 old_entry = old_entry->next;
2877 }
2878
2879 new_map->size = old_map->size;
2880 vm_map_unlock(old_map);
2881
2882 #if (defined(i386) || defined(pc532)) && !defined(PMAP_NEW)
2883 /*
2884 * allocate zero fill area in the new vmspace's map for user
2885 * page tables for ports that have old style pmaps that keep
2886 * user page tables in the top part of the process' address
2887 * space.
2888 *
2889 * XXXCDC: this should go away once all pmaps are fixed
2890 */
2891 {
2892 vm_offset_t addr = VM_MAXUSER_ADDRESS;
2893 if (uvm_map(new_map, &addr, VM_MAX_ADDRESS - addr, NULL,
2894 UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
2895 UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_NORMAL,
2896 UVM_FLAG_FIXED|UVM_FLAG_COPYONW)) != KERN_SUCCESS)
2897 panic("vm_allocate of PT page area failed");
2898 }
2899 #endif
2900
2901 #ifdef SYSVSHM
2902 if (vm1->vm_shm)
2903 shmfork(vm1, vm2);
2904 #endif
2905
2906 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
2907 return(vm2);
2908 }
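/*
 * example: the fork path uses this roughly as follows (a sketch;
 * "p1" is the parent process and "p2" the new child):
 *
 *	p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
 */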
2909
2910
2911 /*
2912 * uvm_map_sharemapcopy: handle the copying of a share map during a
2913 * fork. this is a helper function for uvmspace_fork. it is called
2914 * when we are doing a fork and we have encountered a map entry which
2915 * has two attributes: [1] its inherit code is VM_INHERIT_COPY, and
2916 * [2] it points to a share map (i.e. is_a_map is true). in this case
2917 * we must traverse the area of the share map pointed to by the
2918 * old_entry and make private copies of the map entries in the share
2919 * map. this is somewhat similar to what happens in the non-share map
2920 * case in fork, but it has to handle multiple map entries which may
2921  * not be the proper size. it was separated out into its own function
2922 * in order to make the main body of the fork code easier to read and
2923 * understand!
2924 *
2925 * main_entry->offset = starting VA in share map for our mapping
2926 *
2927 * => main map is locked by caller.
2928 * => we lock share map.
2929 * => new map isn't in use yet (still being set up for the first time).
2930 */
2931
2932 void
2933 uvm_map_sharemapcopy(main_map, main_entry, new_map)
2934 vm_map_t main_map, new_map;
2935 vm_map_entry_t main_entry;
2936 {
2937 vm_map_t share_map = main_entry->object.share_map;
2938 vm_map_entry_t share_entry, new_entry;
2939 vm_offset_t shend = main_entry->offset +
2940 (main_entry->end - main_entry->start);
2941 int refs;
2942
2943 /*
2944 * lock share map. find first map entry of interest. clip if needed.
2945 */
2946
2947 vm_map_lock(share_map);
2948 if (uvm_map_lookup_entry(share_map, main_entry->offset, &share_entry))
2949 UVM_MAP_CLIP_START(share_map, share_entry, main_entry->offset);
2950
2951 while (share_entry != &share_map->header &&
2952 share_entry->start < shend) {
2953
2954 /*
2955 * at this point we have a map entry that we need to make a
2956 * copy of.
2957 */
2958
2959 /* may need to clip? */
2960 UVM_MAP_CLIP_END(share_map, share_entry, shend);
2961 new_entry = uvm_mapent_alloc(new_map);
2962
2963 /* share_entry -> new_entry */
2964 uvm_mapent_copy(share_entry, new_entry);
2965
2966 /* convert share map addresses back to main map addresses */
2967 new_entry->start = main_entry->start +
2968 (new_entry->start - main_entry->offset);
2969 new_entry->end = main_entry->start +
2970 (new_entry->end - main_entry->offset);
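		/*
		 * for example (hypothetical values): with
		 * main_entry->start = 0x1000 and main_entry->offset =
		 * 0x5000, a share map entry clipped to [0x5000,0x6000)
		 * becomes a new_entry of [0x1000,0x2000) in the
		 * child's map.
		 */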
2971
2972 /* gain references */
2973 if (new_entry->aref.ar_amap) {
2974 amap_ref(new_entry, 0);
2975 }
2976 if (new_entry->object.uvm_obj &&
2977 new_entry->object.uvm_obj->pgops->pgo_reference)
2978 new_entry->object.uvm_obj->
2979 pgops->pgo_reference(new_entry->object.uvm_obj);
2980
2981 /* init rest of new entry and insert at end of new map */
2982 new_entry->wired_count = 0;
2983 new_entry->etype |= (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
2984 uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
2985
2986 /*
2987 * don't bother trying to defer the copy in the share map case
2988 */
2989 /* XXXCDC: WAITOK? */
2990 amap_copy(new_map, new_entry, M_WAITOK, FALSE, 0, 0);
2991
2992 /* just like non-share case: can't COW wired memory */
2993 if (share_entry->wired_count != 0 &&
2994 UVM_ET_ISCOPYONWRITE(share_entry)) {
2995 amap_cow_now(new_map, new_entry);
2996 } else {
2997
2998 /* just like non-share case */
2999 if (UVM_ET_ISCOPYONWRITE(share_entry)) {
3000
3001 if (!UVM_ET_ISNEEDSCOPY(share_entry)) {
3002
3003 /*
3004 * must write protect pages. if we
3005 * have the sole reference to the share
3006 * map we can use good old pmap_protect.
3007 * if we don't, then we have to use
3008 * pmap_page_protect. note that the VA
3009 * new_entry->start (starting entry of
3010 * this segment of the share map in
3011 * child process) is the same virtual
3012 					 * address at which it is mapped in the parent
3013 * (thus we can mix main_map and
3014 * new_entry in the pmap_protect call
3015 * below).
3016 */
3017
3018 simple_lock(&share_map->ref_lock);
3019 refs = share_map->ref_count;
3020 simple_unlock(&share_map->ref_lock);
3021 if (refs == 1) {
3022 pmap_protect(main_map->pmap,
3023 new_entry->start,
3024 new_entry->end,
3025 share_entry->protection &
3026 ~VM_PROT_WRITE);
3027 } else {
3028 if (share_entry->aref.ar_amap) {
3029 simple_lock(
3030 &share_entry->aref.ar_amap->am_l);
3031 amap_share_protect(share_entry,
3032 share_entry->protection &
3033 ~VM_PROT_WRITE);
3034 simple_unlock(
3035 &share_entry->aref.ar_amap->am_l);
3036 }
3037 if (share_entry->object.uvm_obj)
3038 {
3039 #ifdef DIAGNOSTIC
3040 if (!share_entry->object.uvm_obj->pgops->
3041 pgo_shareprot)
3042 panic("fork: share_entry with no prot function");
3043 #endif
3044 simple_lock(
3045 &share_entry->object.uvm_obj->vmobjlock);
3046 share_entry->object.uvm_obj->pgops->
3047 pgo_shareprot(share_entry,
3048 share_entry->protection & ~VM_PROT_WRITE);
3049 simple_unlock(
3050 &share_entry->object.uvm_obj->vmobjlock);
3051 }
3052 }
3053 share_entry->etype |= UVM_ET_NEEDSCOPY;
3054 }
3055 }
3056
3057 /*
3058 			 * now copy the mappings: note addresses are the same
3059 * in both main_map and new_map
3060 */
3061 pmap_copy(new_map->pmap, main_map->pmap,
3062 new_entry->start,
3063 (new_entry->end - new_entry->start),
3064 new_entry->start);
3065
3066 /* just like non-share case */
3067 if (!UVM_ET_ISCOPYONWRITE(share_entry)) {
3068 pmap_protect(new_map->pmap, new_entry->start,
3069 new_entry->end,
3070 new_entry->protection & ~VM_PROT_WRITE);
3071 }
3072 }
3073
3074 /* next entry in share map, please */
3075 share_entry = share_entry->next;
3076
3077 }
3078 /* done! */
3079 }
3080
3081 #if defined(DDB)
3082
3083 /*
3084 * DDB hooks
3085 */
3086
3087 /*
3088 * uvm_map_print: print out a map
3089 */
3090
3091 void
3092 uvm_map_print(map, full)
3093 vm_map_t map;
3094 boolean_t full;
3095 {
3096
3097 uvm_map_printit(map, full, printf);
3098 }
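/*
 * example: from the kernel debugger one might dump a map with
 * something like the following (a sketch; the map address is
 * hypothetical and exact ddb syntax varies):
 *
 *	call uvm_map_print(0xf07b2000, 1)
 */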
3099
3100 /*
3101 * uvm_map_printit: actually prints the map
3102 */
3103
3104 void
3105 uvm_map_printit(map, full, pr)
3106 vm_map_t map;
3107 boolean_t full;
3108 void (*pr) __P((const char *, ...));
3109 {
3110 vm_map_entry_t entry;
3111
3112 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset);
3113 (*pr)("\t#ent=%d, sz=%d, ref=%d, main=%c, version=%d\n",
3114 map->nentries, map->size, map->ref_count,
3115 (map->is_main_map) ? 'T' : 'F', map->timestamp);
3116 #ifdef pmap_resident_count
3117 (*pr)("\tpmap=%p(resident=%d)\n", map->pmap,
3118 pmap_resident_count(map->pmap));
3119 #else
3120 /* XXXCDC: this should be required ... */
3121 (*pr)("\tpmap=%p(resident=<<NOT SUPPORTED!!!>>)\n", map->pmap);
3122 #endif
3123 if (!full)
3124 return;
3125 for (entry = map->header.next; entry != &map->header;
3126 entry = entry->next) {
3127 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%x, amap=%p/%d\n",
3128 entry, entry->start, entry->end, entry->object.uvm_obj,
3129 entry->offset, entry->aref.ar_amap, entry->aref.ar_slotoff);
3130 (*pr)(
3131 "\tmap=%c, submap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, wc=%d, adv=%d\n",
3132 (entry->etype & UVM_ET_MAP) ? 'T' : 'F',
3133 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
3134 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
3135 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
3136 entry->protection, entry->max_protection,
3137 entry->inheritance, entry->wired_count, entry->advice);
3138 }
3139 }
3140
3141 /*
3142 * uvm_object_print: print out an object
3143 */
3144
3145 void
3146 uvm_object_print(uobj, full)
3147 struct uvm_object *uobj;
3148 boolean_t full;
3149 {
3150
3151 uvm_object_printit(uobj, full, printf);
3152 }
3153
3154 /*
3155 * uvm_object_printit: actually prints the object
3156 */
3157
3158 void
3159 uvm_object_printit(uobj, full, pr)
3160 struct uvm_object *uobj;
3161 boolean_t full;
3162 void (*pr) __P((const char *, ...));
3163 {
3164 struct vm_page *pg;
3165 int cnt = 0;
3166
3167 (*pr)("OBJECT %p: pgops=%p, npages=%d, ", uobj, uobj->pgops,
3168 uobj->uo_npages);
3169 if (uobj->uo_refs == UVM_OBJ_KERN)
3170 (*pr)("refs=<SYSTEM>\n");
3171 else
3172 (*pr)("refs=%d\n", uobj->uo_refs);
3173
3174 if (!full) return;
3175 (*pr)(" PAGES <pg,offset>:\n ");
3176 for (pg = uobj->memq.tqh_first ; pg ; pg = pg->listq.tqe_next, cnt++) {
3177 (*pr)("<%p,0x%lx> ", pg, pg->offset);
3178 if ((cnt % 3) == 2) (*pr)("\n ");
3179 }
3180 if ((cnt % 3) != 2) (*pr)("\n");
3181 }
3182
3183 /*
3184 * uvm_page_print: print out a page
3185 */
3186
3187 void
3188 uvm_page_print(pg, full)
3189 struct vm_page *pg;
3190 boolean_t full;
3191 {
3192
3193 uvm_page_printit(pg, full, printf);
3194 }
3195
3196 /*
3197 * uvm_page_printit: actually print the page
3198 */
3199
3200 void
3201 uvm_page_printit(pg, full, pr)
3202 struct vm_page *pg;
3203 boolean_t full;
3204 void (*pr) __P((const char *, ...));
3205 {
3206 struct vm_page *lcv;
3207 struct uvm_object *uobj;
3208 struct pglist *pgl;
3209
3210 (*pr)("PAGE %p:\n", pg);
3211 (*pr)(" flags=0x%x, pqflags=0x%x, vers=%d, wire_count=%d, pa=0x%lx\n",
3212 pg->flags, pg->pqflags, pg->version, pg->wire_count, pg->phys_addr);
3213 (*pr)(" uobject=%p, uanon=%p, offset=0x%lx loan_count=%d\n",
3214 pg->uobject, pg->uanon, pg->offset, pg->loan_count);
3215 #if defined(UVM_PAGE_TRKOWN)
3216 if (pg->flags & PG_BUSY)
3217 (*pr)(" owning process = %d, tag=%s\n",
3218 pg->owner, pg->owner_tag);
3219 else
3220 (*pr)(" page not busy, no owner\n");
3221 #else
3222 (*pr)(" [page ownership tracking disabled]\n");
3223 #endif
3224
3225 if (!full)
3226 return;
3227
3228 /* cross-verify object/anon */
3229 if ((pg->pqflags & PQ_FREE) == 0) {
3230 if (pg->pqflags & PQ_ANON) {
3231 if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
3232 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
3233 (pg->uanon) ? pg->uanon->u.an_page : NULL);
3234 else
3235 (*pr)(" anon backpointer is OK\n");
3236 } else {
3237 uobj = pg->uobject;
3238 if (uobj) {
3239 (*pr)(" checking object list\n");
3240 for (lcv = uobj->memq.tqh_first ; lcv ;
3241 lcv = lcv->listq.tqe_next) {
3242 if (lcv == pg) break;
3243 }
3244 if (lcv)
3245 (*pr)(" page found on object list\n");
3246 else
3247 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
3248 }
3249 }
3250 }
3251
3252 /* cross-verify page queue */
3253 if (pg->pqflags & PQ_FREE)
3254 pgl = &uvm.page_free;
3255 else if (pg->pqflags & PQ_INACTIVE)
3256 pgl = (pg->pqflags & PQ_SWAPBACKED) ?
3257 &uvm.page_inactive_swp : &uvm.page_inactive_obj;
3258 else if (pg->pqflags & PQ_ACTIVE)
3259 pgl = &uvm.page_active;
3260 else
3261 pgl = NULL;
3262
3263 if (pgl) {
3264 (*pr)(" checking pageq list\n");
3265 for (lcv = pgl->tqh_first ; lcv ; lcv = lcv->pageq.tqe_next) {
3266 if (lcv == pg) break;
3267 }
3268 if (lcv)
3269 (*pr)(" page found on pageq list\n");
3270 else
3271 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
3272 }
3273 }
3274 #endif
3275