/*	$NetBSD: uvm_map.c,v 1.8 1998/02/24 15:58:09 chuck Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#define UVM_MAP
#include <uvm/uvm.h>

struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;

/*
 * macros
 */

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) \
{ \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
}

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) \
{ \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
}

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 * braced like the other macros here so it stays a single statement.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define SAVE_HINT(map, value) \
{ \
	simple_lock(&(map)->hint_lock); \
	(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock); \
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) \
{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
}

/*
 * local prototypes
 */

static vm_map_entry_t	uvm_mapent_alloc __P((vm_map_t));
static void		uvm_mapent_copy __P((vm_map_entry_t, vm_map_entry_t));
static void		uvm_mapent_free __P((vm_map_entry_t));
static void		uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));

/*
 * local inlines
 */

/*
 * uvm_mapent_alloc: allocate a map entry
 *
 * => XXX: static pool for kernel map?
 */

static __inline vm_map_entry_t uvm_mapent_alloc(map)
	vm_map_t map;
{
	vm_map_entry_t me;
	int s;
	UVMHIST_FUNC("uvm_mapent_alloc");
	UVMHIST_CALLED(maphist);

	if (map->entries_pageable) {
		MALLOC(me, vm_map_entry_t, sizeof(struct vm_map_entry),
		    M_VMMAPENT, M_WAITOK);
		me->flags = 0;
		/* me can't be null, wait ok */
	} else {
		s = splimp();	/* protect kentry_free list with splimp */
		simple_lock(&uvm.kentry_lock);
		me = uvm.kentry_free;
		if (me)
			uvm.kentry_free = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		if (!me)
			panic("mapent_alloc: out of kernel map entries, check MAX_KMAPENT");
		me->flags = UVM_MAP_STATIC;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [pageable=%d]",
	    me, map->entries_pageable, 0, 0);
	return(me);
}

/*
 * uvm_mapent_free: free map entry
 *
 * => XXX: static pool for kernel map?
 */

static __inline void uvm_mapent_free(me)
	vm_map_entry_t me;
{
	int s;
	UVMHIST_FUNC("uvm_mapent_free");
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if ((me->flags & UVM_MAP_STATIC) == 0) {
		FREE(me, M_VMMAPENT);
	} else {
		s = splimp();	/* protect kentry_free list with splimp */
		simple_lock(&uvm.kentry_lock);
		me->next = uvm.kentry_free;
		uvm.kentry_free = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static __inline void uvm_mapent_copy(src, dst)
	vm_map_entry_t src;
	vm_map_entry_t dst;
{
	bcopy(src, dst,
	    ((char *)&src->uvm_map_entry_stop_copy) - ((char *)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static __inline void uvm_map_entry_unwire(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	uvm_fault_unwire(map->pmap, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 * uvm_map_init: init mapping system at boot time.   note that we allocate
 * and init the static pool of vm_map_entry_t's for the kernel here.
 */

void uvm_map_init()
{
	static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif
	int lcv;

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "<starting uvm map system>", 0, 0, 0, 0);
	UVMCNT_INIT(uvm_map_call, UVMCNT_CNT, 0,
	    "# uvm_map() successful calls", 0);
	UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
	UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward", 0);
	UVMCNT_INIT(uvm_mlk_call, UVMCNT_CNT, 0, "# map lookup calls", 0);
	UVMCNT_INIT(uvm_mlk_hint, UVMCNT_CNT, 0, "# map lookup hint hits", 0);

	/*
	 * now set up the static pool of kernel map entries ...
	 */

	simple_lock_init(&uvm.kentry_lock);
	uvm.kentry_free = NULL;
	for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
		kernel_map_entry[lcv].next = uvm.kentry_free;
		uvm.kentry_free = &kernel_map_entry[lcv];
	}
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 * the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */
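
/*
 * for illustration (a sketch with made-up addresses, not code from this
 * file): clipping the entry [0x1000,0x5000) at start=0x3000 inserts a new
 * entry [0x1000,0x3000) before it and shrinks the original to
 * [0x3000,0x5000), with entry->offset advanced by new_adj = 0x2000 so the
 * surviving entry still maps the same part of its backing object:
 *
 *	before:  [0x1000 ................ 0x5000)   entry (offset 0)
 *	after:   [0x1000 .. 0x3000)[0x3000 .. 0x5000)
 *	         new_entry (offset 0)  entry (offset 0x2000)
 */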

void uvm_map_clip_start(map, entry, start)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t start;
{
	register vm_map_entry_t new_entry;
	vm_offset_t new_adj;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry);	/* entry -> new_entry */

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISMAP(entry)) {
		uvm_map_reference(new_entry->object.share_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 * the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void uvm_map_clip_end(map, entry, end)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t end;
{
	register vm_map_entry_t new_entry;
	vm_offset_t new_adj;	/* #bytes we move start forward */

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry);	/* entry -> new_entry */

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap) {
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISMAP(entry)) {
		uvm_map_reference(new_entry->object.share_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}


/*
 * M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *    over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	[1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	[2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	[3] <uobj,uoffset>		== normal mapping
 *	[4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 * case [4] is for kernel mappings where we don't know the offset until
 * we've found a virtual address.   note that kernel object offsets are
 * always relative to vm_map_min(kernel_map).
 * => XXXCDC: need way to map in external amap?
 */
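
/*
 * a minimal usage sketch (not code from this file; the protection,
 * inheritance and advice values are just plausible picks): mapping "size"
 * bytes of object "uobj" at a kernel-chosen address near "hint" would
 * look something like
 *
 *	vm_offset_t va = hint;
 *	if (uvm_map(map, &va, size, uobj, offset,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
 *		... fail ...;
 *
 * on success "va" holds the chosen virtual address (case [3] above).
 */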

int uvm_map(map, startp, size, uobj, uoffset, flags)
	vm_map_t map;
	vm_offset_t *startp;	/* IN/OUT */
	vm_size_t size;
	struct uvm_object *uobj;
	vm_offset_t uoffset;
	uvm_flag_t flags;
{
	vm_map_entry_t prev_entry, new_entry;
	vm_prot_t prot = UVM_PROTECTION(flags),
	    maxprot = UVM_MAXPROTECTION(flags);
	vm_inherit_t inherit = UVM_INHERIT(flags);
	int advice = UVM_ADVICE(flags);
	UVMHIST_FUNC("uvm_map");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
	    map, *startp, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset, 0, 0);

	/*
	 * step 0: sanity check of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
		    prot, maxprot, 0, 0);
		return(KERN_PROTECTION_FAILURE);
	}

	/*
	 * step 1: figure out where to put new VM range
	 */

	if (vm_map_lock_try(map) == FALSE) {
		if (flags & UVM_FLAG_TRYLOCK)
			return(KERN_FAILURE);
		vm_map_lock(map);	/* could sleep here */
	}
	if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
	    uobj, uoffset, flags & UVM_FLAG_FIXED)) == NULL) {
		UVMHIST_LOG(maphist, "<- uvm_map_findspace failed!",
		    0, 0, 0, 0);
		vm_map_unlock(map);
		return(KERN_NO_SPACE);
	}

#if defined(PMAP_GROWKERNEL)	/* hack */
	{
		static vm_offset_t maxkaddr = 0; /* locked by kernel_map lock */

		/*
		 * hack: grow kernel PTPs in advance.
		 */
		if (map == kernel_map && maxkaddr < (*startp + size)) {
			pmap_growkernel(*startp + size);
			maxkaddr = *startp + size;
		}
	}
#endif

	UVMCNT_INCR(uvm_map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).   this case is
	 *      for kernel object mappings where we don't know the offset
	 *      until the virtual address is found (with uvm_map_findspace).
	 *      the offset is the distance we are from the start of the map.
	 */
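
	/*
	 * worked example for case [4] (made-up numbers, for illustration
	 * only): if vm_map_min(kernel_map) is 0xf0000000 and findspace
	 * settles on *startp = 0xf0123000, the kernel object offset
	 * becomes 0xf0123000 - 0xf0000000 = 0x123000.
	 */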

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
#ifdef DIAGNOSTIC
			if (uobj->uo_refs != UVM_OBJ_KERN)
				panic("uvm_map: unknown offset with non-kernel object");
#endif
			uoffset = *startp - vm_map_min(kernel_map);
		}
	}

	/*
	 * step 2: try and insert in map by extending previous entry, if
	 * possible.
	 * XXX: we don't try and pull back the next entry.   might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->end == *startp && prev_entry != &map->header &&
	    prev_entry->object.uvm_obj == uobj) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto step3;

		if (UVM_ET_ISMAP(prev_entry))
			goto step3;

		if (prev_entry->protection != prot ||
		    prev_entry->max_protection != maxprot)
			goto step3;

		if (prev_entry->inheritance != inherit ||
		    prev_entry->advice != advice)
			goto step3;

		/* wired_count's must match (new area is unwired) */
		if (prev_entry->wired_count)
			goto step3;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at am_ref since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    prev_entry->aref.ar_amap->am_ref != 1) {
			goto step3;
		}

		/* got it! */

		UVMCNT_INCR(map_backmerge);
		UVMHIST_LOG(maphist, "  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a
		 * reference that we already have (the ref count can not
		 * drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (prev_entry->aref.ar_amap) {
			amap_extend(prev_entry, size);
		}

		prev_entry->end += size;
		map->size += size;

		UVMHIST_LOG(maphist, "<- done (via backmerge)!", 0, 0, 0, 0);
		vm_map_unlock(map);
		return(KERN_SUCCESS);
	}

step3:
	UVMHIST_LOG(maphist, "  allocating new map entry", 0, 0, 0, 0);

	/*
	 * check for possible forward merge (which we don't do) and count
	 * the number of times we missed a *possible* chance to merge more
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->next != &map->header &&
	    prev_entry->next->start == (*startp + size))
		UVMCNT_INCR(map_forwmerge);

	/*
	 * step 3: allocate new entry and link it in
	 */

	new_entry = uvm_mapent_alloc(map);
	new_entry->start = *startp;
	new_entry->end = new_entry->start + size;
	new_entry->object.uvm_obj = uobj;
	new_entry->offset = uoffset;

	if (uobj)
		new_entry->etype = UVM_ET_OBJ;
	else
		new_entry->etype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		new_entry->etype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			new_entry->etype |= UVM_ET_NEEDSCOPY;
	}

	new_entry->protection = prot;
	new_entry->max_protection = maxprot;
	new_entry->inheritance = inherit;
	new_entry->wired_count = 0;
	new_entry->advice = advice;
	if (flags & UVM_FLAG_OVERLAY) {
		/*
		 * to_add: for BSS we overallocate a little since we are
		 * likely to extend
		 */
		vm_offset_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
		    UVM_AMAP_CHUNK * PAGE_SIZE : 0;
		struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
		new_entry->aref.ar_slotoff = 0;
		new_entry->aref.ar_amap = amap;
	} else {
		new_entry->aref.ar_amap = NULL;
	}

	uvm_map_entry_link(map, prev_entry, new_entry);

	map->size += size;

	/*
	 * Update the free space hint
	 */

	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	UVMHIST_LOG(maphist, "<- done!", 0, 0, 0, 0);
	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

boolean_t uvm_map_lookup_entry(map, address, entry)
	register vm_map_t map;
	register vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	register vm_map_entry_t cur;
	register vm_map_entry_t last;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * Start looking either from the head of the
	 * list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	UVMCNT_INCR(uvm_mlk_call);
	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * Note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			UVMCNT_INCR(uvm_mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist, "<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			return(TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				UVMHIST_LOG(maphist,
				    "<- search got it (0x%x)", cur, 0, 0, 0);
				return(TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	UVMHIST_LOG(maphist, "<- failed!", 0, 0, 0, 0);
	return(FALSE);
}


/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless fixed is true
 *	(in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */
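
/*
 * usage sketch (made-up names, for illustration): with fixed == FALSE a
 * caller holding the map lock might do
 *
 *	vm_offset_t va;
 *	entry = uvm_map_findspace(map, hint, len, &va, uobj, uoffset, FALSE);
 *
 * on success "va" is a free range of "len" bytes at or after "hint" and
 * "entry" is the map entry the new range should be linked in after.  with
 * fixed == TRUE the call succeeds only if "hint" itself is free.
 */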


vm_map_entry_t uvm_map_findspace(map, hint, length, result,
    uobj, uoffset, fixed)
	vm_map_t map;
	vm_offset_t hint;
	vm_size_t length;
	vm_offset_t *result;	/* OUT */
	struct uvm_object *uobj;
	vm_offset_t uoffset;
	boolean_t fixed;
{
	vm_map_entry_t entry, next, tmp;
	vm_offset_t end;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, fixed=%d)",
	    map, hint, length, fixed);

	if (hint < map->min_offset) {	/* check ranges ... */
		if (fixed) {
			UVMHIST_LOG(maphist, "<- VA below map range",
			    0, 0, 0, 0);
			return(NULL);
		}
		hint = map->min_offset;
	}
	if (hint > map->max_offset) {
		UVMHIST_LOG(maphist, "<- VA 0x%x > range [0x%x->0x%x]",
		    hint, map->min_offset, map->max_offset, 0);
		return(NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	if (!fixed && hint == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			hint = entry->end;
	} else {
		if (uvm_map_lookup_entry(map, hint, &tmp)) {
			/* "hint" address already in use ... */
			if (fixed) {
				UVMHIST_LOG(maphist, "<- fixed & VA in use",
				    0, 0, 0, 0);
				return(NULL);
			}
			hint = tmp->end;
		}
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end  = base VA of current gap,
	 *	 next->start = VA of end of current gap
	 */
	for (;; hint = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */

#ifdef PMAP_PREFER
		/*
		 * push hint forward as needed to avoid VAC alias problems.
		 * we only do this if a valid offset is specified.
		 */
		if (!fixed && uoffset != UVM_UNKNOWN_OFFSET)
			PMAP_PREFER(uoffset, &hint);
#endif
		end = hint + length;
		if (end > map->max_offset || end < hint) {
			UVMHIST_LOG(maphist, "<- failed (off end)", 0, 0, 0, 0);
			return (NULL);
		}
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
		if (fixed) {
			UVMHIST_LOG(maphist, "<- fixed mapping failed",
			    0, 0, 0, 0);
			return(NULL); /* only one shot at it ... */
		}
	}
	SAVE_HINT(map, entry);
	*result = hint;
	UVMHIST_LOG(maphist, "<- got it!  (result=0x%x)", hint, 0, 0, 0);
	return (entry);
}

/*
 * U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => if the "start"/"stop" range lies within a mapping of a share map,
 *    then the unmap takes place within the context of that share map
 *    rather than in the main map, unless the "mainonly" flag is set.
 *    (e.g. the "exit" system call would want to set "mainonly").
 * => we return a list of map entries that we've removed from the map
 *    in "entry_list"
 */

int uvm_unmap_remove(map, start, end, mainonly, entry_list)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t mainonly;
	vm_map_entry_t *entry_list;	/* OUT */
{
	int result, refs;
	vm_map_entry_t entry, first_entry, next;
	vm_offset_t len;
	boolean_t already_removed;
	struct uvm_object *uobj;
	UVMHIST_FUNC("uvm_unmap_remove");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, end=0x%x)",
	    map, start, end, 0);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * find first entry
	 */
	if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {

		/*
		 * start lies within a mapped region.   first check to see if
		 * it is within a sharemap (in which case we recurse and unmap
		 * within the context of the share map).
		 */
		if (UVM_ET_ISMAP(first_entry) &&
		    !UVM_ET_ISSUBMAP(first_entry) &&
		    mainonly == 0 && end <= first_entry->end) {
			/* is a share map and in range ... */
			/* XXX: do address transforms if share VA's != main VA's */
			/* note: main map kept locked during share map unlock */
			result = uvm_unmap(first_entry->object.share_map,
			    start, end, 0);
			*entry_list = NULL;
			return(result);
		}
		/* non-share map: clip and go... */
		entry = first_entry;
		UVM_MAP_CLIP_START(map, entry, start);
		SAVE_HINT(map, entry->prev); /* critical! prevents stale hint */

	} else {
		entry = first_entry->next;
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * note: we now re-use first_entry for a different task.  we remove
	 * a number of map entries from the map and save them in a linked
	 * list headed by "first_entry".  once we remove them from the map
	 * the caller should unlock the map and drop the references to the
	 * backing objects [c.f. uvm_unmap_detach].  the object is to
	 * separate unmapping from reference dropping.  why?
	 *   [1] the map has to be locked for unmapping
	 *   [2] the map need not be locked for reference dropping
	 *   [3] dropping references may trigger pager I/O, and if we hit
	 *       a pager that does synchronous I/O we may have to wait for it.
	 *   [4] we would like all waiting for I/O to occur with maps unlocked
	 *       so that we don't block other threads.
	 */
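
	/*
	 * the resulting caller-side pattern looks roughly like this (a
	 * sketch, not code from this file):
	 *
	 *	vm_map_lock(map);
	 *	uvm_unmap_remove(map, start, end, mainonly, &dead_entries);
	 *	vm_map_unlock(map);
	 *	if (dead_entries != NULL)
	 *		uvm_unmap_detach(dead_entries, 0);
	 *
	 * so any pager I/O triggered by dropping references happens with
	 * the map unlocked.
	 */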
	first_entry = NULL;
	*entry_list = NULL;	/* to be safe */

	/*
	 * break up the area into map entry sized regions and unmap.  note
	 * that all mappings have to be removed before we can even consider
	 * dropping references to amaps or VM objects (otherwise we could
	 * end up with a mapping to a page on the free list which would be
	 * very bad).
	 */

	while ((entry != &map->header) && (entry->start < end)) {

		UVM_MAP_CLIP_END(map, entry, end);
		next = entry->next;
		len = entry->end - entry->start;

		/*
		 * unwire before removing addresses from the pmap; otherwise
		 * unwiring will put the entries back into the pmap (XXX).
		 */

		if (entry->wired_count)
			uvm_map_entry_unwire(map, entry);

		/*
		 * special case: handle mappings to anonymous kernel objects.
		 * we want to free these pages right away...
		 */
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->uo_refs == UVM_OBJ_KERN) {

#ifdef DIAGNOSTIC
			if (vm_map_pmap(map) != pmap_kernel())
				panic("uvm_unmap_remove: kernel object mapped by non-kernel map");
#endif

			/*
			 * note: kernel object mappings are currently used in
			 * two ways:
			 *  [1] "normal" mappings of pages in the kernel
			 *      object
			 *  [2] uvm_km_valloc'd allocations in which we
			 *      pmap_enter in some non-kernel-object page
			 *      (e.g. vmapbuf).
			 *
			 * for case [1], we need to remove the mapping from
			 * the pmap and then remove the page from the kernel
			 * object (because, once pages in a kernel object are
			 * unmapped they are no longer needed, unlike, say,
			 * a vnode where you might want the data to persist
			 * until flushed out of a queue).
			 *
			 * for case [2], we need to remove the mapping from
			 * the pmap.  there shouldn't be any pages at the
			 * specified offset in the kernel object [but it
			 * doesn't hurt to call uvm_km_pgremove just to be
			 * safe?]
			 *
			 * uvm_km_pgremove currently does the following:
			 *   for pages in the kernel object in range:
			 *     - pmap_page_protect them out of all pmaps
			 *     - uvm_pagefree the page
			 *
			 * note that in case [1] the pmap_page_protect call
			 * in uvm_km_pgremove may very well be redundant
			 * because we have already removed the mappings
			 * beforehand with pmap_remove (or pmap_kremove).
			 * in the PMAP_NEW case, the pmap_page_protect call
			 * may not do anything, since PMAP_NEW allows the
			 * kernel to enter/remove kernel mappings without
			 * bothering to keep track of the mappings (e.g. via
			 * pv_entry lists).  XXX: because of this, we should
			 * consider removing the pmap_page_protect call from
			 * uvm_km_pgremove at some point.
			 */

			/*
			 * remove mappings from pmap
			 */
#if defined(PMAP_NEW)
			pmap_kremove(entry->start, len);
#else
			pmap_remove(pmap_kernel(), entry->start,
			    entry->start + len);
#endif

			/*
			 * remove pages from a kernel object (offsets are
			 * always relative to vm_map_min(kernel_map)).
			 */
			uvm_km_pgremove(entry->object.uvm_obj,
			    entry->start - vm_map_min(kernel_map),
			    entry->end - vm_map_min(kernel_map));

			already_removed = TRUE;

			/*
			 * null out kernel_object reference, we've just
			 * dropped it
			 */
			entry->etype &= ~UVM_ET_OBJ;
			entry->object.uvm_obj = NULL;	/* to be safe */

		} else {

			already_removed = FALSE;

		}

		/*
		 * remove mappings now.  for sharemaps, check to see if the
		 * reference count is one (i.e. not being shared right now).
		 * if so, use the cheaper pmap_remove() rather than the more
		 * expensive share_protect functions.
		 */

		if (!map->is_main_map) {
			simple_lock(&map->ref_lock);
			refs = map->ref_count;
			simple_unlock(&map->ref_lock);
		}
#if defined(sparc)
		else { refs = 0; }	/* XXX: shut up unused var gcc warning */
#endif

		if (map->is_main_map || (!map->is_main_map && refs == 1)) {
			if (!already_removed)
				pmap_remove(map->pmap, entry->start,
				    entry->end);
		} else {
			/* share map... must remove all mappings */
			if (entry->aref.ar_amap) {
				simple_lock(&entry->aref.ar_amap->am_l);
				amap_share_protect(entry, VM_PROT_NONE);
				simple_unlock(&entry->aref.ar_amap->am_l);
			}
			if (UVM_ET_ISOBJ(entry)) {
				uobj = entry->object.uvm_obj;
				simple_lock(&uobj->vmobjlock);
				uobj->pgops->pgo_shareprot(entry,
				    VM_PROT_NONE);
				simple_unlock(&uobj->vmobjlock);
			}
		}

		/*
		 * remove from map and put it on our list of entries that
		 * we've nuked.  then go do next entry.
		 */
		UVMHIST_LOG(maphist, "  removed map entry 0x%x",
		    entry, 0, 0, 0);
		uvm_map_entry_unlink(map, entry);
		map->size -= len;
		entry->next = first_entry;
		first_entry = entry;
		entry = next;	/* next entry, please */
	}

	/*
	 * now we've cleaned up the map and are ready for the caller to drop
	 * references to the mapped objects.
	 */

	*entry_list = first_entry;
	UVMHIST_LOG(maphist, "<- done!", 0, 0, 0, 0);
	return(KERN_SUCCESS);
}

/*
 * uvm_unmap_detach: drop references in a chain of map entries
 *
 * => we will free the map entries as we traverse the list.
 */

void uvm_unmap_detach(first_entry, amap_unref_flags)
	vm_map_entry_t first_entry;
	int amap_unref_flags;
{
	vm_map_entry_t next_entry;
	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);

	while (first_entry) {

#ifdef DIAGNOSTIC
		/*
		 * sanity check
		 */
		/* was part of vm_map_entry_delete() */
		if (first_entry->wired_count)
			panic("unmap: still wired!");
#endif

		UVMHIST_LOG(maphist,
		    "  detach 0x%x: amap=0x%x, obj=0x%x, map?=%d",
		    first_entry, first_entry->aref.ar_amap,
		    first_entry->object.uvm_obj, UVM_ET_ISMAP(first_entry));

		/*
		 * drop reference to amap, if we've got one
		 */

		if (first_entry->aref.ar_amap)
			amap_unref(first_entry, amap_unref_flags);

		/*
		 * drop reference to our backing object, if we've got one
		 */

		if (UVM_ET_ISMAP(first_entry)) {
			uvm_map_deallocate(first_entry->object.share_map);
		} else {
			if (UVM_ET_ISOBJ(first_entry) &&
			    first_entry->object.uvm_obj->pgops->pgo_detach)
				first_entry->object.uvm_obj->pgops->
				    pgo_detach(first_entry->object.uvm_obj);
		}

		/*
		 * next entry
		 */
		next_entry = first_entry->next;
		uvm_mapent_free(first_entry);
		first_entry = next_entry;
	}

	/*
	 * done!
	 */
	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
	return;
}

/*
 * E X T R A C T I O N   F U N C T I O N S
 */

/*
 * uvm_map_reserve: reserve space in a vm_map for future use.
 *
 * => we reserve space in a map by putting a dummy map entry in the
 *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
 * => map should be unlocked (we will write lock it)
 * => we return true if we were able to reserve space
 * => XXXCDC: should be inline?
 */
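
/*
 * typical use, sketched (see uvm_map_extract step 1 below for the real
 * thing): reserve a blank range now, fill it in later with
 * uvm_map_replace():
 *
 *	vm_offset_t raddr = 0;
 *	if (uvm_map_reserve(dstmap, len, uoffset, &raddr) == FALSE)
 *		... fail ...;
 *	... build the real map entries ...
 *	uvm_map_replace(dstmap, raddr, raddr + len, chain, nchain);
 */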

int uvm_map_reserve(map, size, offset, raddr)
	vm_map_t map;
	vm_size_t size;
	vm_offset_t offset;	/* hint for pmap_prefer */
	vm_offset_t *raddr;	/* OUT: reserved VA */
{
	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
	    map, size, offset, raddr);

	size = round_page(size);
	if (*raddr < vm_map_min(map))
		*raddr = vm_map_min(map);	/* hint */

	/*
	 * reserve some virtual space.
	 */

	if (uvm_map(map, raddr, size, NULL, offset,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0, 0, 0, 0);
		return(FALSE);
	}

	UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr, 0, 0, 0);
	return(TRUE);
}

/*
 * uvm_map_replace: replace a reserved (blank) area of memory with
 * real mappings.
 *
 * => caller must WRITE-LOCK the map
 * => we return TRUE if replacement was a success
 * => we expect the newents chain to have nnewents entries on it and
 *    we expect newents->prev to point to the last entry on the list
 * => note newents is allowed to be NULL
 */
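
/*
 * expected chain format, sketched for nnewents == 3 (e1, e2, e3 are
 * hypothetical entries):
 *
 *	newents -> e1 -> e2 -> e3 -> NULL	(via ->next)
 *	newents->prev == e3			(shortcut to the tail)
 *
 * i.e. a NULL-terminated forward list whose head's prev pointer caches
 * the last element; uvm_map_extract builds exactly this shape.
 */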

int uvm_map_replace(map, start, end, newents, nnewents)
	struct vm_map *map;
	vm_offset_t start, end;
	vm_map_entry_t newents;
	int nnewents;
{
	vm_map_entry_t oldent, last;
	UVMHIST_FUNC("uvm_map_replace");
	UVMHIST_CALLED(maphist);

	/*
	 * first find the blank map entry at the specified address
	 */

	if (!uvm_map_lookup_entry(map, start, &oldent)) {
		return(FALSE);
	}

	/*
	 * check to make sure we have a proper blank entry
	 */

	if (oldent->start != start || oldent->end != end ||
	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
		return(FALSE);
	}

#ifdef DIAGNOSTIC
	/*
	 * sanity check the newents chain
	 */
	{
		vm_map_entry_t tmpent = newents;
		int nent = 0;
		vm_offset_t cur = start;

		while (tmpent) {
			nent++;
			if (tmpent->start < cur)
				panic("uvm_map_replace1");
			if (tmpent->start > tmpent->end || tmpent->end > end) {
				printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
				    tmpent->start, tmpent->end, end);
				panic("uvm_map_replace2");
			}
			cur = tmpent->end;
			if (tmpent->next) {
				if (tmpent->next->prev != tmpent)
					panic("uvm_map_replace3");
			} else {
				if (newents->prev != tmpent)
					panic("uvm_map_replace4");
			}
			tmpent = tmpent->next;
		}
		if (nent != nnewents)
			panic("uvm_map_replace5");
	}
#endif

	/*
	 * map entry is a valid blank!   replace it.   (this does all the
	 * work of map entry link/unlink...).
	 */

	if (newents) {

		last = newents->prev;	/* we expect this */

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, newents);
		if (map->first_free == oldent)
			map->first_free = last;

		last->next = oldent->next;
		last->next->prev = last;
		newents->prev = oldent->prev;
		newents->prev->next = newents;
		map->nentries = map->nentries + (nnewents - 1);

	} else {

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, oldent->prev);
		if (map->first_free == oldent)
			map->first_free = oldent->prev;

		/* NULL list of new entries: just remove the old one */
		uvm_map_entry_unlink(map, oldent);
	}


	/*
	 * now we can free the old blank entry, unlock the map and return.
	 */

	uvm_mapent_free(oldent);
	return(TRUE);
}

/*
 * uvm_map_extract: extract a mapping from a map and put it somewhere
 * (maybe removing the old mapping)
 *
 * => maps should be unlocked (we will write lock them)
 * => returns 0 on success, error code otherwise
 * => start must be page aligned
 * => len must be page sized
 * => flags:
 *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
 *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
 *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
 *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
 *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
 *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
 *             be used from within the kernel in a kernel level map <<<
 */
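
/*
 * usage sketch (a hypothetical caller; kernel_map as the destination is
 * just an example): temporarily alias "len" bytes of a process map into
 * the kernel with quick references:
 *
 *	vm_offset_t kva = 0;
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, uva, len,
 *	    kernel_map, &kva, UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG |
 *	    UVM_EXTRACT_FIXPROT);
 *
 * on success "kva" holds the new mapping's address in kernel_map.
 */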

int uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
	vm_map_t srcmap, dstmap;
	vm_offset_t start, *dstaddrp;
	vm_size_t len;
	int flags;
{
	vm_offset_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
	    oldstart;
	vm_map_entry_t chain, endchain, entry, orig_entry, newentry, deadentry;
	vm_size_t elen;
	int nchain, error, copy_ok;
	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(srcmap=0x%x,start=0x%x, len=0x%x",
	    srcmap, start, len, 0);
	UVMHIST_LOG(maphist, " ...,dstmap=0x%x, flags=0x%x)",
	    dstmap, flags, 0, 0);

#ifdef DIAGNOSTIC
	/*
	 * step 0: sanity check: start must be on a page boundary, length
	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
	 * REMOVE.
	 */
	if ((start & PAGE_MASK) || (len & PAGE_MASK))
		panic("uvm_map_extract1");
	if (flags & UVM_EXTRACT_REMOVE)
		if (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF))
			panic("uvm_map_extract2");
#endif


	/*
	 * step 1: reserve space in the target map for the extracted area
	 */

	dstaddr = *dstaddrp;
	if (uvm_map_reserve(dstmap, len, start, &dstaddr) == FALSE)
		return(ENOMEM);
	*dstaddrp = dstaddr;	/* pass address back to caller */
	UVMHIST_LOG(maphist, "  dstaddr=0x%x", dstaddr, 0, 0, 0);


	/*
	 * step 2: setup for the extraction process loop by init'ing the
	 * map entry chain, locking src map, and looking up the first useful
	 * entry in the map.
	 */

	end = start + len;
	newend = dstaddr + len;
	chain = endchain = NULL;
	nchain = 0;
	vm_map_lock(srcmap);

	if (uvm_map_lookup_entry(srcmap, start, &entry)) {

		/* "start" is within an entry */
		if (flags & UVM_EXTRACT_QREF) {
			/*
			 * for quick references we don't clip the entry, so
			 * the entry may map space "before" the starting
			 * virtual address... this is the "fudge" factor
			 * (which can be non-zero only the first time
			 * through the "while" loop in step 3).
			 */
			fudge = start - entry->start;
		} else {
			/*
			 * normal reference: we clip the map to fit (thus
			 * fudge is zero)
			 */
			UVM_MAP_CLIP_START(srcmap, entry, start);
			SAVE_HINT(srcmap, entry->prev);
			fudge = 0;
		}

	} else {

		/* "start" is not within an entry ... skip to next entry */
		if (flags & UVM_EXTRACT_CONTIG) {
			error = EINVAL;
			goto bad;	/* definite hole here ... */
		}

		entry = entry->next;
		fudge = 0;
	}
	/* save values from srcmap for step 6 */
	orig_entry = entry;
	orig_fudge = fudge;


	/*
	 * step 3: now start looping through the map entries, extracting
	 * as we go.
	 */

	while (entry->start < end && entry != &srcmap->header) {

		/* if we are not doing a quick reference, clip it */
		if ((flags & UVM_EXTRACT_QREF) == 0)
			UVM_MAP_CLIP_END(srcmap, entry, end);

		/* clear needs_copy (allow chunking) */
		if (UVM_ET_ISNEEDSCOPY(entry)) {
			if (fudge)
				oldstart = entry->start;
			else
				oldstart = 0;	/* XXX: unnecessary, to avert gcc warning */
			amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
			if (UVM_ET_ISNEEDSCOPY(entry)) {	/* failed? */
				error = ENOMEM;
				goto bad;
			}
			if (fudge) {
				/*
				 * amap_copy could clip (during chunk)!
				 * update fudge
				 */
				fudge = fudge - (entry->start - oldstart);
				orig_fudge = fudge;
			}
		}

		/* calculate the offset of this from "start" */
		oldoffset = (entry->start + fudge) - start;

		/* allocate a new map entry */
		newentry = uvm_mapent_alloc(dstmap);
		if (newentry == NULL) {
			error = ENOMEM;
			goto bad;
		}

		/* set up new map entry */
		newentry->next = NULL;
		newentry->prev = endchain;
		newentry->start = dstaddr + oldoffset;
		newentry->end =
		    newentry->start + (entry->end - (entry->start + fudge));
		if (newentry->end > newend)
			newentry->end = newend;
		newentry->object.uvm_obj = entry->object.uvm_obj;
		if (newentry->object.uvm_obj) {
			if (newentry->object.uvm_obj->pgops->pgo_reference)
				newentry->object.uvm_obj->pgops->
				    pgo_reference(newentry->object.uvm_obj);
			newentry->offset = entry->offset + fudge;
		} else {
			newentry->offset = 0;
		}
		newentry->etype = entry->etype;
		newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
		    entry->max_protection : entry->protection;
		newentry->max_protection = entry->max_protection;
		newentry->inheritance = entry->inheritance;
		newentry->wired_count = 0;
		newentry->aref.ar_amap = entry->aref.ar_amap;
		if (newentry->aref.ar_amap) {
			newentry->aref.ar_slotoff =
			    entry->aref.ar_slotoff + (fudge / PAGE_SIZE);
			amap_ref(newentry, AMAP_SHARED |
			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
		} else {
			newentry->aref.ar_slotoff = 0;
		}
		newentry->advice = entry->advice;

		/* now link it on the chain */
		nchain++;
		if (endchain == NULL) {
			chain = endchain = newentry;
		} else {
			endchain->next = newentry;
			endchain = newentry;
		}

		/* end of 'while' loop! */
		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
		    (entry->next == &srcmap->header ||
		    entry->next->start != entry->end)) {
			error = EINVAL;
			goto bad;
		}
		entry = entry->next;
		fudge = 0;
	}


	/*
	 * step 4: close off chain (in format expected by uvm_map_replace)
	 */

	if (chain)
		chain->prev = endchain;


	/*
	 * step 5: attempt to lock the dest map so we can pmap_copy.
	 * note usage of copy_ok:
	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in
	 *        step 7
	 */

	if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {

		copy_ok = 1;
		if (!uvm_map_replace(dstmap, dstaddr, dstaddr + len, chain,
		    nchain)) {
			if (srcmap != dstmap)
				vm_map_unlock(dstmap);
			error = EIO;
			goto bad;
		}

	} else {

		copy_ok = 0;
		/* replace deferred until step 7 */

	}


	/*
	 * step 6: traverse the srcmap a second time to do the following:
	 *  - if we got a lock on the dstmap do pmap_copy
	 *  - if UVM_EXTRACT_REMOVE remove the entries
	 * we make use of orig_entry and orig_fudge (saved in step 2)
	 */

	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {

		/* purge possible stale hints from srcmap */
		if (flags & UVM_EXTRACT_REMOVE) {
			SAVE_HINT(srcmap, orig_entry->prev);
			if (srcmap->first_free->start >= start)
				srcmap->first_free = orig_entry->prev;
		}

		entry = orig_entry;
		fudge = orig_fudge;
		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */

		while (entry->start < end && entry != &srcmap->header) {

			if (copy_ok) {
				oldoffset = (entry->start + fudge) - start;
				elen = min(end, entry->end) -
				    (entry->start + fudge);
				pmap_copy(dstmap->pmap, srcmap->pmap,
				    dstaddr + oldoffset, elen,
				    entry->start + fudge);
			}

			if (flags & UVM_EXTRACT_REMOVE) {
				pmap_remove(srcmap->pmap, entry->start,
				    entry->end);
				uvm_map_entry_unlink(srcmap, entry);
				entry->next = deadentry;
				deadentry = entry;
			}

			/* end of 'while' loop */
			entry = entry->next;
			fudge = 0;
		}

		/*
		 * unlock dstmap.  we will dispose of deadentry in step 7
		 * if needed
		 */
		if (copy_ok && srcmap != dstmap)
			vm_map_unlock(dstmap);

	} else {
		deadentry = NULL;	/* XXX: shut up gcc warning */
	}

	/*
	 * step 7: we are done with the source map, unlock.  if copy_ok
	 * is 0 then we have not replaced the dummy mapping in dstmap yet
	 * and we need to do so now.
	 */

	vm_map_unlock(srcmap);
	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
		uvm_unmap_detach(deadentry, 0);	/* dispose of old entries */

	/* now do the replacement if we didn't do it in step 5 */
	if (copy_ok == 0) {
		vm_map_lock(dstmap);
		error = uvm_map_replace(dstmap, dstaddr, dstaddr + len,
		    chain, nchain);
		vm_map_unlock(dstmap);

		if (error == FALSE) {
			error = EIO;
			goto bad2;
		}
	}

	/*
	 * done!
	 */
	return(0);

	/*
	 * bad: failure recovery
	 */
bad:
	vm_map_unlock(srcmap);
bad2:	/* src already unlocked */
	if (chain)
		uvm_unmap_detach(chain,
		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
	uvm_unmap(dstmap, dstaddr, dstaddr + len, 1);	/* ??? */
	return(error);
}

/* end of extraction functions */

/*
 * uvm_map_submap: punch down part of a map into a submap
 *
 * => only the kernel_map is allowed to be submapped
 * => the purpose of submapping is to break up the locking granularity
 *    of a larger map
 * => the range specified must have been mapped previously with a uvm_map()
 *    call [with uobj==NULL] to create a blank map entry in the main map.
 *    [And it had better still be blank!]
 * => maps which contain submaps should never be copied or forked.
 * => to remove a submap, use uvm_unmap() on the main map
 *    and then uvm_map_deallocate() the submap.
 * => main map must be unlocked.
 * => submap must have been init'd and have a zero reference count.
 *    [need not be locked as we don't actually reference it]
 */
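
/*
 * the usual sequence, sketched (machine-dependent startup code does
 * something along these lines; the names and flag choices here are
 * illustrative, not a fixed recipe):
 *
 *	vm_offset_t va = 0;
 *	uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0));			(blank entry)
 *	submap = uvm_map_create(pmap_kernel(), va, va + size, TRUE);
 *	uvm_map_submap(kernel_map, va, va + size, submap);
 */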

int uvm_map_submap(map, start, end, submap)
	vm_map_t map, submap;
	vm_offset_t start, end;
{
	vm_map_entry_t entry;
	int result;
	UVMHIST_FUNC("uvm_map_submap"); UVMHIST_CALLED(maphist);

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
	} else {
		entry = NULL;
	}

	if (entry != NULL &&
	    entry->start == start && entry->end == end &&
	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {

		/*
		 * doit!
		 */
		entry->etype |= (UVM_ET_MAP|UVM_ET_SUBMAP);
		entry->object.sub_map = submap;
		entry->offset = 0;
		uvm_map_reference(submap);
		result = KERN_SUCCESS;
	} else {
		result = KERN_INVALID_ARGUMENT;
	}
	vm_map_unlock(map);

	return(result);
}


/*
 * uvm_map_protect: change map protection
 *
 * => set_max means set max_protection.
 * => map must be unlocked.
 * => XXXCDC: does not work properly with share maps.  rethink.
 */

#define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
			    ~VM_PROT_WRITE : VM_PROT_ALL)
#define max(a,b)	((a) > (b) ? (a) : (b))

int uvm_map_protect(map, start, end, new_prot, set_max)
	vm_map_t map;
	vm_offset_t start, end;
	vm_prot_t new_prot;
	boolean_t set_max;
{
	vm_map_entry_t current, entry;
	UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
	    map, start, end, new_prot);

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (UVM_ET_ISSUBMAP(current)) {
			vm_map_unlock(map);	/* don't leak the map lock */
			return(KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return(KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/* go back and fix up protections (no need to clip this time). */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		UVM_MAP_CLIP_END(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) & old_prot;
		else
			current->protection = new_prot;

		/*
		 * update physical map if necessary.  worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {

			if (UVM_ET_ISMAP(current) &&
			    !UVM_ET_ISSUBMAP(current)) {
				/* share map?  gotta go down a level */
				vm_map_entry_t share_entry;
				vm_offset_t share_end;

				/*
				 * note: a share map has its own address space
				 * (starting at zero).  current->offset is the
				 * offset into the share map at which our
				 * mapping starts.  the length of our mapping
				 * is (current->end - current->start).  thus,
				 * our mapping goes from current->offset to
				 * share_end (which is: current->offset +
				 * length) in the share map's address space.
				 *
				 * thus for any share_entry we need to make
				 * sure that the addresses we've got fall in
				 * the range we want.  we use:
				 *   max(any share_entry->start, current->offset)
				 *   min(any share_entry->end, share_end)
				 *
				 * of course to change our pmap we've got to
				 * convert the share map address back to our
				 * map's virtual address space using:
				 *   our_va = share_va - current->offset +
				 *	      current->start
				 *
				 * XXXCDC: protection change in sharemap may
				 * require use of pmap_page_protect.  needs
				 * a rethink.
				 */
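
				/*
				 * worked example (made-up numbers, for
				 * illustration): if current maps
				 * [0x8000,0xa000) with current->offset =
				 * 0x3000, then share_end = 0x3000 + 0x2000 =
				 * 0x5000, and a share map address of 0x3800
				 * converts back to 0x3800 - 0x3000 + 0x8000 =
				 * 0x8800 in our address space.
				 */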

				vm_map_lock(current->object.share_map);
				/*
				 * note: current->offset is offset into
				 * share map
				 */
				(void) uvm_map_lookup_entry(
				    current->object.share_map,
				    current->offset, &share_entry);
				share_end = current->offset +
				    (current->end - current->start);
				while ((share_entry !=
				    &current->object.share_map->header) &&
				    (share_entry->start < share_end)) {

					pmap_protect(map->pmap,
					    (max(share_entry->start,
					     current->offset) -
					     current->offset + current->start),
					    min(share_entry->end, share_end) -
					     current->offset + current->start,
					    current->protection &
					     MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);

			} else {	/* not share map! */

				pmap_protect(map->pmap, current->start,
				    current->end,
				    current->protection & MASK(entry));

			}
		}
		current = current->next;
	}

	vm_map_unlock(map);
	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
	return(KERN_SUCCESS);
}

#undef max
#undef MASK

/*
 * uvm_map_inherit: set inheritance code for range of addrs in map.
 *
 * => map must be unlocked
 * => note that the inherit code is used during a "fork".  see fork
 *	code for details.
 * => XXXCDC: currently only works in main map.  what about share map?
 */

int uvm_map_inherit(map, start, end, new_inheritance)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	vm_inherit_t new_inheritance;
{
	vm_map_entry_t entry, temp_entry;
	UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
	    map, start, end, new_inheritance);

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		UVMHIST_LOG(maphist, "<- done (INVALID ARG)", 0, 0, 0, 0);
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = temp_entry->next;
	}

	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_unlock(map);
	UVMHIST_LOG(maphist, "<- done (OK)", 0, 0, 0, 0);
	return(KERN_SUCCESS);
}
1893
1894 /*
1895 * uvm_map_pageable: sets the pageability of a range in a map.
1896 *
1897 * => regions sepcified as not pageable require lock-down (wired) memory
1898 * and page tables.
1899 * => map must not be locked.
1900 * => XXXCDC: check this and try and clean it up.
1901 */
1902
1903 int uvm_map_pageable(map, start, end, new_pageable)
1904
1905 vm_map_t map;
1906 vm_offset_t start, end;
1907 boolean_t new_pageable;
1908
1909 {
1910 vm_map_entry_t entry, start_entry;
1911 vm_offset_t failed = 0;
1912 int rv;
1913 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
1914 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
1915 map, start, end, new_pageable);
1916
1917 vm_map_lock(map);
1918 VM_MAP_RANGE_CHECK(map, start, end);
1919
1920 /*
1921 * only one pageability change may take place at one time, since
1922 * uvm_fault_wire assumes it will be called only once for each
1923 * wiring/unwiring. therefore, we have to make sure we're actually
1924 * changing the pageability for the entire region. we do so before
1925 * making any changes.
1926 */
1927
1928 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1929 vm_map_unlock(map);
1930
1931 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
1932 return(KERN_INVALID_ADDRESS);
1933 }
1934 entry = start_entry;
1935
1936 /*
1937  * handle wiring and unwiring separately.
1938 */
1939
1940 if (new_pageable) { /* unwire */
1941
1942 UVM_MAP_CLIP_START(map, entry, start);
1943
1944 /*
1945 * unwiring. first ensure that the range to be unwired is really
1946 * wired down and that there are no holes.
1947 */
1948 while ((entry != &map->header) && (entry->start < end)) {
1949
1950 if (entry->wired_count == 0 ||
1951 (entry->end < end &&
1952 (entry->next == &map->header ||
1953 entry->next->start > entry->end))) {
1954 vm_map_unlock(map);
1955 UVMHIST_LOG(maphist,"<- done (INVALID UNWIRE ARG)",0,0,0,0);
1956 return(KERN_INVALID_ARGUMENT);
1957 }
1958 entry = entry->next;
1959 }
1960
1961 /*
1962 * now decrement the wiring count for each region. if a region
1963 * becomes completely unwired, unwire its physical pages and mappings.
1964 */
1965 #if 0 /* not necessary: uvm_fault_unwire does not lock */
1966 lock_set_recursive(&map->lock);
1967 #endif /* XXXCDC */
1968
1969 entry = start_entry;
1970 while ((entry != &map->header) && (entry->start < end)) {
1971 UVM_MAP_CLIP_END(map, entry, end);
1972
1973 entry->wired_count--;
1974 if (entry->wired_count == 0)
1975 uvm_map_entry_unwire(map, entry);
1976
1977 entry = entry->next;
1978 }
1979 #if 0 /* XXXCDC: not necessary, see above */
1980 lock_clear_recursive(&map->lock);
1981 #endif
1982 vm_map_unlock(map);
1983 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
1984 return(KERN_SUCCESS);
1985
1986 /*
1987 * end of unwire case!
1988 */
1989 }
1990
1991 /*
1992 * wire case: in two passes [XXXCDC: ugly block of code here]
1993 *
1994 * 1: holding the write lock, we create any anonymous maps that need
1995 * to be created. then we clip each map entry to the region to
1996 * be wired and increment its wiring count.
1997 *
1998 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
1999 * in the pages for any newly wired area (wired_count is 1).
2000 *
2001 * downgrading to a read lock for uvm_fault_wire avoids a possible
2002 * deadlock with another thread that may have faulted on one of
2003 * the pages to be wired (it would mark the page busy, blocking
2004 * us, then in turn block on the map lock that we hold). because
2005 * of problems in the recursive lock package, we cannot upgrade
2006 * to a write lock in vm_map_lookup. thus, any actions that
2007 * require the write lock must be done beforehand. because we
2008 * keep the read lock on the map, the copy-on-write status of the
2009 * entries we modify here cannot change.
2010 */
2011
2012 while ((entry != &map->header) && (entry->start < end)) {
2013
2014 if (entry->wired_count == 0) { /* not already wired? */
2015
2016 /*
2017 * perform actions of vm_map_lookup that need the write lock on
2018 * the map: create an anonymous map for a copy-on-write region,
2019 * or an anonymous map for a zero-fill region.
2020 *
2021 * we don't have to do this for entries that point to sharing
2022 * maps, because we won't hold the lock on the sharing map.
2023 */
2024
2025 if (!UVM_ET_ISMAP(entry)) { /* not sharing map */
2026 /*
2027 * XXXCDC: protection vs. max_protection?? (wirefault uses max?)
2028 * XXXCDC: used to do it always if uvm_obj == NULL (wrong?)
2029 */
2030 	if (UVM_ET_ISNEEDSCOPY(entry) &&
2031 (entry->protection & VM_PROT_WRITE) != 0) {
2032
2033 amap_copy(map, entry, M_WAITOK, TRUE, start, end);
2034 /* XXXCDC: wait OK? */
2035
2036 }
2037 }
2038 } /* wired_count == 0 */
2039 UVM_MAP_CLIP_START(map, entry, start);
2040 UVM_MAP_CLIP_END(map, entry, end);
2041 entry->wired_count++;
2042
2043 /*
2044 * Check for holes
2045 */
2046 if (entry->end < end && (entry->next == &map->header ||
2047 entry->next->start > entry->end)) {
2048 /*
2049 * found one. amap creation actions do not need to be undone,
2050 * but the wired counts need to be restored.
2051 */
2052 while (entry != &map->header && entry->end > start) {
2053 entry->wired_count--;
2054 entry = entry->prev;
2055 }
2056 vm_map_unlock(map);
2057 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
2058 return(KERN_INVALID_ARGUMENT);
2059 }
2060 entry = entry->next;
2061 }
2062
2063 /*
2064 * Pass 2.
2065 */
2066 /*
2067 * HACK HACK HACK HACK
2068 *
2069 * if we are wiring in the kernel map or a submap of it, unlock the
2070 * map to avoid deadlocks. we trust that the kernel threads are
2071 * well-behaved, and therefore will not do anything destructive to
2072 * this region of the map while we have it unlocked. we cannot
2073 * trust user threads to do the same.
2074 *
2075 * HACK HACK HACK HACK
2076 */
2077 if (vm_map_pmap(map) == pmap_kernel()) {
2078 vm_map_unlock(map); /* trust me ... */
2079 } else {
2080 vm_map_set_recursive(&map->lock);
2081 lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, curproc);
2082 }
2083
2084 rv = 0;
2085 entry = start_entry;
2086 while (entry != &map->header && entry->start < end) {
2087 /*
2088 * if uvm_fault_wire fails for any page we need to undo what has
2089 * been done. we decrement the wiring count for those pages which
2090 * have not yet been wired (now) and unwire those that have
2091 * (later).
2092 *
2093 * XXX this violates the locking protocol on the map,
2094 * needs to be fixed. [because we only have a read lock on map we
2095 * shouldn't be changing wired_count?]
2096 */
2097 if (rv) {
2098 entry->wired_count--;
2099 } else if (entry->wired_count == 1) {
2100 rv = uvm_fault_wire(map, entry->start, entry->end);
2101 if (rv) {
2102 failed = entry->start;
2103 entry->wired_count--;
2104 }
2105 }
2106 entry = entry->next;
2107 }
2108
2109 if (vm_map_pmap(map) == pmap_kernel()) {
2110 vm_map_lock(map); /* relock */
2111 }
2112 else {
2113 vm_map_clear_recursive(&map->lock);
2114 }
2115
2116 if (rv) { /* failed? */
2117 vm_map_unlock(map);
2118 (void) uvm_map_pageable(map, start, failed, TRUE);
2119 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
2120 return(rv);
2121 }
2122 vm_map_unlock(map);
2123
2124 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
2125 return(KERN_SUCCESS);
2126 }
2127
2128 /*
2129 * uvm_map_clean: push dirty pages off to backing store.
2130 *
2131 * => valid flags:
2132 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
2133 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
2134 * if (flags & PGO_FREE): any cached pages are freed after clean
2135 * => returns an error if any part of the specified range isn't mapped
2136 * => never a need to flush amap layer since the anonymous memory has
2137 * no permanent home...
2138 * => called from sys_msync()
2139 * => caller must not write-lock map (read OK).
2140 * => we may sleep while cleaning if SYNCIO [with map read-locked]
2141 * => XXX: does this handle share maps properly?
2142 */
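
/*
 * usage sketch (illustrative flags): a synchronous msync-style flush
 * that also frees the cleaned pages might look like:
 *
 *	rv = uvm_map_clean(map, start, end, PGO_SYNCIO|PGO_FREE);
 */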
2143
2144 int uvm_map_clean(map, start, end, flags)
2145
2146 vm_map_t map;
2147 vm_offset_t start, end;
2148 int flags;
2149
2150 {
2151 vm_map_entry_t current;
2152 vm_map_entry_t entry;
2153 vm_size_t size;
2154 struct uvm_object *object;
2155 vm_offset_t offset;
2156 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
2157 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
2158 map, start, end, flags);
2159
2160 vm_map_lock_read(map);
2161 VM_MAP_RANGE_CHECK(map, start, end);
2162 if (!uvm_map_lookup_entry(map, start, &entry)) {
2163 vm_map_unlock_read(map);
2164 return(KERN_INVALID_ADDRESS);
2165 }
2166
2167 /*
2168 * Make a first pass to check for holes.
2169 */
2170 for (current = entry; current->start < end; current = current->next) {
2171 if (UVM_ET_ISSUBMAP(current)) {
2172 vm_map_unlock_read(map);
2173 return(KERN_INVALID_ARGUMENT);
2174 }
2175 if (end > current->end &&
2176 (current->next == &map->header ||
2177 current->end != current->next->start)) {
2178 vm_map_unlock_read(map);
2179 return(KERN_INVALID_ADDRESS);
2180 }
2181 }
2182
2183 /*
2184 * add "cleanit" flag to flags (for generic flush routine).
2185 * then make a second pass, cleaning/uncaching pages from
2186 * the indicated objects as we go.
2187 */
2188 flags = flags | PGO_CLEANIT;
2189 for (current = entry; current->start < end; current = current->next) {
2190 offset = current->offset + (start - current->start);
2191 size = (end <= current->end ? end : current->end) - start;
2192
2193 /*
2194 * get object/offset. special case to handle share maps.
2195 */
2196 if (UVM_ET_ISMAP(current)) { /* share map? */
2197 register vm_map_t smap;
2198 vm_map_entry_t tentry;
2199 vm_size_t tsize;
2200
2201 smap = current->object.share_map;
2202 vm_map_lock_read(smap);
2203 (void) uvm_map_lookup_entry(smap, offset, &tentry);
2204 tsize = tentry->end - offset;
2205 if (tsize < size)
2206 size = tsize;
2207 object = tentry->object.uvm_obj;
2208 offset = tentry->offset + (offset - tentry->start);
2209 simple_lock(&object->vmobjlock);
2210 vm_map_unlock_read(smap);
2211 } else {
2212 object = current->object.uvm_obj;
2213 simple_lock(&object->vmobjlock);
2214 }
2215
2216 /*
2217 * flush pages if writing is allowed. note that object is locked.
2218 * XXX should we continue on an error?
2219 */
2220
2221 if (object && object->pgops &&
2222 (current->protection & VM_PROT_WRITE) != 0) {
2223 if (!object->pgops->pgo_flush(object, offset, offset+size, flags)) {
2224 simple_unlock(&object->vmobjlock);
2225 vm_map_unlock_read(map);
2226 return(KERN_FAILURE);
2227 }
2228 }
2229 simple_unlock(&object->vmobjlock);
2230 start += size;
2231 }
2232 vm_map_unlock_read(map);
2233 return(KERN_SUCCESS);
2234 }
2235
2236
2237 /*
2238 * uvm_map_checkprot: check protection in map
2239 *
2240 * => must allow specified protection in a fully allocated region.
2241 * => map must be read or write locked by caller.
2242 */
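
/*
 * usage sketch (hypothetical range): check that a buffer is fully
 * mapped read/write before operating on it:
 *
 *	if (!uvm_map_checkprot(map, addr, addr + len,
 *	    VM_PROT_READ|VM_PROT_WRITE))
 *		return(KERN_PROTECTION_FAILURE);
 */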
2243
2244 boolean_t uvm_map_checkprot(map, start, end, protection)
2245
2246 vm_map_t map;
2247 vm_offset_t start, end;
2248 vm_prot_t protection;
2249
2250 {
2251 vm_map_entry_t entry;
2252 vm_map_entry_t tmp_entry;
2253
2254 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
2255 return(FALSE);
2256 }
2257
2258 entry = tmp_entry;
2259
2260 while (start < end) {
2261 if (entry == &map->header) {
2262 return(FALSE);
2263 }
2264
2265 /*
2266 * no holes allowed
2267 */
2268
2269 if (start < entry->start) {
2270 return(FALSE);
2271 }
2272
2273 /*
2274 * check protection associated with entry
2275 */
2276
2277 if ((entry->protection & protection) != protection) {
2278 return(FALSE);
2279 }
2280
2281 /* go to next entry */
2282
2283 start = entry->end;
2284 entry = entry->next;
2285 }
2286 return(TRUE);
2287 }
2288
2289 /*
2290 * uvmspace_alloc: allocate a vmspace structure.
2291 *
2292 * - structure includes vm_map and pmap
2293 * - XXX: no locking on this structure
2294 * - refcnt set to 1, rest must be init'd by caller
2295 */
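/*
 * usage sketch (illustrative bounds): a user vmspace would typically
 * be created with something like:
 *
 *	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, TRUE);
 */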
2296 struct vmspace *uvmspace_alloc(min, max, pageable)
2297
2298 vm_offset_t min, max;
2299 int pageable;
2300
2301 {
2302 struct vmspace *vm;
2303 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
2304
2305 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
2306 bzero(vm, sizeof(*vm));
2307 uvm_map_setup(&vm->vm_map, min, max, pageable);
2308 #if defined(PMAP_NEW)
2309 vm->vm_map.pmap = pmap_create();
2310 #else
2311 vm->vm_map.pmap = pmap_create(0);
2312 #endif
2313 vm->vm_refcnt = 1;
2314 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
2315 return (vm);
2316 }
2317
2318 /*
2319  * uvmspace_share: share a vmspace between two processes
2320 *
2321 * - XXX: no locking on vmspace
2322 * - used for vfork, threads(?)
2323 */
2324
2325 void uvmspace_share(p1, p2)
2326
2327 struct proc *p1, *p2;
2328
2329 {
2330 p2->p_vmspace = p1->p_vmspace;
2331 p1->p_vmspace->vm_refcnt++;
2332 }
2333
2334 /*
2335 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
2336 *
2337 * - XXX: no locking on vmspace
2338 */
2339
2340 void uvmspace_unshare(p)
2341
2342 struct proc *p;
2343
2344 {
2345 struct vmspace *nvm, *ovm = p->p_vmspace;
2346
2347 if (ovm->vm_refcnt == 1)
2348 return; /* nothing to do: vmspace isn't shared in the first place */
2349
2350 nvm = uvmspace_fork(ovm); /* make a new vmspace, still holding old one */
2351 p->p_vmspace = nvm;
2352 pmap_activate(p); /* switch to new vmspace */
2353 uvmspace_free(ovm); /* drop reference to old vmspace */
2354 }
2355
2356 /*
2357 * uvmspace_exec: the process wants to exec a new program
2358 *
2359 * - XXX: no locking on vmspace
2360 */
2361
2362 void uvmspace_exec(p)
2363
2364 struct proc *p;
2365
2366 {
2367 struct vmspace *nvm, *ovm = p->p_vmspace;
2368 vm_map_t map = &ovm->vm_map;
2369
2370 #ifdef sparc
2371 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
2372 kill_user_windows(p); /* before stack addresses go away */
2373 #endif
2374
2375 /*
2376 * see if more than one process is using this vmspace...
2377 */
2378
2379 if (ovm->vm_refcnt == 1) {
2380
2381 /*
2382 * if p is the only process using its vmspace then we can safely
2383 * recycle that vmspace for the program that is being exec'd.
2384 */
2385
2386 #ifdef SYSVSHM
2387 /*
2388 * SYSV SHM semantics require us to kill all segments on an exec.
2389 */
2390 if (ovm->vm_shm)
2391 shmexit(ovm);
2392 #endif
2393
2394 /*
2395 * now unmap the old program
2396 */
2397 uvm_unmap(map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, 0);
2398
2399 } else {
2400
2401 /*
2402 * p's vmspace is being shared, so we can't reuse it for p since
2403      * it is still being used by others. allocate a new vmspace for
2404      * p.
2405 */
2406 nvm = uvmspace_alloc(map->min_offset, map->max_offset,
2407 map->entries_pageable);
2408
2409 #if (defined(i386) && !defined(PMAP_NEW)) || defined(pc532)
2410 /*
2411 * allocate zero fill area in the new vmspace's map for user page
2412 * tables for ports that have old style pmaps that keep user page
2413 * tables in the top part of the process' address space.
2414 *
2415 * XXXCDC: this should go away once all pmaps are fixed
2416 */
2417 {
2418 vm_offset_t addr = VM_MAXUSER_ADDRESS;
2419 if (uvm_map(&nvm->vm_map, &addr, VM_MAX_ADDRESS - addr,
2420 NULL, UVM_UNKNOWN_OFFSET,
2421 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
2422 UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW))
2423 != KERN_SUCCESS)
2424 panic("vm_allocate of PT page area failed");
2425 }
2426 #endif
2427
2428 /*
2429 * install new vmspace and drop our ref to the old one.
2430 */
2431
2432 p->p_vmspace = nvm;
2433 pmap_activate(p);
2434 uvmspace_free(ovm);
2435 }
2436 }
2437
2438 /*
2439 * uvmspace_free: free a vmspace data structure
2440 *
2441 * - XXX: no locking on vmspace
2442 */
2443
2444 void uvmspace_free(vm)
2445
2446 struct vmspace *vm;
2447
2448 {
2449 vm_map_entry_t dead_entries;
2450 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
2451
2452 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
2453 if (--vm->vm_refcnt == 0) {
2454 /*
2455 * lock the map, to wait out all other references to it. delete
2456 * all of the mappings and pages they hold, then call the pmap
2457 * module to reclaim anything left.
2458 */
2459 vm_map_lock(&vm->vm_map);
2460 if (vm->vm_map.nentries) {
2461 (void) uvm_unmap_remove(&vm->vm_map, vm->vm_map.min_offset,
2462 vm->vm_map.max_offset, TRUE, &dead_entries);
2463 if (dead_entries != NULL)
2464 uvm_unmap_detach(dead_entries, 0);
2465 }
2466 pmap_destroy(vm->vm_map.pmap);
2467 vm->vm_map.pmap = NULL;
2468 FREE(vm, M_VMMAP);
2469 }
2470 UVMHIST_LOG(maphist,"<- done", 0,0,0,0);
2471 }
2472
2473 /*
2474 * F O R K - m a i n e n t r y p o i n t
2475 */
2476 /*
2477 * uvmspace_fork: fork a process' main map
2478 *
2479 * => create a new vmspace for child process from parent.
2480 * => parent's map must not be locked.
2481 */
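
/*
 * usage sketch (names illustrative): fork code would hand the child
 * a copy of the parent's vmspace with something like:
 *
 *	p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
 */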
2482
2483 struct vmspace *uvmspace_fork(vm1)
2484
2485 struct vmspace *vm1;
2486
2487 {
2488 struct vmspace *vm2;
2489 vm_map_t old_map = &vm1->vm_map;
2490 vm_map_t new_map;
2491 vm_map_entry_t old_entry;
2492 vm_map_entry_t new_entry;
2493 pmap_t new_pmap;
2494 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
2495
2496 #if (defined(i386) && !defined(PMAP_NEW)) || defined(pc532)
2497 /*
2498 * avoid copying any of the parent's pagetables or other per-process
2499 * objects that reside in the map by marking all of them non-inheritable
2500 * XXXCDC: should go away
2501 */
2502 (void) uvm_map_inherit(old_map, VM_MAXUSER_ADDRESS, VM_MAX_ADDRESS,
2503 VM_INHERIT_NONE);
2504 #endif
2505
2506 vm_map_lock(old_map);
2507
2508 vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
2509 old_map->entries_pageable);
2510 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2511 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2512 new_map = &vm2->vm_map; /* XXX */
2513 new_pmap = new_map->pmap;
2514
2515 old_entry = old_map->header.next;
2516
2517 /*
2518 * go entry-by-entry
2519 */
2520
2521 while (old_entry != &old_map->header) {
2522
2523 /*
2524 * first, some sanity checks on the old entry
2525 */
2526 if (UVM_ET_ISSUBMAP(old_entry))
2527 panic("fork: encountered a submap during fork (illegal)");
2528 else if (UVM_ET_ISMAP(old_entry)) {
2529 if (UVM_ET_ISNEEDSCOPY(old_entry))
2530 panic("fork: encountered a share map entry that needs_copy (illegal)");
2531 if (UVM_ET_ISCOPYONWRITE(old_entry))
2532 panic("fork: encountered a copy_on_write share map entry (illegal)");
2533 if (old_entry->aref.ar_amap)
2534 panic("fork: detected share map entry that has an amap (illegal)");
2535 } else {
2536 if (!UVM_ET_ISCOPYONWRITE(old_entry) && UVM_ET_ISNEEDSCOPY(old_entry))
2537 panic("fork: non-copy_on_write map entry marked needs_copy (illegal)");
2538 }
2539
2540
2541 switch (old_entry->inheritance) {
2542 case VM_INHERIT_NONE:
2543
2544 /*
2545 * drop the mapping
2546 */
2547
2548 break;
2549
2550 case VM_INHERIT_SHARE:
2551
2552 /*
2553 * share the mapping: this means we want the old and new entries to
2554 * share amaps and backing objects.
2555 */
2556
2557 /*
2558 * if the old_entry needs a new amap (due to prev fork) then we need
2559 * to allocate it now so that we have something we own to share with
2560 * the new_entry. [in other words, we need to clear needs_copy]
2561 */
2562
2563 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
2564 /* get our own amap, clears needs_copy */
2565 amap_copy(old_map, old_entry, M_WAITOK, FALSE, 0, 0);
2566 /* XXXCDC: WAITOK??? */
2567 }
2568
2569 new_entry = uvm_mapent_alloc(new_map);
2570 uvm_mapent_copy(old_entry, new_entry); /* old_entry -> new_entry */
2571 new_entry->wired_count = 0; /* new pmap has nothing wired in it */
2572
2573 /*
2574 * gain reference to objects backing the map
2575 */
2576 if (UVM_ET_ISMAP(new_entry)) { /* share map? */
2577 uvm_map_reference(old_entry->object.share_map);
2578 } else {
2579 if (new_entry->aref.ar_amap)
2580 amap_ref(new_entry, AMAP_SHARED); /* share reference */
2581 if (new_entry->object.uvm_obj &&
2582 new_entry->object.uvm_obj->pgops->pgo_reference)
2583 new_entry->object.uvm_obj->
2584 pgops->pgo_reference(new_entry->object.uvm_obj);
2585 }
2586
2587 /* insert entry at end of new_map's entry list */
2588 uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
2589
2590 /*
2591 * pmap_copy the mappings: this routine is optional but if it is
2592 * there it will reduce the number of page faults in the new proc.
2593 */
2594
2595 pmap_copy(new_pmap, old_map->pmap, new_entry->start,
2596 (old_entry->end - old_entry->start), old_entry->start);
2597
2598 break;
2599
2600 case VM_INHERIT_COPY:
2601
2602 /*
2603 * copy-on-write the mapping (using mmap's MAP_PRIVATE semantics)
2604 */
2605
2606 /*
2607      * share maps: we special-case them (handled by uvm_map_sharemapcopy)
2608 */
2609
2610 if (UVM_ET_ISMAP(old_entry)) { /* share map? */
2611 uvm_map_sharemapcopy(old_map, old_entry, new_map);
2612 break;
2613 }
2614
2615 /*
2616 * not a share map. allocate new_entry, adjust reference counts.
2617 * (note that new references are read-only).
2618 */
2619
2620 new_entry = uvm_mapent_alloc(new_map);
2621 uvm_mapent_copy(old_entry, new_entry); /* old_entry -> new_entry */
2622 if (new_entry->aref.ar_amap) {
2623 amap_ref(new_entry, 0);
2624 }
2625 if (new_entry->object.uvm_obj &&
2626 new_entry->object.uvm_obj->pgops->pgo_reference)
2627 new_entry->object.uvm_obj->
2628 pgops->pgo_reference(new_entry->object.uvm_obj);
2629
2630 new_entry->wired_count = 0; /* new pmap has nothing wired in it */
2631 new_entry->etype |= (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
2632 uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
2633
2634 /*
2635 * the new entry will need an amap. it will either need to be
2636      * copied from the old entry or created from scratch (if the old
2637      * entry does not have an amap). can we defer this copy until
2638 * later (by setting needs_copy) or do we need to do it now?
2639 *
2640 * we must do it now if any of the following conditions hold:
2641 *
2642 * 1. the old entry has an amap and it is not copy_on_write [i.e.
2643 * shared].
2644 * why: we would have to write-protect the old mapping in the
2645 * parent's pmap [thus needlessly changing the protection of a
2646 * shared mapping, something we don't want to do]
2647 * note: a non-copy-on-write old entry will not have an
2648 * amap unless we've used non-standard features of this VM system.
2649 * [also, see semantic note below...]
2650 *
2651 * 2. the old entry has an amap and that amap is being shared.
2652 * why: if the amap is being shared between 2 or more processes
2653      * they need to continue sharing the amap. if we try to defer
2654      * the copy there is no easy way to determine which process needs
2655      * to break off its references to the amap and which ones are supposed
2656 * to keep it at fault time.
2657 *
2658 * 3. if the old entry was copy_on_write and wired then we
2659 * are going to have to call fault_copy_entry now (see below).
2660 * that needs to have the amap copied also, so we do it here
2661 * too.
2662 *
2663 * semantic note: if the old entry was shared and had an amap
2664 * then the child gets a snapshot copy of the pages in the amap
2665 * now, but the child does not want to see any new pages added
2666 * to the amap by the parent after the fork. the child will see
2667 * changes made by the parent to any amap pages it inherits
2668 * until it writes them itself. to get these semantics we need
2669 * to copy the amap now (as per [1] above).
2670 */
2671
2672 if ((old_entry->aref.ar_amap &&
2673 (UVM_ET_ISCOPYONWRITE(old_entry) == FALSE ||
2674 (old_entry->aref.ar_amap->am_flags & AMAP_SHARED) != 0)) ||
2675 (old_entry->wired_count != 0 && UVM_ET_ISCOPYONWRITE(old_entry)) ) {
2676 amap_copy(new_map, new_entry, M_WAITOK, FALSE, 0, 0);
2677 /* XXXCDC: M_WAITOK? */
2678 }
2679
2680 /*
2681 * if an entry is wired down, then we can not get faults on access.
2682 * this means that we can't do COW because we can't write protect
2683 * the old entry (otherwise we could get a protection fault on wired
2684 * memory). if that is the case we must copy things now. note
2685 * that we've already allocated the new amap (above).
2686 */
2687
2688 if (old_entry->wired_count != 0 && UVM_ET_ISCOPYONWRITE(old_entry)) {
2689
2690 /*
2691 * copy it now
2692 */
2693
2694 amap_cow_now(new_map, new_entry); /* was fault_copy_entry */
2695
2696 } else {
2697
2698 /*
2699 * do a copy-on-write. two cases to consider:
2700 	 * 1. old_entry is MAP_SHARED (old_entry->copy_on_write == FALSE)
2701 * => no need to protect old mappings
2702 * 2. old_entry is MAP_PRIVATE (old_entry->copy_on_write == TRUE)
2703 * => must protect both old and new mappings
2704 */
2705
2706 if (UVM_ET_ISCOPYONWRITE(old_entry)) { /* private mapping? */
2707
2708 /*
2709 * protect old mappings. note that if needs_copy is true then
2710 * the mappings have already been protected elsewhere and there
2711 * is no need to do it again. also note that pmap_copy will
2712 * copy the protected mappings to the child.
2713 */
2714
2715 if (!UVM_ET_ISNEEDSCOPY(old_entry)) {
2716 /* write protect pages */
2717 pmap_protect(old_map->pmap, old_entry->start, old_entry->end,
2718 old_entry->protection & ~VM_PROT_WRITE);
2719 old_entry->etype |= UVM_ET_NEEDSCOPY;
2720 }
2721 }
2722
2723 pmap_copy(new_pmap, old_map->pmap, new_entry->start,
2724 (old_entry->end - old_entry->start), old_entry->start);
2725
2726 /*
2727 * protect new mappings. already taken care of for private
2728 * mappings by the call to pmap_protect above.
2729 */
2730
2731 if (!UVM_ET_ISCOPYONWRITE(old_entry)) {
2732 pmap_protect(new_pmap, new_entry->start, new_entry->end,
2733 new_entry->protection & ~VM_PROT_WRITE);
2734 }
2735 }
2736
2737 break;
2738 }
2739 old_entry = old_entry->next;
2740 }
2741
2742 new_map->size = old_map->size;
2743 vm_map_unlock(old_map);
2744
2745 #if (defined(i386) && !defined(PMAP_NEW)) || defined(pc532)
2746 /*
2747 * allocate zero fill area in the new vmspace's map for user page
2748 * tables for ports that have old style pmaps that keep user page
2749 * tables in the top part of the process' address space.
2750 *
2751 * XXXCDC: this should go away once all pmaps are fixed
2752 */
2753 {
2754 vm_offset_t addr = VM_MAXUSER_ADDRESS;
2755 if (uvm_map(new_map, &addr, VM_MAX_ADDRESS - addr,
2756 NULL, UVM_UNKNOWN_OFFSET,
2757 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
2758 UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW))
2759 != KERN_SUCCESS)
2760 panic("vm_allocate of PT page area failed");
2761 }
2762 #endif
2763
2764 #ifdef SYSVSHM
2765 if (vm1->vm_shm)
2766 shmfork(vm1, vm2);
2767 #endif
2768
2769 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
2770 return(vm2);
2771 }
2772
2773
2774 /*
2775 * uvm_map_sharemapcopy: handle the copying of a share map during a
2776 * fork. this is a helper function for uvmspace_fork. it is called
2777 * when we are doing a fork and we have encountered a map entry which
2778 * has two attributes: [1] its inherit code is VM_INHERIT_COPY, and
2779 * [2] it points to a share map (i.e. is_a_map is true). in this case
2780 * we must traverse the area of the share map pointed to by the
2781 * old_entry and make private copies of the map entries in the share
2782 * map. this is somewhat similar to what happens in the non-share map
2783 * case in fork, but it has to handle multiple map entries which may
2784  * not be the proper size. it was separated out into its own function
2785 * in order to make the main body of the fork code easier to read and
2786 * understand!
2787 *
2788 * main_entry->offset = starting VA in share map for our mapping
2789 *
2790 * => main map is locked by caller.
2791 * => we lock share map.
2792 * => new map isn't in use yet (still being set up for the first time).
2793 */
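
/*
 * worked example of the address conversion done below (illustrative
 * values only): if main_entry maps [0x1000,0x5000) at share map
 * offset 0, a share map entry covering [0x2000,0x3000) becomes a new
 * main map entry covering
 * [0x1000 + (0x2000 - 0), 0x1000 + (0x3000 - 0)) = [0x3000,0x4000).
 */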
2794
2795 void uvm_map_sharemapcopy(main_map, main_entry, new_map)
2796
2797 vm_map_t main_map, new_map;
2798 vm_map_entry_t main_entry;
2799
2800 {
2801 vm_map_t share_map = main_entry->object.share_map;
2802 vm_map_entry_t share_entry, new_entry;
2803 vm_offset_t shend = main_entry->offset +
2804 (main_entry->end - main_entry->start);
2805 int refs;
2806
2807 /*
2808 * lock share map. find first map entry of interest. clip if needed.
2809 */
2810
2811 vm_map_lock(share_map);
2812 if (uvm_map_lookup_entry(share_map, main_entry->offset, &share_entry))
2813 UVM_MAP_CLIP_START(share_map, share_entry, main_entry->offset);
2814
2815 while (share_entry != &share_map->header && share_entry->start < shend) {
2816
2817 /*
2818 * at this point we have a map entry that we need to make a copy of.
2819 */
2820
2821 UVM_MAP_CLIP_END(share_map, share_entry, shend); /* may need to clip? */
2822
2823 new_entry = uvm_mapent_alloc(new_map);
2824 uvm_mapent_copy(share_entry, new_entry); /* share_entry -> new_entry */
2825
2826 /* convert share map addresses back to main map addresses */
2827 new_entry->start = main_entry->start +
2828 (new_entry->start - main_entry->offset);
2829 new_entry->end = main_entry->start + (new_entry->end - main_entry->offset);
2830
2831 /* gain references */
2832 if (new_entry->aref.ar_amap) {
2833 amap_ref(new_entry, 0);
2834 }
2835 if (new_entry->object.uvm_obj &&
2836 new_entry->object.uvm_obj->pgops->pgo_reference)
2837 new_entry->object.uvm_obj->
2838 pgops->pgo_reference(new_entry->object.uvm_obj);
2839
2840 /* init rest of new entry and insert at end of new map */
2841 new_entry->wired_count = 0;
2842 new_entry->etype |= (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
2843 uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
2844
2845 /* don't bother trying to defer the copy in the share map case */
2846 amap_copy(new_map, new_entry, M_WAITOK, FALSE, 0, 0); /* XXXCDC: WAITOK? */
2847
2848 /* just like non-share case: can't COW wired memory */
2849 if (share_entry->wired_count != 0 && UVM_ET_ISCOPYONWRITE(share_entry)) {
2850
2851
2852 amap_cow_now(new_map, new_entry); /* was fault copy entry */
2853
2854 } else {
2855
2856 /* just like non-share case */
2857 if (UVM_ET_ISCOPYONWRITE(share_entry)) {
2858
2859 if (!UVM_ET_ISNEEDSCOPY(share_entry)) {
2860
2861 /*
2862 * must write protect pages. if we have the sole reference
2863 * to the share map we can use good old pmap_protect. if we
2864 * don't, then we have to use pmap_page_protect.
2865 *
2866 	  * note that the VA new_entry->start (the starting address of this
2867 	  * segment of the share map in the child process) is the same
2868 	  * virtual address at which it is mapped in the parent (thus we
2869 * can mix main_map and new_entry in the pmap_protect call below).
2870 */
2871
2872 simple_lock(&share_map->ref_lock);
2873 refs = share_map->ref_count;
2874 simple_unlock(&share_map->ref_lock);
2875 if (refs == 1) {
2876 pmap_protect(main_map->pmap, new_entry->start, new_entry->end,
2877 share_entry->protection & ~VM_PROT_WRITE);
2878 } else {
2879 if (share_entry->aref.ar_amap) {
2880 simple_lock(&share_entry->aref.ar_amap->am_l);
2881 amap_share_protect(share_entry,
2882 share_entry->protection & ~VM_PROT_WRITE);
2883 simple_unlock(&share_entry->aref.ar_amap->am_l);
2884 }
2885 if (share_entry->object.uvm_obj) {
2886 #ifdef DIAGNOSTIC
2887 if (!share_entry->object.uvm_obj->pgops->pgo_shareprot)
2888 panic("fork: share_entry with no prot function");
2889 #endif
2890 simple_lock(&share_entry->object.uvm_obj->vmobjlock);
2891 share_entry->object.uvm_obj->pgops->
2892 pgo_shareprot(share_entry,
2893 share_entry->protection & ~VM_PROT_WRITE);
2894 simple_unlock(&share_entry->object.uvm_obj->vmobjlock);
2895 }
2896 }
2897
2898 share_entry->etype |= UVM_ET_NEEDSCOPY;
2899 }
2900 }
2901
2902 /*
2903       * now copy the mappings: note addresses are the same in both
2904 * main_map and new_map
2905 */
2906 pmap_copy(new_map->pmap, main_map->pmap, new_entry->start,
2907 (new_entry->end - new_entry->start), new_entry->start);
2908
2909 /* just like non-share case */
2910 if (!UVM_ET_ISCOPYONWRITE(share_entry)) {
2911 pmap_protect(new_map->pmap, new_entry->start, new_entry->end,
2912 new_entry->protection & ~VM_PROT_WRITE);
2913 }
2914 }
2915
2916 /* next entry in share map, please */
2917 share_entry = share_entry->next;
2918
2919 }
2920 /* done! */
2921 }
2922
2923 #if defined(DDB)
2924
2925 /*
2926 * DDB hooks
2927 */
2928
2929 /*
2930 * uvm_map_print: print out a map
2931 */
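
/*
 * usage sketch: from the kernel debugger one might dump a map with
 * something like (map pointer illustrative):
 *
 *	call uvm_map_print(kernel_map, 1)
 */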
2932
2933 void uvm_map_print(map, full)
2934
2935 vm_map_t map;
2936 boolean_t full;
2937
2938 {
2939 uvm_map_printit(map, full, printf);
2940 }
2941
2942 /*
2943 * uvm_map_printit: actually prints the map
2944 */
2945
2946 void uvm_map_printit(map, full, pr)
2947
2948 vm_map_t map;
2949 boolean_t full;
2950 void (*pr) __P((const char *, ...));
2951
2952 {
2953 vm_map_entry_t entry;
2954 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset, map->max_offset);
2955 (*pr)("\tpmap=%p, #ent=%d, sz=%d, ref=%d, main=%c, version=%d\n",
2956 map->pmap, map->nentries, map->size, map->ref_count,
2957 (map->is_main_map) ? 'T' : 'F', map->timestamp);
2958 if (!full) return;
2959 for (entry = map->header.next; entry != &map->header; entry = entry->next) {
2960 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%x, amap=%p/%d\n",
2961 entry, entry->start, entry->end, entry->object.uvm_obj, entry->offset,
2962 entry->aref.ar_amap, entry->aref.ar_slotoff);
2963 (*pr)(
2964 "\tmap=%c, submap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, wc=%d, adv=%d\n",
2965 (entry->etype & UVM_ET_MAP) ? 'T' : 'F',
2966 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
2967 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
2968 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
2969 entry->protection, entry->max_protection, entry->inheritance,
2970 entry->wired_count, entry->advice);
2971 }
2972 }
2973
2974 /*
2975 * uvm_object_print: print out an object
2976 */
2977
2978 void uvm_object_print(uobj, full)
2979
2980 struct uvm_object *uobj;
2981 boolean_t full;
2982
2983 {
2984 uvm_object_printit(uobj, full, printf);
2985 }
2986
2987 /*
2988 * uvm_object_printit: actually prints the object
2989 */
2990
2991 void uvm_object_printit(uobj, full, pr)
2992
2993 struct uvm_object *uobj;
2994 boolean_t full;
2995 void (*pr) __P((const char *, ...));
2996
2997 {
2998 struct vm_page *pg;
2999 int cnt = 0;
3000
3001 (*pr)("OBJECT %p: pgops=%p, npages=%d, ", uobj, uobj->pgops, uobj->uo_npages);
3002 if (uobj->uo_refs == UVM_OBJ_KERN)
3003 (*pr)("refs=<SYSTEM>\n");
3004 else
3005 (*pr)("refs=%d\n", uobj->uo_refs);
3006
3007 if (!full) return;
3008 (*pr)(" PAGES <pg,offset>:\n ");
3009 for (pg = uobj->memq.tqh_first ; pg ; pg = pg->listq.tqe_next, cnt++) {
3010 (*pr)("<%p,0x%lx> ", pg, pg->offset);
3011 if ((cnt % 3) == 2) (*pr)("\n ");
3012 }
3013 if ((cnt % 3) != 2) (*pr)("\n");
3014 }
3015
3016 /*
3017 * uvm_page_print: print out a page
3018 */
3019
3020 void uvm_page_print(pg, full)
3021
3022 struct vm_page *pg;
3023 boolean_t full;
3024
3025 {
3026 uvm_page_printit(pg, full, printf);
3027 }
3028
3029 /*
3030 * uvm_page_printit: actually print the page
3031 */
3032
3033 void uvm_page_printit(pg, full, pr)
3034
3035 struct vm_page *pg;
3036 boolean_t full;
3037 void (*pr) __P((const char *, ...));
3038
3039 {
3040 struct vm_page *lcv;
3041 struct uvm_object *uobj;
3042 struct pglist *pgl;
3043
3044 (*pr)("PAGE %p:\n", pg);
3045 (*pr)(" flags=0x%x, pqflags=0x%x, vers=%d, wire_count=%d, pa=0x%lx\n",
3046 pg->flags, pg->pqflags, pg->version, pg->wire_count, pg->phys_addr);
3047 (*pr)(" uobject=%p, uanon=%p, offset=0x%lx loan_count=%d\n",
3048 pg->uobject, pg->uanon, pg->offset, pg->loan_count);
3049 #if defined(UVM_PAGE_TRKOWN)
3050 if (pg->flags & PG_BUSY)
3051 (*pr)(" owning process = %d, tag=%s\n", pg->owner, pg->owner_tag);
3052 else
3053 (*pr)(" page not busy, no owner\n");
3054 #else
3055 (*pr)(" [page ownership tracking disabled]\n");
3056 #endif
3057
3058 if (!full) return;
3059
3060 /* cross-verify object/anon */
3061 if ((pg->pqflags & PQ_FREE) == 0) {
3062 if (pg->pqflags & PQ_ANON) {
3063 if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
3064 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
3065 (pg->uanon) ? pg->uanon->u.an_page : NULL);
3066 else
3067 (*pr)(" anon backpointer is OK\n");
3068 } else {
3069 uobj = pg->uobject;
3070 if (uobj) {
3071 (*pr)(" checking object list\n");
3072 for (lcv = uobj->memq.tqh_first ; lcv ; lcv = lcv->listq.tqe_next) {
3073 if (lcv == pg) break;
3074 }
3075 if (lcv)
3076 (*pr)(" page found on object list\n");
3077 else
3078 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
3079 }
3080 }
3081 }
3082
3083 /* cross-verify page queue */
3084 if (pg->pqflags & PQ_FREE)
3085 pgl = &uvm.page_free;
3086 else if (pg->pqflags & PQ_INACTIVE)
3087 pgl = (pg->pqflags & PQ_SWAPBACKED) ?
3088 &uvm.page_inactive_swp : &uvm.page_inactive_obj;
3089 else if (pg->pqflags & PQ_ACTIVE)
3090 pgl = &uvm.page_active;
3091 else
3092 pgl = NULL;
3093
3094 if (pgl) {
3095 (*pr)(" checking pageq list\n");
3096 for (lcv = pgl->tqh_first ; lcv ; lcv = lcv->pageq.tqe_next) {
3097 if (lcv == pg) break;
3098 }
3099 if (lcv)
3100 (*pr)(" page found on pageq list\n");
3101 else
3102 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
3103 }
3104 }
3105 #endif
3106