/*	$NetBSD: uvm_map.c,v 1.7 1998/02/18 14:50:32 drochner Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#define UVM_MAP
#include <uvm/uvm.h>

struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;

/*
 * macros
 */

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) \
{ \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
}

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) \
{ \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
}
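
/*
 * usage sketch (taken from uvm_map() below): a freshly allocated entry
 * is linked in after the entry returned by uvm_map_findspace():
 *
 *	new_entry = uvm_mapent_alloc(map);
 *	...
 *	uvm_map_entry_link(map, prev_entry, new_entry);
 *
 * map entries form a circular doubly-linked list headed by map->header.
 */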

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define SAVE_HINT(map,value) \
	simple_lock(&(map)->hint_lock); \
	(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock);

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) \
{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
}

/*
 * local prototypes
 */

static vm_map_entry_t	uvm_mapent_alloc __P((vm_map_t));
static void		uvm_mapent_copy __P((vm_map_entry_t,vm_map_entry_t));
static void		uvm_mapent_free __P((vm_map_entry_t));
static void		uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));

/*
 * local inlines
 */

/*
 * uvm_mapent_alloc: allocate a map entry
 *
 * => XXX: static pool for kernel map?
 */

static __inline vm_map_entry_t
uvm_mapent_alloc(map)
	vm_map_t map;
{
	vm_map_entry_t me;
	int s;
	UVMHIST_FUNC("uvm_mapent_alloc");
	UVMHIST_CALLED(maphist);

	if (map->entries_pageable) {
		MALLOC(me, vm_map_entry_t, sizeof(struct vm_map_entry),
		    M_VMMAPENT, M_WAITOK);
		me->flags = 0;
		/* me can't be null, wait ok */

	} else {
		s = splimp();	/* protect kentry_free list with splimp */
		simple_lock(&uvm.kentry_lock);
		me = uvm.kentry_free;
		if (me)
			uvm.kentry_free = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		if (!me)
			panic("mapent_alloc: out of kernel map entries, check MAX_KMAPENT");
		me->flags = UVM_MAP_STATIC;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [pageable=%d]",
	    me, map->entries_pageable, 0, 0);
	return(me);
}

/*
 * uvm_mapent_free: free map entry
 *
 * => XXX: static pool for kernel map?
 */

static __inline void
uvm_mapent_free(me)
	vm_map_entry_t me;
{
	int s;
	UVMHIST_FUNC("uvm_mapent_free");
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if ((me->flags & UVM_MAP_STATIC) == 0) {
		FREE(me, M_VMMAPENT);
	} else {
		s = splimp();	/* protect kentry_free list with splimp */
		simple_lock(&uvm.kentry_lock);
		me->next = uvm.kentry_free;
		uvm.kentry_free = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static __inline void
uvm_mapent_copy(src, dst)
	vm_map_entry_t src;
	vm_map_entry_t dst;
{
	/*
	 * copy all fields up to the uvm_map_entry_stop_copy marker,
	 * leaving dst's fields that follow the marker (e.g. flags) intact.
	 */
	bcopy(src, dst, ((char *)&src->uvm_map_entry_stop_copy) - ((char *)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static __inline void
uvm_map_entry_unwire(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	uvm_fault_unwire(map->pmap, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 * uvm_map_init: init mapping system at boot time.   note that we allocate
 * and init the static pool of vm_map_entry_t's for the kernel here.
 */

void
uvm_map_init()
{
	static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif
	int lcv;

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
	UVMCNT_INIT(uvm_map_call,  UVMCNT_CNT, 0, "# uvm_map() successful calls", 0);
	UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
	UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward", 0);
	UVMCNT_INIT(uvm_mlk_call,  UVMCNT_CNT, 0, "# map lookup calls", 0);
	UVMCNT_INIT(uvm_mlk_hint,  UVMCNT_CNT, 0, "# map lookup hint hits", 0);

	/*
	 * now set up static pool of kernel map entries ...
	 */

	simple_lock_init(&uvm.kentry_lock);
	uvm.kentry_free = NULL;
	for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
		kernel_map_entry[lcv].next = uvm.kentry_free;
		uvm.kentry_free = &kernel_map_entry[lcv];
	}
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(map, entry, start)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t start;
{
	register vm_map_entry_t new_entry;
	vm_offset_t new_adj;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISMAP(entry)) {
		uvm_map_reference(new_entry->object.share_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(entry->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(map, entry, end)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t end;
{
	register vm_map_entry_t new_entry;
	vm_offset_t new_adj;	/* #bytes we move start forward */

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap) {
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISMAP(entry)) {
		uvm_map_reference(new_entry->object.share_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(entry->object.uvm_obj);
	}
}


/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	[1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	[2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	[3] <uobj,uoffset>		== normal mapping
 *	[4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.
 * => XXXCDC: need way to map in external amap?
 */
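
/*
 * illustrative sketch (not part of the original source): an anonymous,
 * copy-on-write mapping with its own amap (case [2] above) might be
 * established with:
 *
 *	vm_offset_t va = hint;
 *	error = uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_OVERLAY));
 *
 * the UVM_MAPFLAG macro packs prot/maxprot/inherit/advice/flags into the
 * single "flags" argument decoded at the top of uvm_map(); the exact
 * combination shown is only an example.
 */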

int
uvm_map(map, startp, size, uobj, uoffset, flags)
	vm_map_t map;
	vm_offset_t *startp;	/* IN/OUT */
	vm_size_t size;
	struct uvm_object *uobj;
	vm_offset_t uoffset;
	uvm_flag_t flags;
{
	vm_map_entry_t prev_entry, new_entry;
	vm_prot_t prot = UVM_PROTECTION(flags), maxprot = UVM_MAXPROTECTION(flags);
	vm_inherit_t inherit = UVM_INHERIT(flags);
	int advice = UVM_ADVICE(flags);
	UVMHIST_FUNC("uvm_map");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
	    map, *startp, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * step 0: sanity check of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return(KERN_PROTECTION_FAILURE);
	}

	/*
	 * step 1: figure out where to put new VM range
	 */

	if (vm_map_lock_try(map) == FALSE) {
		if (flags & UVM_FLAG_TRYLOCK)
			return(KERN_FAILURE);
		vm_map_lock(map);	/* could sleep here */
	}
	if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
	    uobj, uoffset, flags & UVM_FLAG_FIXED)) == NULL) {
		UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
		vm_map_unlock(map);
		return(KERN_NO_SPACE);
	}

#if defined(PMAP_GROWKERNEL)	/* hack */
	{
		static vm_offset_t maxkaddr = 0;	/* locked by kernel_map lock */

		/*
		 * hack: grow kernel PTPs in advance.
		 */
		if (map == kernel_map && maxkaddr < (*startp + size)) {
			pmap_growkernel(*startp + size);
			maxkaddr = *startp + size;
		}
	}
#endif

	UVMCNT_INCR(uvm_map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *	and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *	now (based on the starting address of the map).   this case is
	 *	for kernel object mappings where we don't know the offset until
	 *	the virtual address is found (with uvm_map_findspace).   the
	 *	offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET)
			uoffset = *startp - vm_map_min(map);
	}

	/*
	 * step 2: try and insert in map by extending previous entry, if
	 * possible.
	 * XXX: we don't try and pull back the next entry.   might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->end == *startp && prev_entry != &map->header &&
	    prev_entry->object.uvm_obj == uobj) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto step3;

		if (UVM_ET_ISMAP(prev_entry))
			goto step3;

		if (prev_entry->protection != prot ||
		    prev_entry->max_protection != maxprot)
			goto step3;

		if (prev_entry->inheritance != inherit ||
		    prev_entry->advice != advice)
			goto step3;

		/* wired_count's must match (new area is unwired) */
		if (prev_entry->wired_count)
			goto step3;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at am_ref since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay
		 * there.
		 */

		if (prev_entry->aref.ar_amap && prev_entry->aref.ar_amap->am_ref != 1) {
			goto step3;
		}

		/* got it! */

		UVMCNT_INCR(map_backmerge);
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (prev_entry->aref.ar_amap) {
			amap_extend(prev_entry, size);
		}

		prev_entry->end += size;
		map->size += size;

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		vm_map_unlock(map);
		return(KERN_SUCCESS);
	}

step3:
	UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);

	/*
	 * check for possible forward merge (which we don't do) and count
	 * the number of times we missed a *possible* chance to merge more
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 && prev_entry->next != &map->header &&
	    prev_entry->next->start == (*startp + size))
		UVMCNT_INCR(map_forwmerge);

	/*
	 * step 3: allocate new entry and link it in
	 */

	new_entry = uvm_mapent_alloc(map);
	new_entry->start = *startp;
	new_entry->end = new_entry->start + size;
	new_entry->object.uvm_obj = uobj;
	new_entry->offset = uoffset;

	if (uobj)
		new_entry->etype = UVM_ET_OBJ;
	else
		new_entry->etype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		new_entry->etype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			new_entry->etype |= UVM_ET_NEEDSCOPY;
	}

	new_entry->protection = prot;
	new_entry->max_protection = maxprot;
	new_entry->inheritance = inherit;
	new_entry->wired_count = 0;
	new_entry->advice = advice;
	if (flags & UVM_FLAG_OVERLAY) {
		/* to_add: for BSS we overallocate a little since we are likely
		 * to extend */
		vm_offset_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
		    UVM_AMAP_CHUNK * PAGE_SIZE : 0;
		struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
		new_entry->aref.ar_slotoff = 0;
		new_entry->aref.ar_amap = amap;
	} else {
		new_entry->aref.ar_amap = NULL;
	}

	uvm_map_entry_link(map, prev_entry, new_entry);

	map->size += size;

	/*
	 * Update the free space hint
	 */

	if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

boolean_t
uvm_map_lookup_entry(map, address, entry)
	register vm_map_t map;
	register vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	register vm_map_entry_t cur;
	register vm_map_entry_t last;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * Start looking either from the head of the
	 * list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	UVMCNT_INCR(uvm_mlk_call);
	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * Note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			UVMCNT_INCR(uvm_mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur,0,0,0);
			return(TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0,0,0);
				return(TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	return(FALSE);
}
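
/*
 * usage sketch (condensed from uvm_unmap_remove() below): the return
 * value tells you whether the address fell inside an entry or in a gap
 * just after the returned entry:
 *
 *	if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
 *		... "start" is inside first_entry: clip and use it ...
 *	} else {
 *		... gap: first_entry is the entry before "start" ...
 *		entry = first_entry->next;
 *	}
 */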

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless fixed is true
 *	(in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

vm_map_entry_t
uvm_map_findspace(map, hint, length, result, uobj, uoffset, fixed)
	vm_map_t map;
	vm_offset_t hint;
	vm_size_t length;
	vm_offset_t *result;	/* OUT */
	struct uvm_object *uobj;
	vm_offset_t uoffset;
	boolean_t fixed;
{
	vm_map_entry_t entry, next, tmp;
	vm_offset_t end;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, fixed=%d)",
	    map, hint, length, fixed);

	if (hint < map->min_offset) {	/* check ranges ... */
		if (fixed) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return(NULL);
		}
		hint = map->min_offset;
	}
	if (hint > map->max_offset) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, map->min_offset, map->max_offset, 0);
		return(NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	if (!fixed && hint == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			hint = entry->end;
	} else {
		if (uvm_map_lookup_entry(map, hint, &tmp)) {
			/* "hint" address already in use ... */
			if (fixed) {
				UVMHIST_LOG(maphist,"<- fixed & VA in use",
				    0,0,0,0);
				return(NULL);
			}
			hint = tmp->end;
		}
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end  = base VA of current gap,
	 *	 next->start = VA of end of current gap
	 */
	for (;; hint = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */

#ifdef PMAP_PREFER
		/*
		 * push hint forward as needed to avoid VAC alias problems.
		 * we only do this if a valid offset is specified.
		 */
		if (!fixed && uoffset != UVM_UNKNOWN_OFFSET)
			PMAP_PREFER(uoffset, &hint);
#endif
		end = hint + length;
		if (end > map->max_offset || end < hint) {
			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
			return (NULL);
		}
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
		if (fixed) {
			UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
			return(NULL);	/* only one shot at it ... */
		}
	}
	SAVE_HINT(map, entry);
	*result = hint;
	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
	return (entry);
}

/*
 *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => if the "start"/"end" range lies within a mapping of a share map,
 *    then the unmap takes place within the context of that share map
 *    rather than in the main map, unless the "mainonly" flag is set.
 *    (e.g. the "exit" system call would want to set "mainonly").
 * => we return a list of map entries that we've removed from the map
 *    in "entry_list"
 */

int
uvm_unmap_remove(map, start, end, mainonly, entry_list)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t mainonly;
	vm_map_entry_t *entry_list;	/* OUT */
{
	int result, refs;
	vm_map_entry_t entry, first_entry, next;
	vm_offset_t len;
	boolean_t already_removed;
	struct uvm_object *uobj;
	UVMHIST_FUNC("uvm_unmap_remove");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)", map,start,end,0);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * find first entry
	 */
	if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {

		/*
		 * start lies within a mapped region.   first check to see if
		 * it is within a sharemap (in which case we recurse and unmap
		 * within the context of the share map).
		 */
		if (UVM_ET_ISMAP(first_entry) && !UVM_ET_ISSUBMAP(first_entry) &&
		    mainonly == 0 && end <= first_entry->end) {
			/* is a share map and in range ... */
			/* XXX: do address transforms if share VA's != main VA's */
			/* note: main map kept locked during share map unlock */
			result = uvm_unmap(first_entry->object.share_map, start, end, 0);
			*entry_list = NULL;
			return(result);
		}
		/* non-share map: clip and go... */
		entry = first_entry;
		UVM_MAP_CLIP_START(map, entry, start);
		SAVE_HINT(map, entry->prev);	/* critical!  prevents stale hint */

	} else {
		/* start lies in a gap: begin at the next entry */
		entry = first_entry->next;
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * note: we now re-use first_entry for a different task.  we remove
	 * a number of map entries from the map and save them in a linked
	 * list headed by "first_entry".  once we remove them from the map
	 * the caller should unlock the map and drop the references to the
	 * backing objects [c.f. uvm_unmap_detach].  the object is to
	 * separate unmapping from reference dropping.  why?
	 *   [1] the map has to be locked for unmapping
	 *   [2] the map need not be locked for reference dropping
	 *   [3] dropping references may trigger pager I/O, and if we hit
	 *       a pager that does synchronous I/O we may have to wait for it.
	 *   [4] we would like all waiting for I/O to occur with maps unlocked
	 *       so that we don't block other threads.
	 */
	first_entry = NULL;
	*entry_list = NULL;		/* to be safe */

	/*
	 * break up the area into map entry sized regions and unmap.  note
	 * that all mappings have to be removed before we can even consider
	 * dropping references to amaps or VM objects (otherwise we could end
	 * up with a mapping to a page on the free list which would be very
	 * bad).
	 */

	while ((entry != &map->header) && (entry->start < end)) {

		UVM_MAP_CLIP_END(map, entry, end);
		next = entry->next;
		len = entry->end - entry->start;

		/*
		 * unwire before removing addresses from the pmap; otherwise
		 * unwiring will put the entries back into the pmap (XXX).
		 */

		if (entry->wired_count)
			uvm_map_entry_unwire(map, entry);

		/*
		 * special case: handle mappings to anonymous kernel objects.
		 * we want to free these pages right away...
		 */
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->uo_refs == UVM_OBJ_KERN) {

#ifdef DIAGNOSTIC
			if (vm_map_pmap(map) != pmap_kernel())
				panic("uvm_unmap_remove: kernel object mapped by non-kernel map");
#endif

			/*
			 * note: kernel object mappings are currently used in
			 * two ways:
			 *  [1] "normal" mappings of pages in the kernel object
			 *  [2] uvm_km_valloc'd allocations in which we
			 *      pmap_enter in some non-kernel-object page
			 *      (e.g. vmapbuf).
			 *
			 * for case [1], we need to remove the mapping from the
			 * pmap and then remove the page from the kernel object
			 * (because, once pages in a kernel object are unmapped
			 * they are no longer needed, unlike, say, a vnode where
			 * you might want the data to persist until flushed out
			 * of a queue).
			 *
			 * for case [2], we need to remove the mapping from the
			 * pmap.  there shouldn't be any pages at the specified
			 * offset in the kernel object [but it doesn't hurt to
			 * call uvm_km_pgremove just to be safe?]
			 *
			 * uvm_km_pgremove currently does the following:
			 *   for pages in the kernel object in range:
			 *     - pmap_page_protect them out of all pmaps
			 *     - uvm_pagefree the page
			 *
			 * note that in case [1] the pmap_page_protect call in
			 * uvm_km_pgremove may very well be redundant because we
			 * have already removed the mappings beforehand with
			 * pmap_remove (or pmap_kremove).  in the PMAP_NEW case,
			 * the pmap_page_protect call may not do anything, since
			 * PMAP_NEW allows the kernel to enter/remove kernel
			 * mappings without bothering to keep track of the
			 * mappings (e.g. via pv_entry lists).  XXX: because of
			 * this, we should consider removing the
			 * pmap_page_protect from uvm_km_pgremove some time in
			 * the future.
			 */

			/*
			 * remove mappings from pmap
			 */
#if defined(PMAP_NEW)
			pmap_kremove(entry->start, len);
#else
			pmap_remove(pmap_kernel(), entry->start, entry->start+len);
#endif

			/*
			 * remove pages from kernel object
			 */
			uvm_km_pgremove(entry->object.uvm_obj,
			    entry->start - vm_map_min(map),
			    entry->end - vm_map_min(map));

			already_removed = TRUE;

			/* null out kernel_object reference, we've just dropped it */
			entry->etype &= ~UVM_ET_OBJ;
			entry->object.uvm_obj = NULL;	/* to be safe */

		} else {
			already_removed = FALSE;
		}

		/*
		 * remove mappings now.  for sharemaps, check to see if the
		 * reference count is one (i.e. not being shared right now).
		 * if so, use the cheaper pmap_remove() rather than the more
		 * expensive share_protect functions.
		 */

		if (!map->is_main_map) {
			simple_lock(&map->ref_lock);
			refs = map->ref_count;
			simple_unlock(&map->ref_lock);
		}
#if defined(sparc)
		else { refs = 0; }	/* XXX: shut up unused var gcc warning */
#endif

		if (map->is_main_map || (!map->is_main_map && refs == 1)) {
			if (!already_removed)
				pmap_remove(map->pmap, entry->start, entry->end);
		} else {
			/* share map... must remove all mappings */
			if (entry->aref.ar_amap) {
				simple_lock(&entry->aref.ar_amap->am_l);
				amap_share_protect(entry, VM_PROT_NONE);
				simple_unlock(&entry->aref.ar_amap->am_l);
			}
			if (UVM_ET_ISOBJ(entry)) {
				uobj = entry->object.uvm_obj;
				simple_lock(&uobj->vmobjlock);
				uobj->pgops->pgo_shareprot(entry, VM_PROT_NONE);
				simple_unlock(&uobj->vmobjlock);
			}
		}

		/*
		 * remove from map and put it on our list of entries that
		 * we've nuked.  then go do next entry.
		 */
		UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0, 0);
		uvm_map_entry_unlink(map, entry);
		map->size -= len;
		entry->next = first_entry;
		first_entry = entry;
		entry = next;		/* next entry, please */
	}

	/*
	 * now we've cleaned up the map and are ready for the caller to drop
	 * references to the mapped objects.
	 */

	*entry_list = first_entry;
	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
	return(KERN_SUCCESS);
}

/*
 * uvm_unmap_detach: drop references in a chain of map entries
 *
 * => we will free the map entries as we traverse the list.
 */

void
uvm_unmap_detach(first_entry, amap_unref_flags)
	vm_map_entry_t first_entry;
	int amap_unref_flags;
{
	vm_map_entry_t next_entry;
	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);

	while (first_entry) {

#ifdef DIAGNOSTIC
		/*
		 * sanity check
		 */
		/* was part of vm_map_entry_delete() */
		if (first_entry->wired_count)
			panic("unmap: still wired!");
#endif

		UVMHIST_LOG(maphist, "  detach 0x%x: amap=0x%x, obj=0x%x, map?=%d",
		    first_entry, first_entry->aref.ar_amap,
		    first_entry->object.uvm_obj, UVM_ET_ISMAP(first_entry));

		/*
		 * drop reference to amap, if we've got one
		 */

		if (first_entry->aref.ar_amap)
			amap_unref(first_entry, amap_unref_flags);

		/*
		 * drop reference to our backing object, if we've got one
		 */

		if (UVM_ET_ISMAP(first_entry)) {
			uvm_map_deallocate(first_entry->object.share_map);
		} else {
			if (UVM_ET_ISOBJ(first_entry) &&
			    first_entry->object.uvm_obj->pgops->pgo_detach)
				first_entry->object.uvm_obj->pgops->
				    pgo_detach(first_entry->object.uvm_obj);
		}

		/*
		 * next entry
		 */
		next_entry = first_entry->next;
		uvm_mapent_free(first_entry);
		first_entry = next_entry;
	}

	/*
	 * done!
	 */
	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
	return;
}

/*
 *   E X T R A C T I O N   F U N C T I O N S
 */

/*
 * uvm_map_reserve: reserve space in a vm_map for future use.
 *
 * => we reserve space in a map by putting a dummy map entry in the
 *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
 * => map should be unlocked (we will write lock it)
 * => we return true if we were able to reserve space
 * => XXXCDC: should be inline?
 */

int
uvm_map_reserve(map, size, offset, raddr)
	vm_map_t map;
	vm_size_t size;
	vm_offset_t offset;	/* hint for pmap_prefer */
	vm_offset_t *raddr;	/* OUT: reserved VA */
{
	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
	    map,size,offset,raddr);

	size = round_page(size);
	if (*raddr < vm_map_min(map))
		*raddr = vm_map_min(map);	/* hint */

	/*
	 * reserve some virtual space.
	 */

	if (uvm_map(map, raddr, size, NULL, offset,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(FALSE);
	}

	UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
	return(TRUE);
}
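
/*
 * usage sketch (condensed from uvm_map_extract() below): reserve VA now,
 * build a chain of real entries, then swap the chain in for the blank
 * entry with uvm_map_replace():
 *
 *	if (uvm_map_reserve(dstmap, len, start, &dstaddr) == FALSE)
 *		return(ENOMEM);
 *	... build "chain" of "nchain" map entries ...
 *	vm_map_lock(dstmap);
 *	if (!uvm_map_replace(dstmap, dstaddr, dstaddr + len, chain, nchain))
 *		... fail ...
 *	vm_map_unlock(dstmap);
 */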

/*
 * uvm_map_replace: replace a reserved (blank) area of memory with
 * real mappings.
 *
 * => caller must WRITE-LOCK the map
 * => we return TRUE if replacement was a success
 * => we expect the newents chain to have nnewents entries on it and
 *    we expect newents->prev to point to the last entry on the list
 * => note newents is allowed to be NULL
 */

int
uvm_map_replace(map, start, end, newents, nnewents)
	struct vm_map *map;
	vm_offset_t start, end;
	vm_map_entry_t newents;
	int nnewents;
{
	vm_map_entry_t oldent, last;
	UVMHIST_FUNC("uvm_map_replace");
	UVMHIST_CALLED(maphist);

	/*
	 * first find the blank map entry at the specified address
	 */

	if (!uvm_map_lookup_entry(map, start, &oldent)) {
		return(FALSE);
	}

	/*
	 * check to make sure we have a proper blank entry
	 */

	if (oldent->start != start || oldent->end != end ||
	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
		return(FALSE);
	}

#ifdef DIAGNOSTIC
	/*
	 * sanity check the newents chain
	 */
	{
		vm_map_entry_t tmpent = newents;
		int nent = 0;
		vm_offset_t cur = start;

		while (tmpent) {
			nent++;
			if (tmpent->start < cur)
				panic("uvm_map_replace1");
			if (tmpent->start > tmpent->end || tmpent->end > end) {
				printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
				    tmpent->start, tmpent->end, end);
				panic("uvm_map_replace2");
			}
			cur = tmpent->end;
			if (tmpent->next) {
				if (tmpent->next->prev != tmpent)
					panic("uvm_map_replace3");
			} else {
				if (newents->prev != tmpent)
					panic("uvm_map_replace4");
			}
			tmpent = tmpent->next;
		}
		if (nent != nnewents)
			panic("uvm_map_replace5");
	}
#endif

	/*
	 * map entry is a valid blank!   replace it.   (this does all the
	 * work of map entry link/unlink...).
	 */

	if (newents) {

		last = newents->prev;		/* we expect this */

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, newents);
		if (map->first_free == oldent)
			map->first_free = last;

		last->next = oldent->next;
		last->next->prev = last;
		newents->prev = oldent->prev;
		newents->prev->next = newents;
		map->nentries = map->nentries + (nnewents - 1);

	} else {

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, oldent->prev);
		if (map->first_free == oldent)
			map->first_free = oldent->prev;

		/* NULL list of new entries: just remove the old one */
		uvm_map_entry_unlink(map, oldent);
	}

	/*
	 * now we can free the old blank entry and return (the map stays
	 * locked; unlocking it is the caller's job).
	 */

	uvm_mapent_free(oldent);
	return(TRUE);
}

/*
 * uvm_map_extract: extract a mapping from a map and put it somewhere
 * (maybe removing the old mapping)
 *
 * => maps should be unlocked (we will write lock them)
 * => returns 0 on success, error code otherwise
 * => start must be page aligned
 * => len must be page sized
 * => flags:
 *	UVM_EXTRACT_REMOVE: remove mappings from srcmap
 *	UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
 *	UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
 *	UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
 *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
 *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
 *	       be used from within the kernel in a kernel level map <<<
 */
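
/*
 * illustrative sketch (not from the original source): moving a range of
 * kernel VA into another map might look like:
 *
 *	vm_offset_t dva = 0;
 *	error = uvm_map_extract(kernel_map, kva, len, dstmap, &dva,
 *	    UVM_EXTRACT_REMOVE);
 *
 * on success the mapping formerly at [kva, kva+len) in kernel_map is
 * established at [dva, dva+len) in dstmap ("kva", "dva" and "dstmap"
 * are hypothetical caller state).
 */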

int
uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
	vm_map_t srcmap, dstmap;
	vm_offset_t start, *dstaddrp;
	vm_size_t len;
	int flags;
{
	vm_offset_t dstaddr, end, newend, oldoffset, fudge, orig_fudge, oldstart;
	vm_map_entry_t chain, endchain, entry, orig_entry, newentry, deadentry;
	vm_size_t elen;
	int nchain, error, copy_ok;
	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap,start,len,0);
	UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);

#ifdef DIAGNOSTIC
	/*
	 * step 0: sanity check: start must be on a page boundary, length
	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
	 * REMOVE.
	 */
	if ((start & PAGE_MASK) || (len & PAGE_MASK))
		panic("uvm_map_extract1");
	if (flags & UVM_EXTRACT_REMOVE)
		if (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF))
			panic("uvm_map_extract2");
#endif

	/*
	 * step 1: reserve space in the target map for the extracted area
	 */

	dstaddr = *dstaddrp;
	if (uvm_map_reserve(dstmap, len, start, &dstaddr) == FALSE)
		return(ENOMEM);
	*dstaddrp = dstaddr;	/* pass address back to caller */
	UVMHIST_LOG(maphist, "  dstaddr=0x%x", dstaddr,0,0,0);

	/*
	 * step 2: setup for the extraction process loop by init'ing the
	 * map entry chain, locking src map, and looking up the first useful
	 * entry in the map.
	 */

	end = start + len;
	newend = dstaddr + len;
	chain = endchain = NULL;
	nchain = 0;
	vm_map_lock(srcmap);

	if (uvm_map_lookup_entry(srcmap, start, &entry)) {

		/* "start" is within an entry */
		if (flags & UVM_EXTRACT_QREF) {
			/*
			 * for quick references we don't clip the entry, so
			 * the entry may map space "before" the starting
			 * virtual address... this is the "fudge" factor
			 * (which can be non-zero only the first time
			 * through the "while" loop in step 3).
			 */
			fudge = start - entry->start;
		} else {
			/*
			 * normal reference: we clip the map to fit (thus
			 * fudge is zero)
			 */
			UVM_MAP_CLIP_START(srcmap, entry, start);
			SAVE_HINT(srcmap, entry->prev);
			fudge = 0;
		}

	} else {

		/* "start" is not within an entry ... skip to next entry */
		if (flags & UVM_EXTRACT_CONTIG) {
			error = EINVAL;
			goto bad;	/* definite hole here ... */
		}

		entry = entry->next;
		fudge = 0;
	}
	/* save values from srcmap for step 6 */
	orig_entry = entry;
	orig_fudge = fudge;

	/*
	 * step 3: now start looping through the map entries, extracting
	 * as we go.
	 */

	while (entry->start < end && entry != &srcmap->header) {

		/* if we are not doing a quick reference, clip it */
		if ((flags & UVM_EXTRACT_QREF) == 0)
			UVM_MAP_CLIP_END(srcmap, entry, end);

		/* clear needs_copy (allow chunking) */
		if (UVM_ET_ISNEEDSCOPY(entry)) {
			if (fudge)
				oldstart = entry->start;
			else
				oldstart = 0;	/* XXX: unnecessary, to avert gcc warning */
			amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
			if (UVM_ET_ISNEEDSCOPY(entry)) {	/* failed? */
				error = ENOMEM;
				goto bad;
			}
			if (fudge) {
				/* amap_copy could clip (during chunk)! update fudge */
				fudge = fudge - (entry->start - oldstart);
				orig_fudge = fudge;
			}
		}

		/* calculate the offset of this from "start" */
		oldoffset = (entry->start + fudge) - start;

		/* allocate a new map entry */
		newentry = uvm_mapent_alloc(dstmap);
		if (newentry == NULL) {
			error = ENOMEM;
			goto bad;
		}

		/* set up new map entry */
		newentry->next = NULL;
		newentry->prev = endchain;
		newentry->start = dstaddr + oldoffset;
		newentry->end =
		    newentry->start + (entry->end - (entry->start + fudge));
		if (newentry->end > newend)
			newentry->end = newend;
		newentry->object.uvm_obj = entry->object.uvm_obj;
		if (newentry->object.uvm_obj) {
			if (newentry->object.uvm_obj->pgops->pgo_reference)
				newentry->object.uvm_obj->pgops->
				    pgo_reference(newentry->object.uvm_obj);
			newentry->offset = entry->offset + fudge;
		} else {
			newentry->offset = 0;
		}
		newentry->etype = entry->etype;
		newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
		    entry->max_protection : entry->protection;
		newentry->max_protection = entry->max_protection;
		newentry->inheritance = entry->inheritance;
		newentry->wired_count = 0;
		newentry->aref.ar_amap = entry->aref.ar_amap;
		if (newentry->aref.ar_amap) {
			newentry->aref.ar_slotoff =
			    entry->aref.ar_slotoff + (fudge / PAGE_SIZE);
			amap_ref(newentry, AMAP_SHARED |
			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
		} else {
			newentry->aref.ar_slotoff = 0;
		}
		newentry->advice = entry->advice;

		/* now link it on the chain */
		nchain++;
		if (endchain == NULL) {
			chain = endchain = newentry;
		} else {
			endchain->next = newentry;
			endchain = newentry;
		}

		/* end of 'while' loop! */
		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
		    (entry->next == &srcmap->header ||
		    entry->next->start != entry->end)) {
			error = EINVAL;
			goto bad;
		}
		entry = entry->next;
		fudge = 0;
	}

	/*
	 * step 4: close off chain (in format expected by uvm_map_replace)
	 */

	if (chain)
		chain->prev = endchain;

	/*
	 * step 5: attempt to lock the dest map so we can pmap_copy.
	 * note usage of copy_ok:
	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
	 */

	if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {

		copy_ok = 1;
		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain, nchain)) {
			if (srcmap != dstmap)
				vm_map_unlock(dstmap);
			error = EIO;
			goto bad;
		}

	} else {

		copy_ok = 0;
		/* replace deferred until step 7 */

	}

	/*
	 * step 6: traverse the srcmap a second time to do the following:
	 *  - if we got a lock on the dstmap do pmap_copy
	 *  - if UVM_EXTRACT_REMOVE remove the entries
	 * we make use of orig_entry and orig_fudge (saved in step 2)
	 */

	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {

		/* purge possible stale hints from srcmap */
		if (flags & UVM_EXTRACT_REMOVE) {
			SAVE_HINT(srcmap, orig_entry->prev);
			if (srcmap->first_free->start >= start)
				srcmap->first_free = orig_entry->prev;
		}

		entry = orig_entry;
		fudge = orig_fudge;
		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */

		while (entry->start < end && entry != &srcmap->header) {

			if (copy_ok) {
				oldoffset = (entry->start + fudge) - start;
				elen = min(end, entry->end) -
				    (entry->start + fudge);
				pmap_copy(dstmap->pmap, srcmap->pmap,
				    dstaddr + oldoffset, elen,
				    entry->start + fudge);
			}

			if (flags & UVM_EXTRACT_REMOVE) {
				pmap_remove(srcmap->pmap, entry->start,
				    entry->end);
				uvm_map_entry_unlink(srcmap, entry);
				entry->next = deadentry;
				deadentry = entry;
			}

			/* end of 'while' loop */
			entry = entry->next;
			fudge = 0;
		}

		/* unlock dstmap.  we will dispose of deadentry in step 7 if needed */
		if (copy_ok && srcmap != dstmap)
			vm_map_unlock(dstmap);

	} else {
		deadentry = NULL;	/* XXX: shut up gcc warning */
	}

	/*
	 * step 7: we are done with the source map, unlock.   if copy_ok
	 * is 0 then we have not replaced the dummy mapping in dstmap yet
	 * and we need to do so now.
	 */

	vm_map_unlock(srcmap);
	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
		uvm_unmap_detach(deadentry, 0);	/* dispose of old entries */

	/* now do the replacement if we didn't do it in step 5 */
	if (copy_ok == 0) {
		vm_map_lock(dstmap);
		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain, nchain);
		vm_map_unlock(dstmap);

		if (error == FALSE) {
			error = EIO;
			goto bad2;
		}
	}

	/*
	 * done!
	 */
	return(0);

	/*
	 * bad: failure recovery
	 */
bad:
	vm_map_unlock(srcmap);
bad2:			/* src already unlocked */
	if (chain)
		uvm_unmap_detach(chain,
		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
	uvm_unmap(dstmap, dstaddr, dstaddr+len, 1);	/* ??? */
	return(error);
}

/* end of extraction functions */

/*
 * uvm_map_submap: punch down part of a map into a submap
 *
 * => only the kernel_map is allowed to be submapped
 * => the purpose of submapping is to break up the locking granularity
 *	of a larger map
 * => the range specified must have been mapped previously with a uvm_map()
 *	call [with uobj==NULL] to create a blank map entry in the main map.
 *	[And it had better still be blank!]
 * => maps which contain submaps should never be copied or forked.
 * => to remove a submap, use uvm_unmap() on the main map
 *	and then uvm_map_deallocate() the submap.
 * => main map must be unlocked.
 * => submap must have been init'd and have a zero reference count.
 *	[need not be locked as we don't actually reference it]
 */
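
/*
 * usage sketch (illustrative only; "submap", "va" and "size" are
 * hypothetical): first create a blank entry with uvm_map(), then punch
 * the submap in:
 *
 *	(void) uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET,
 *	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
 *	uvm_map_submap(kernel_map, va, va + size, submap);
 */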

int
uvm_map_submap(map, start, end, submap)
	vm_map_t map, submap;
	vm_offset_t start, end;
{
	vm_map_entry_t entry;
	int result;
	UVMHIST_FUNC("uvm_map_submap"); UVMHIST_CALLED(maphist);

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
	} else {
		entry = NULL;
	}

	if (entry != NULL &&
	    entry->start == start && entry->end == end &&
	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {

		/*
		 * doit!
		 */
		entry->etype |= (UVM_ET_MAP|UVM_ET_SUBMAP);
		entry->object.sub_map = submap;
		entry->offset = 0;
		uvm_map_reference(submap);
		result = KERN_SUCCESS;
	} else {
		result = KERN_INVALID_ARGUMENT;
	}
	vm_map_unlock(map);

	return(result);
}


/*
 * uvm_map_protect: change map protection
 *
 * => set_max means set max_protection.
 * => map must be unlocked.
 * => XXXCDC: does not work properly with share maps.  rethink.
 */

#define MASK(entry)	( UVM_ET_ISCOPYONWRITE(entry) ? \
			    ~VM_PROT_WRITE : VM_PROT_ALL)
#define max(a,b)	((a) > (b) ? (a) : (b))

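/*
 * illustrative sketch (not from the original source): an mprotect(2)
 * style caller operating on a process map would look something like:
 *
 *	rv = uvm_map_protect(&p->p_vmspace->vm_map, va, va + len,
 *	    VM_PROT_READ, FALSE);
 *
 * "p", "va" and "len" are hypothetical caller state.
 */
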
int
uvm_map_protect(map, start, end, new_prot, set_max)
	vm_map_t map;
	vm_offset_t start, end;
	vm_prot_t new_prot;
	boolean_t set_max;
{
	vm_map_entry_t current, entry;
	UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
	    map, start, end, new_prot);

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (UVM_ET_ISSUBMAP(current)) {
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return(KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/* go back and fix up protections (no need to clip this time). */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		UVM_MAP_CLIP_END(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) & old_prot;
		else
			current->protection = new_prot;

		/*
		 * update physical map if necessary.  worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {

			if (UVM_ET_ISMAP(current) && !UVM_ET_ISSUBMAP(current)) {
				/* share map?  gotta go down a level */
				vm_map_entry_t share_entry;
				vm_offset_t share_end;

				/*
				 * note: a share map has its own address space
				 * (starting at zero).  current->offset is the
				 * offset into the share map where our mapping
				 * starts.  the length of our mapping is
				 * (current->end - current->start).  thus, our
				 * mapping goes from current->offset to
				 * share_end (which is: current->offset +
				 * length) in the share map's address space.
				 *
				 * thus for any share_entry we need to make
				 * sure that the addresses we've got fall in
				 * the range we want.  we use:
				 *	max(any share_entry->start, current->offset)
				 *	min(any share_entry->end, share_end)
				 *
				 * of course to change our pmap we've got to
				 * convert the share map address back to our
				 * map's virtual address space using:
				 *	our_va = share_va - current->offset +
				 *		 current->start
				 *
				 * XXXCDC: protection change in sharemap may
				 * require use of pmap_page_protect.  needs a
				 * rethink.
				 */

				vm_map_lock(current->object.share_map);
				/* note: current->offset is offset into share map */
				(void) uvm_map_lookup_entry(current->object.share_map,
				    current->offset, &share_entry);
				share_end = current->offset +
				    (current->end - current->start);
				while ((share_entry !=
				    &current->object.share_map->header) &&
				    (share_entry->start < share_end)) {

					pmap_protect(map->pmap,
					    (max(share_entry->start, current->offset)
					    - current->offset + current->start),
					    min(share_entry->end, share_end)
					    - current->offset + current->start,
					    current->protection & MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);

			} else {	/* not share map! */

				pmap_protect(map->pmap, current->start,
				    current->end,
				    current->protection & MASK(current));

			}
		}
		current = current->next;
	}

	vm_map_unlock(map);
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(KERN_SUCCESS);
}

#undef max
#undef MASK

/*
 * uvm_map_inherit: set inheritance code for range of addrs in map.
 *
 * => map must be unlocked
 * => note that the inherit code is used during a "fork".  see fork
 *	code for details.
 * => XXXCDC: currently only works in main map.  what about share map?
 */

int
uvm_map_inherit(map, start, end, new_inheritance)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	vm_inherit_t new_inheritance;
{
	vm_map_entry_t entry, temp_entry;
	UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
	    map, start, end, new_inheritance);

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = temp_entry->next;
	}

	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_unlock(map);
	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
	return(KERN_SUCCESS);
}
1885
1886 /*
1887 * uvm_map_pageable: sets the pageability of a range in a map.
1888 *
1889 * => regions sepcified as not pageable require lock-down (wired) memory
1890 * and page tables.
1891 * => map must not be locked.
1892 * => XXXCDC: check this and try and clean it up.
1893 */
1894
1895 int uvm_map_pageable(map, start, end, new_pageable)
1896
1897 vm_map_t map;
1898 vm_offset_t start, end;
1899 boolean_t new_pageable;
1900
1901 {
1902 vm_map_entry_t entry, start_entry;
1903 vm_offset_t failed = 0;
1904 int rv;
1905 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
1906 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
1907 map, start, end, new_pageable);
1908
1909 vm_map_lock(map);
1910 VM_MAP_RANGE_CHECK(map, start, end);
1911
1912 /*
1913 * only one pageability change may take place at one time, since
1914 * uvm_fault_wire assumes it will be called only once for each
1915 * wiring/unwiring. therefore, we have to make sure we're actually
1916 * changing the pageability for the entire region. we do so before
1917 * making any changes.
1918 */
1919
1920 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1921 vm_map_unlock(map);
1922
1923 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
1924 return(KERN_INVALID_ADDRESS);
1925 }
1926 entry = start_entry;
1927
1928 /*
1929 * handle wiring and unwiring separately.
1930 */
1931
1932 if (new_pageable) { /* unwire */
1933
1934 UVM_MAP_CLIP_START(map, entry, start);
1935
1936 /*
1937 * unwiring. first ensure that the range to be unwired is really
1938 * wired down and that there are no holes.
1939 */
1940 while ((entry != &map->header) && (entry->start < end)) {
1941
1942 if (entry->wired_count == 0 ||
1943 (entry->end < end &&
1944 (entry->next == &map->header ||
1945 entry->next->start > entry->end))) {
1946 vm_map_unlock(map);
1947 UVMHIST_LOG(maphist,"<- done (INVALID UNWIRE ARG)",0,0,0,0);
1948 return(KERN_INVALID_ARGUMENT);
1949 }
1950 entry = entry->next;
1951 }
1952
1953 /*
1954 * now decrement the wiring count for each region. if a region
1955 * becomes completely unwired, unwire its physical pages and mappings.
1956 */
1957 #if 0 /* not necessary: uvm_fault_unwire does not lock */
1958 lock_set_recursive(&map->lock);
1959 #endif /* XXXCDC */
1960
1961 entry = start_entry;
1962 while ((entry != &map->header) && (entry->start < end)) {
1963 UVM_MAP_CLIP_END(map, entry, end);
1964
1965 entry->wired_count--;
1966 if (entry->wired_count == 0)
1967 uvm_map_entry_unwire(map, entry);
1968
1969 entry = entry->next;
1970 }
1971 #if 0 /* XXXCDC: not necessary, see above */
1972 lock_clear_recursive(&map->lock);
1973 #endif
1974 vm_map_unlock(map);
1975 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
1976 return(KERN_SUCCESS);
1977
1978 /*
1979 * end of unwire case!
1980 */
1981 }
1982
1983 /*
1984 * wire case: in two passes [XXXCDC: ugly block of code here]
1985 *
1986 * 1: holding the write lock, we create any anonymous maps that need
1987 * to be created. then we clip each map entry to the region to
1988 * be wired and increment its wiring count.
1989 *
1990 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
1991 * in the pages for any newly wired area (wired_count is 1).
1992 *
1993 * downgrading to a read lock for uvm_fault_wire avoids a possible
1994 * deadlock with another thread that may have faulted on one of
1995 * the pages to be wired (it would mark the page busy, blocking
1996 * us, then in turn block on the map lock that we hold). because
1997 * of problems in the recursive lock package, we cannot upgrade
1998 * to a write lock in vm_map_lookup. thus, any actions that
1999 * require the write lock must be done beforehand. because we
2000 * keep the read lock on the map, the copy-on-write status of the
2001 * entries we modify here cannot change.
2002 */
2003
2004 while ((entry != &map->header) && (entry->start < end)) {
2005
2006 if (entry->wired_count == 0) { /* not already wired? */
2007
2008 /*
2009 * perform actions of vm_map_lookup that need the write lock on
2010 * the map: create an anonymous map for a copy-on-write region,
2011 * or an anonymous map for a zero-fill region.
2012 *
2013 * we don't have to do this for entries that point to sharing
2014 * maps, because we won't hold the lock on the sharing map.
2015 */
2016
2017 if (!UVM_ET_ISMAP(entry)) { /* not sharing map */
2018 /*
2019 * XXXCDC: protection vs. max_protection?? (wirefault uses max?)
2020 * XXXCDC: used to do it always if uvm_obj == NULL (wrong?)
2021 */
2022 if ( UVM_ET_ISNEEDSCOPY(entry) &&
2023 (entry->protection & VM_PROT_WRITE) != 0) {
2024
2025 amap_copy(map, entry, M_WAITOK, TRUE, start, end);
2026 /* XXXCDC: wait OK? */
2027
2028 }
2029 }
2030 } /* wired_count == 0 */
2031 UVM_MAP_CLIP_START(map, entry, start);
2032 UVM_MAP_CLIP_END(map, entry, end);
2033 entry->wired_count++;
2034
2035 /*
2036 * Check for holes
2037 */
2038 if (entry->end < end && (entry->next == &map->header ||
2039 entry->next->start > entry->end)) {
2040 /*
2041 * found one. amap creation actions do not need to be undone,
2042 * but the wired counts need to be restored.
2043 */
2044 while (entry != &map->header && entry->end > start) {
2045 entry->wired_count--;
2046 entry = entry->prev;
2047 }
2048 vm_map_unlock(map);
2049 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
2050 return(KERN_INVALID_ARGUMENT);
2051 }
2052 entry = entry->next;
2053 }
2054
2055 /*
2056 * Pass 2.
2057 */
2058 /*
2059 * HACK HACK HACK HACK
2060 *
2061 * if we are wiring in the kernel map or a submap of it, unlock the
2062 * map to avoid deadlocks. we trust that the kernel threads are
2063 * well-behaved, and therefore will not do anything destructive to
2064 * this region of the map while we have it unlocked. we cannot
2065 * trust user threads to do the same.
2066 *
2067 * HACK HACK HACK HACK
2068 */
2069 if (vm_map_pmap(map) == pmap_kernel()) {
2070 vm_map_unlock(map); /* trust me ... */
2071 } else {
2072 vm_map_set_recursive(&map->lock);
2073 lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, curproc);
2074 }
2075
2076 rv = 0;
2077 entry = start_entry;
2078 while (entry != &map->header && entry->start < end) {
2079 /*
2080 * if uvm_fault_wire fails for any page we need to undo what has
2081 * been done. we decrement the wiring count for those pages which
2082 * have not yet been wired (now) and unwire those that have
2083 * (later).
2084 *
2085 * XXX this violates the locking protocol on the map,
2086 * needs to be fixed. [because we only have a read lock on map we
2087 * shouldn't be changing wired_count?]
2088 */
2089 if (rv) {
2090 entry->wired_count--;
2091 } else if (entry->wired_count == 1) {
2092 rv = uvm_fault_wire(map, entry->start, entry->end);
2093 if (rv) {
2094 failed = entry->start;
2095 entry->wired_count--;
2096 }
2097 }
2098 entry = entry->next;
2099 }
2100
2101 if (vm_map_pmap(map) == pmap_kernel()) {
2102 vm_map_lock(map); /* relock */
2103 }
2104 else {
2105 vm_map_clear_recursive(&map->lock);
2106 }
2107
2108 if (rv) { /* failed? */
2109 vm_map_unlock(map);
2110 (void) uvm_map_pageable(map, start, failed, TRUE);
2111 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
2112 return(rv);
2113 }
2114 vm_map_unlock(map);
2115
2116 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
2117 return(KERN_SUCCESS);
2118 }
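
/*
 * usage sketch [not compiled]: wire a user buffer down for the duration
 * of an I/O operation, then unwire it. the wrapper is made up; it only
 * assumes uvm_map_pageable as defined above (new_pageable FALSE wires,
 * TRUE unwires).
 */
#if 0
static int example_wire_for_io(map, start, end)

vm_map_t map;
vm_offset_t start, end;

{
	int rv;

	rv = uvm_map_pageable(map, start, end, FALSE);	/* wire */
	if (rv != KERN_SUCCESS)
		return(rv);
	/* ... do I/O on the wired range ... */
	return(uvm_map_pageable(map, start, end, TRUE));	/* unwire */
}
#endif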
2119
2120 /*
2121 * uvm_map_clean: push dirty pages off to backing store.
2122 *
2123 * => valid flags:
2124 * if (flags & PGO_SYNCIO): dirty pages are written synchronously
2125 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
2126 * if (flags & PGO_FREE): any cached pages are freed after clean
2127 * => returns an error if any part of the specified range isn't mapped
2128 * => never a need to flush amap layer since the anonymous memory has
2129 * no permanent home...
2130 * => called from sys_msync()
2131 * => caller must not write-lock map (read OK).
2132 * => we may sleep while cleaning if SYNCIO [with map read-locked]
2133 * => XXX: does this handle share maps properly?
2134 */
2135
2136 int uvm_map_clean(map, start, end, flags)
2137
2138 vm_map_t map;
2139 vm_offset_t start, end;
2140 int flags;
2141
2142 {
2143 vm_map_entry_t current;
2144 vm_map_entry_t entry;
2145 vm_size_t size;
2146 struct uvm_object *object;
2147 vm_offset_t offset;
2148 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
2149 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
2150 map, start, end, flags);
2151
2152 vm_map_lock_read(map);
2153 VM_MAP_RANGE_CHECK(map, start, end);
2154 if (!uvm_map_lookup_entry(map, start, &entry)) {
2155 vm_map_unlock_read(map);
2156 return(KERN_INVALID_ADDRESS);
2157 }
2158
2159 /*
2160 * Make a first pass to check for holes.
2161 */
2162 for (current = entry; current->start < end; current = current->next) {
2163 if (UVM_ET_ISSUBMAP(current)) {
2164 vm_map_unlock_read(map);
2165 return(KERN_INVALID_ARGUMENT);
2166 }
2167 if (end > current->end &&
2168 (current->next == &map->header ||
2169 current->end != current->next->start)) {
2170 vm_map_unlock_read(map);
2171 return(KERN_INVALID_ADDRESS);
2172 }
2173 }
2174
2175 /*
2176 * add "cleanit" flag to flags (for generic flush routine).
2177 * then make a second pass, cleaning/uncaching pages from
2178 * the indicated objects as we go.
2179 */
2180 flags = flags | PGO_CLEANIT;
2181 for (current = entry; current->start < end; current = current->next) {
2182 offset = current->offset + (start - current->start);
2183 size = (end <= current->end ? end : current->end) - start;
2184
2185 /*
2186 * get object/offset. special case to handle share maps.
2187 */
2188 if (UVM_ET_ISMAP(current)) { /* share map? */
2189 register vm_map_t smap;
2190 vm_map_entry_t tentry;
2191 vm_size_t tsize;
2192
2193 smap = current->object.share_map;
2194 vm_map_lock_read(smap);
2195 (void) uvm_map_lookup_entry(smap, offset, &tentry);
2196 tsize = tentry->end - offset;
2197 if (tsize < size)
2198 size = tsize;
2199 object = tentry->object.uvm_obj;
2200 offset = tentry->offset + (offset - tentry->start);
2201 simple_lock(&object->vmobjlock);
2202 vm_map_unlock_read(smap);
2203 } else {
2204 object = current->object.uvm_obj;
2205 simple_lock(&object->vmobjlock);
2206 }
2207
2208 /*
2209 * flush pages if writing is allowed. note that object is locked.
2210 * XXX should we continue on an error?
2211 */
2212
2213 if (object && object->pgops &&
2214 (current->protection & VM_PROT_WRITE) != 0) {
2215 if (!object->pgops->pgo_flush(object, offset, offset+size, flags)) {
2216 simple_unlock(&object->vmobjlock);
2217 vm_map_unlock_read(map);
2218 return(KERN_FAILURE);
2219 }
2220 }
2221 simple_unlock(&object->vmobjlock);
2222 start += size;
2223 }
2224 vm_map_unlock_read(map);
2225 return(KERN_SUCCESS);
2226 }
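
/*
 * usage sketch [not compiled]: an msync(2)-style synchronous flush that
 * frees the cleaned pages afterwards. the wrapper is made up; PGO_SYNCIO
 * and PGO_FREE are the flags documented above (PGO_CLEANIT is added by
 * uvm_map_clean itself).
 */
#if 0
static int example_msync_sync(p, start, end)

struct proc *p;
vm_offset_t start, end;

{
	return(uvm_map_clean(&p->p_vmspace->vm_map, start, end,
	    PGO_SYNCIO|PGO_FREE));
}
#endif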
2227
2228
2229 /*
2230 * uvm_map_checkprot: check protection in map
2231 *
2232 * => must allow specified protection in a fully allocated region.
2233 * => map must be read or write locked by caller.
2234 */
2235
2236 boolean_t uvm_map_checkprot(map, start, end, protection)
2237
2238 vm_map_t map;
2239 vm_offset_t start, end;
2240 vm_prot_t protection;
2241
2242 {
2243 vm_map_entry_t entry;
2244 vm_map_entry_t tmp_entry;
2245
2246 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
2247 return(FALSE);
2248 }
2249
2250 entry = tmp_entry;
2251
2252 while (start < end) {
2253 if (entry == &map->header) {
2254 return(FALSE);
2255 }
2256
2257 /*
2258 * no holes allowed
2259 */
2260
2261 if (start < entry->start) {
2262 return(FALSE);
2263 }
2264
2265 /*
2266 * check protection associated with entry
2267 */
2268
2269 if ((entry->protection & protection) != protection) {
2270 return(FALSE);
2271 }
2272
2273 /* go to next entry */
2274
2275 start = entry->end;
2276 entry = entry->next;
2277 }
2278 return(TRUE);
2279 }
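
/*
 * usage sketch [not compiled]: verify that a range is fully mapped
 * read/write before touching it. the wrapper is made up; note that the
 * caller must hold the map lock (read is enough), per the notes above.
 */
#if 0
static boolean_t example_range_writable(map, start, end)

vm_map_t map;
vm_offset_t start, end;

{
	boolean_t rv;

	vm_map_lock_read(map);
	rv = uvm_map_checkprot(map, start, end, VM_PROT_READ|VM_PROT_WRITE);
	vm_map_unlock_read(map);
	return(rv);
}
#endif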
2280
2281 /*
2282 * uvmspace_alloc: allocate a vmspace structure.
2283 *
2284 * - structure includes vm_map and pmap
2285 * - XXX: no locking on this structure
2286 * - refcnt set to 1, rest must be init'd by caller
2287 */
2288 struct vmspace *uvmspace_alloc(min, max, pageable)
2289
2290 vm_offset_t min, max;
2291 int pageable;
2292
2293 {
2294 struct vmspace *vm;
2295 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
2296
2297 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
2298 bzero(vm, sizeof(*vm));
2299 uvm_map_setup(&vm->vm_map, min, max, pageable);
2300 #if defined(PMAP_NEW)
2301 vm->vm_map.pmap = pmap_create();
2302 #else
2303 vm->vm_map.pmap = pmap_create(0);
2304 #endif
2305 vm->vm_refcnt = 1;
2306 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
2307 return (vm);
2308 }
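
/*
 * usage sketch [not compiled]: allocate a fresh pageable vmspace
 * covering the standard user address range. VM_MIN_ADDRESS and
 * VM_MAXUSER_ADDRESS are the usual MD limits used elsewhere in this
 * file; the variable name is made up.
 */
#if 0
	struct vmspace *vm;

	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, TRUE);
#endif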
2309
2310 /*
2311 * uvmspace_share: share a vmspace between two processes
2312 *
2313 * - XXX: no locking on vmspace
2314 * - used for vfork, threads(?)
2315 */
2316
2317 void uvmspace_share(p1, p2)
2318
2319 struct proc *p1, *p2;
2320
2321 {
2322 p2->p_vmspace = p1->p_vmspace;
2323 p1->p_vmspace->vm_refcnt++;
2324 }
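
/*
 * usage sketch [not compiled]: in a vfork-style path the child simply
 * borrows the parent's vmspace until it execs or exits. the variable
 * names are made up; uvmspace_share does the reference counting.
 */
#if 0
	uvmspace_share(parent, child);	/* child now runs on parent's map */
#endif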
2325
2326 /*
2327 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
2328 *
2329 * - XXX: no locking on vmspace
2330 */
2331
2332 void uvmspace_unshare(p)
2333
2334 struct proc *p;
2335
2336 {
2337 struct vmspace *nvm, *ovm = p->p_vmspace;
2338
2339 if (ovm->vm_refcnt == 1)
2340 return; /* nothing to do: vmspace isn't shared in the first place */
2341
2342 nvm = uvmspace_fork(ovm); /* make a new vmspace, still holding old one */
2343 p->p_vmspace = nvm;
2344 pmap_activate(p); /* switch to new vmspace */
2345 uvmspace_free(ovm); /* drop reference to old vmspace */
2346 }
2347
2348 /*
2349 * uvmspace_exec: the process wants to exec a new program
2350 *
2351 * - XXX: no locking on vmspace
2352 */
2353
2354 void uvmspace_exec(p)
2355
2356 struct proc *p;
2357
2358 {
2359 struct vmspace *nvm, *ovm = p->p_vmspace;
2360 vm_map_t map = &ovm->vm_map;
2361
2362 #ifdef sparc
2363 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
2364 kill_user_windows(p); /* before stack addresses go away */
2365 #endif
2366
2367 /*
2368 * see if more than one process is using this vmspace...
2369 */
2370
2371 if (ovm->vm_refcnt == 1) {
2372
2373 /*
2374 * if p is the only process using its vmspace then we can safely
2375 * recycle that vmspace for the program that is being exec'd.
2376 */
2377
2378 #ifdef SYSVSHM
2379 /*
2380 * SYSV SHM semantics require us to kill all segments on an exec.
2381 */
2382 if (ovm->vm_shm)
2383 shmexit(ovm);
2384 #endif
2385
2386 /*
2387 * now unmap the old program
2388 */
2389 uvm_unmap(map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, 0);
2390
2391 } else {
2392
2393 /*
2394 * p's vmspace is being shared, so we can't reuse it for p since
2395 * it is still being used for others. allocate a new vmspace for
2396 * p
2397 */
2398 nvm = uvmspace_alloc(map->min_offset, map->max_offset,
2399 map->entries_pageable);
2400
2401 #if (defined(i386) && !defined(PMAP_NEW)) || defined(pc532)
2402 /*
2403 * allocate zero fill area in the new vmspace's map for user page
2404 * tables for ports that have old style pmaps that keep user page
2405 * tables in the top part of the process' address space.
2406 *
2407 * XXXCDC: this should go away once all pmaps are fixed
2408 */
2409 {
2410 vm_offset_t addr = VM_MAXUSER_ADDRESS;
2411 if (uvm_map(&nvm->vm_map, &addr, VM_MAX_ADDRESS - addr,
2412 NULL, UVM_UNKNOWN_OFFSET,
2413 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
2414 UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW))
2415 != KERN_SUCCESS)
2416 panic("vm_allocate of PT page area failed");
2417 }
2418 #endif
2419
2420 /*
2421 * install new vmspace and drop our ref to the old one.
2422 */
2423
2424 p->p_vmspace = nvm;
2425 pmap_activate(p);
2426 uvmspace_free(ovm);
2427 }
2428 }
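
/*
 * usage sketch [not compiled]: the exec path resets the address space
 * first and then maps the new image into it. everything but
 * uvmspace_exec is made up here.
 */
#if 0
	uvmspace_exec(p);	/* recycle or replace p's vmspace */
	/* ... then map text/data/bss/stack into &p->p_vmspace->vm_map ... */
#endif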
2429
2430 /*
2431 * uvmspace_free: free a vmspace data structure
2432 *
2433 * - XXX: no locking on vmspace
2434 */
2435
2436 void uvmspace_free(vm)
2437
2438 struct vmspace *vm;
2439
2440 {
2441 vm_map_entry_t dead_entries;
2442 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
2443
2444 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
2445 if (--vm->vm_refcnt == 0) {
2446 /*
2447 * lock the map, to wait out all other references to it. delete
2448 * all of the mappings and pages they hold, then call the pmap
2449 * module to reclaim anything left.
2450 */
2451 vm_map_lock(&vm->vm_map);
2452 if (vm->vm_map.nentries) {
2453 (void) uvm_unmap_remove(&vm->vm_map, vm->vm_map.min_offset,
2454 vm->vm_map.max_offset, TRUE, &dead_entries);
2455 if (dead_entries != NULL)
2456 uvm_unmap_detach(dead_entries, 0);
2457 }
2458 pmap_destroy(vm->vm_map.pmap);
2459 vm->vm_map.pmap = NULL;
2460 FREE(vm, M_VMMAP);
2461 }
2462 UVMHIST_LOG(maphist,"<- done", 0,0,0,0);
2463 }
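
/*
 * usage sketch [not compiled]: the exit path drops the process'
 * reference; the last reference holder tears the map down. the
 * surrounding context is made up.
 */
#if 0
	/* after the process is done touching user VAs: */
	uvmspace_free(p->p_vmspace);
	p->p_vmspace = NULL;
#endif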
2464
2465 /*
2466 * F O R K - m a i n e n t r y p o i n t
2467 */
2468 /*
2469 * uvmspace_fork: fork a process' main map
2470 *
2471 * => create a new vmspace for child process from parent.
2472 * => parent's map must not be locked.
2473 */
2474
2475 struct vmspace *uvmspace_fork(vm1)
2476
2477 struct vmspace *vm1;
2478
2479 {
2480 struct vmspace *vm2;
2481 vm_map_t old_map = &vm1->vm_map;
2482 vm_map_t new_map;
2483 vm_map_entry_t old_entry;
2484 vm_map_entry_t new_entry;
2485 pmap_t new_pmap;
2486 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
2487
2488 #if (defined(i386) && !defined(PMAP_NEW)) || defined(pc532)
2489 /*
2490 * avoid copying any of the parent's pagetables or other per-process
2491 * objects that reside in the map by marking all of them non-inheritable
2492 * XXXCDC: should go away
2493 */
2494 (void) uvm_map_inherit(old_map, VM_MAXUSER_ADDRESS, VM_MAX_ADDRESS,
2495 VM_INHERIT_NONE);
2496 #endif
2497
2498 vm_map_lock(old_map);
2499
2500 vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
2501 old_map->entries_pageable);
2502 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2503 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2504 new_map = &vm2->vm_map; /* XXX */
2505 new_pmap = new_map->pmap;
2506
2507 old_entry = old_map->header.next;
2508
2509 /*
2510 * go entry-by-entry
2511 */
2512
2513 while (old_entry != &old_map->header) {
2514
2515 /*
2516 * first, some sanity checks on the old entry
2517 */
2518 if (UVM_ET_ISSUBMAP(old_entry))
2519 panic("fork: encountered a submap during fork (illegal)");
2520 else if (UVM_ET_ISMAP(old_entry)) {
2521 if (UVM_ET_ISNEEDSCOPY(old_entry))
2522 panic("fork: encountered a share map entry that needs_copy (illegal)");
2523 if (UVM_ET_ISCOPYONWRITE(old_entry))
2524 panic("fork: encountered a copy_on_write share map entry (illegal)");
2525 if (old_entry->aref.ar_amap)
2526 panic("fork: detected share map entry that has an amap (illegal)");
2527 } else {
2528 if (!UVM_ET_ISCOPYONWRITE(old_entry) && UVM_ET_ISNEEDSCOPY(old_entry))
2529 panic("fork: non-copy_on_write map entry marked needs_copy (illegal)");
2530 }
2531
2532
2533 switch (old_entry->inheritance) {
2534 case VM_INHERIT_NONE:
2535
2536 /*
2537 * drop the mapping
2538 */
2539
2540 break;
2541
2542 case VM_INHERIT_SHARE:
2543
2544 /*
2545 * share the mapping: this means we want the old and new entries to
2546 * share amaps and backing objects.
2547 */
2548
2549 /*
2550 * if the old_entry needs a new amap (due to prev fork) then we need
2551 * to allocate it now so that we have something we own to share with
2552 * the new_entry. [in other words, we need to clear needs_copy]
2553 */
2554
2555 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
2556 /* get our own amap, clears needs_copy */
2557 amap_copy(old_map, old_entry, M_WAITOK, FALSE, 0, 0);
2558 /* XXXCDC: WAITOK??? */
2559 }
2560
2561 new_entry = uvm_mapent_alloc(new_map);
2562 uvm_mapent_copy(old_entry, new_entry); /* old_entry -> new_entry */
2563 new_entry->wired_count = 0; /* new pmap has nothing wired in it */
2564
2565 /*
2566 * gain reference to objects backing the map
2567 */
2568 if (UVM_ET_ISMAP(new_entry)) { /* share map? */
2569 uvm_map_reference(old_entry->object.share_map);
2570 } else {
2571 if (new_entry->aref.ar_amap)
2572 amap_ref(new_entry, AMAP_SHARED); /* share reference */
2573 if (new_entry->object.uvm_obj &&
2574 new_entry->object.uvm_obj->pgops->pgo_reference)
2575 new_entry->object.uvm_obj->
2576 pgops->pgo_reference(new_entry->object.uvm_obj);
2577 }
2578
2579 /* insert entry at end of new_map's entry list */
2580 uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
2581
2582 /*
2583 * pmap_copy the mappings: this routine is optional but if it is
2584 * there it will reduce the number of page faults in the new proc.
2585 */
2586
2587 pmap_copy(new_pmap, old_map->pmap, new_entry->start,
2588 (old_entry->end - old_entry->start), old_entry->start);
2589
2590 break;
2591
2592 case VM_INHERIT_COPY:
2593
2594 /*
2595 * copy-on-write the mapping (using mmap's MAP_PRIVATE semantics)
2596 */
2597
2598 /*
2599 * share maps: we special-case them (handled by uvm_map_sharemapcopy)
2600 */
2601
2602 if (UVM_ET_ISMAP(old_entry)) { /* share map? */
2603 uvm_map_sharemapcopy(old_map, old_entry, new_map);
2604 break;
2605 }
2606
2607 /*
2608 * not a share map. allocate new_entry, adjust reference counts.
2609 * (note that new references are read-only).
2610 */
2611
2612 new_entry = uvm_mapent_alloc(new_map);
2613 uvm_mapent_copy(old_entry, new_entry); /* old_entry -> new_entry */
2614 if (new_entry->aref.ar_amap) {
2615 amap_ref(new_entry, 0);
2616 }
2617 if (new_entry->object.uvm_obj &&
2618 new_entry->object.uvm_obj->pgops->pgo_reference)
2619 new_entry->object.uvm_obj->
2620 pgops->pgo_reference(new_entry->object.uvm_obj);
2621
2622 new_entry->wired_count = 0; /* new pmap has nothing wired in it */
2623 new_entry->etype |= (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
2624 uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
2625
2626 /*
2627 * the new entry will need an amap. it will either need to be
2628 * copied from the old entry or created from scratch (if the old
2629 * entry does not have an amap). can we defer this process until
2630 * later (by setting needs_copy) or do we need to do it now?
2631 *
2632 * we must do it now if any of the following conditions hold:
2633 *
2634 * 1. the old entry has an amap and it is not copy_on_write [i.e.
2635 * shared].
2636 * why: we would have to write-protect the old mapping in the
2637 * parent's pmap [thus needlessly changing the protection of a
2638 * shared mapping, something we don't want to do]
2639 * note: a non-copy-on-write old entry will not have an
2640 * amap unless we've used non-standard features of this VM system.
2641 * [also, see semantic note below...]
2642 *
2643 * 2. the old entry has an amap and that amap is being shared.
2644 * why: if the amap is being shared between 2 or more processes
2645 * they need to continue sharing the amap. if we try to defer
2646 * the copy there is no easy way to determine which processes need
2647 * to break off their references to the amap and which ones are supposed
2648 * to keep it at fault time.
2649 *
2650 * 3. if the old entry was copy_on_write and wired then we
2651 * are going to have to call fault_copy_entry now (see below).
2652 * that needs to have the amap copied also, so we do it here
2653 * too.
2654 *
2655 * semantic note: if the old entry was shared and had an amap
2656 * then the child gets a snapshot copy of the pages in the amap
2657 * now, but the child does not want to see any new pages added
2658 * to the amap by the parent after the fork. the child will see
2659 * changes made by the parent to any amap pages it inherits
2660 * until it writes them itself. to get these semantics we need
2661 * to copy the amap now (as per [1] above).
2662 */
2663
2664 if ((old_entry->aref.ar_amap &&
2665 (UVM_ET_ISCOPYONWRITE(old_entry) == FALSE ||
2666 (old_entry->aref.ar_amap->am_flags & AMAP_SHARED) != 0)) ||
2667 (old_entry->wired_count != 0 && UVM_ET_ISCOPYONWRITE(old_entry)) ) {
2668 amap_copy(new_map, new_entry, M_WAITOK, FALSE, 0, 0);
2669 /* XXXCDC: M_WAITOK? */
2670 }
2671
2672 /*
2673 * if an entry is wired down, then we can not get faults on access.
2674 * this means that we can't do COW because we can't write protect
2675 * the old entry (otherwise we could get a protection fault on wired
2676 * memory). if that is the case we must copy things now. note
2677 * that we've already allocated the new amap (above).
2678 */
2679
2680 if (old_entry->wired_count != 0 && UVM_ET_ISCOPYONWRITE(old_entry)) {
2681
2682 /*
2683 * copy it now
2684 */
2685
2686 amap_cow_now(new_map, new_entry); /* was fault_copy_entry */
2687
2688 } else {
2689
2690 /*
2691 * do a copy-on-write. two cases to consider:
2692 * 1. old_entry is MAP_SHARE (old_entry->copy_on_write == FALSE)
2693 * => no need to protect old mappings
2694 * 2. old_entry is MAP_PRIVATE (old_entry->copy_on_write == TRUE)
2695 * => must protect both old and new mappings
2696 */
2697
2698 if (UVM_ET_ISCOPYONWRITE(old_entry)) { /* private mapping? */
2699
2700 /*
2701 * protect old mappings. note that if needs_copy is true then
2702 * the mappings have already been protected elsewhere and there
2703 * is no need to do it again. also note that pmap_copy will
2704 * copy the protected mappings to the child.
2705 */
2706
2707 if (!UVM_ET_ISNEEDSCOPY(old_entry)) {
2708 /* write protect pages */
2709 pmap_protect(old_map->pmap, old_entry->start, old_entry->end,
2710 old_entry->protection & ~VM_PROT_WRITE);
2711 old_entry->etype |= UVM_ET_NEEDSCOPY;
2712 }
2713 }
2714
2715 pmap_copy(new_pmap, old_map->pmap, new_entry->start,
2716 (old_entry->end - old_entry->start), old_entry->start);
2717
2718 /*
2719 * protect new mappings. already taken care of for private
2720 * mappings by the call to pmap_protect above.
2721 */
2722
2723 if (!UVM_ET_ISCOPYONWRITE(old_entry)) {
2724 pmap_protect(new_pmap, new_entry->start, new_entry->end,
2725 new_entry->protection & ~VM_PROT_WRITE);
2726 }
2727 }
2728
2729 break;
2730 }
2731 old_entry = old_entry->next;
2732 }
2733
2734 new_map->size = old_map->size;
2735 vm_map_unlock(old_map);
2736
2737 #if (defined(i386) && !defined(PMAP_NEW)) || defined(pc532)
2738 /*
2739 * allocate zero fill area in the new vmspace's map for user page
2740 * tables for ports that have old style pmaps that keep user page
2741 * tables in the top part of the process' address space.
2742 *
2743 * XXXCDC: this should go away once all pmaps are fixed
2744 */
2745 {
2746 vm_offset_t addr = VM_MAXUSER_ADDRESS;
2747 if (uvm_map(new_map, &addr, VM_MAX_ADDRESS - addr,
2748 NULL, UVM_UNKNOWN_OFFSET,
2749 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
2750 UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW))
2751 != KERN_SUCCESS)
2752 panic("vm_allocate of PT page area failed");
2753 }
2754 #endif
2755
2756 #ifdef SYSVSHM
2757 if (vm1->vm_shm)
2758 shmfork(vm1, vm2);
2759 #endif
2760
2761 UVMHIST_LOG(maphist,"<- done",0,0,0,0);
2762 return(vm2);
2763 }
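
/*
 * usage sketch [not compiled]: a normal (non-vfork) fork hands the
 * child a copy-on-write snapshot of the parent's vmspace. the variable
 * names are made up.
 */
#if 0
	p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
#endif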
2764
2765
2766 /*
2767 * uvm_map_sharemapcopy: handle the copying of a share map during a
2768 * fork. this is a helper function for uvmspace_fork. it is called
2769 * when we are doing a fork and we have encountered a map entry which
2770 * has two attributes: [1] its inherit code is VM_INHERIT_COPY, and
2771 * [2] it points to a share map (i.e. is_a_map is true). in this case
2772 * we must traverse the area of the share map pointed to by the
2773 * old_entry and make private copies of the map entries in the share
2774 * map. this is somewhat similar to what happens in the non-share map
2775 * case in fork, but it has to handle multiple map entries which may
2776 * not be the proper size. it was separated out into its own function
2777 * in order to make the main body of the fork code easier to read and
2778 * understand!
2779 *
2780 * main_entry->offset = starting VA in share map for our mapping
2781 *
2782 * => main map is locked by caller.
2783 * => we lock share map.
2784 * => new map isn't in use yet (still being set up for the first time).
2785 */
2786
2787 void uvm_map_sharemapcopy(main_map, main_entry, new_map)
2788
2789 vm_map_t main_map, new_map;
2790 vm_map_entry_t main_entry;
2791
2792 {
2793 vm_map_t share_map = main_entry->object.share_map;
2794 vm_map_entry_t share_entry, new_entry;
2795 vm_offset_t shend = main_entry->offset +
2796 (main_entry->end - main_entry->start);
2797 int refs;
2798
2799 /*
2800 * lock share map. find first map entry of interest. clip if needed.
2801 */
2802
2803 vm_map_lock(share_map);
2804 if (uvm_map_lookup_entry(share_map, main_entry->offset, &share_entry))
2805 UVM_MAP_CLIP_START(share_map, share_entry, main_entry->offset);
2806
2807 while (share_entry != &share_map->header && share_entry->start < shend) {
2808
2809 /*
2810 * at this point we have a map entry that we need to make a copy of.
2811 */
2812
2813 UVM_MAP_CLIP_END(share_map, share_entry, shend); /* may need to clip? */
2814
2815 new_entry = uvm_mapent_alloc(new_map);
2816 uvm_mapent_copy(share_entry, new_entry); /* share_entry -> new_entry */
2817
2818 /* convert share map addresses back to main map addresses */
2819 new_entry->start = main_entry->start +
2820 (new_entry->start - main_entry->offset);
2821 new_entry->end = main_entry->start + (new_entry->end - main_entry->offset);
2822
2823 /* gain references */
2824 if (new_entry->aref.ar_amap) {
2825 amap_ref(new_entry, 0);
2826 }
2827 if (new_entry->object.uvm_obj &&
2828 new_entry->object.uvm_obj->pgops->pgo_reference)
2829 new_entry->object.uvm_obj->
2830 pgops->pgo_reference(new_entry->object.uvm_obj);
2831
2832 /* init rest of new entry and insert at end of new map */
2833 new_entry->wired_count = 0;
2834 new_entry->etype |= (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
2835 uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
2836
2837 /* don't bother trying to defer the copy in the share map case */
2838 amap_copy(new_map, new_entry, M_WAITOK, FALSE, 0, 0); /* XXXCDC: WAITOK? */
2839
2840 /* just like non-share case: can't COW wired memory */
2841 if (share_entry->wired_count != 0 && UVM_ET_ISCOPYONWRITE(share_entry)) {
2842
2843
2844 amap_cow_now(new_map, new_entry); /* was fault copy entry */
2845
2846 } else {
2847
2848 /* just like non-share case */
2849 if (UVM_ET_ISCOPYONWRITE(share_entry)) {
2850
2851 if (!UVM_ET_ISNEEDSCOPY(share_entry)) {
2852
2853 /*
2854 * must write protect pages. if we have the sole reference
2855 * to the share map we can use good old pmap_protect. if we
2856 * don't, then we have to use pmap_page_protect.
2857 *
2858 * note that the VA new_entry->start (starting entry of this
2859 * segment of the share map in child process) is the same
2860 * virtual address at which it is mapped in the parent (thus we
2861 * can mix main_map and new_entry in the pmap_protect call below).
2862 */
2863
2864 simple_lock(&share_map->ref_lock);
2865 refs = share_map->ref_count;
2866 simple_unlock(&share_map->ref_lock);
2867 if (refs == 1) {
2868 pmap_protect(main_map->pmap, new_entry->start, new_entry->end,
2869 share_entry->protection & ~VM_PROT_WRITE);
2870 } else {
2871 if (share_entry->aref.ar_amap) {
2872 simple_lock(&share_entry->aref.ar_amap->am_l);
2873 amap_share_protect(share_entry,
2874 share_entry->protection & ~VM_PROT_WRITE);
2875 simple_unlock(&share_entry->aref.ar_amap->am_l);
2876 }
2877 if (share_entry->object.uvm_obj) {
2878 #ifdef DIAGNOSTIC
2879 if (!share_entry->object.uvm_obj->pgops->pgo_shareprot)
2880 panic("fork: share_entry with no prot function");
2881 #endif
2882 simple_lock(&share_entry->object.uvm_obj->vmobjlock);
2883 share_entry->object.uvm_obj->pgops->
2884 pgo_shareprot(share_entry,
2885 share_entry->protection & ~VM_PROT_WRITE);
2886 simple_unlock(&share_entry->object.uvm_obj->vmobjlock);
2887 }
2888 }
2889
2890 share_entry->etype |= UVM_ET_NEEDSCOPY;
2891 }
2892 }
2893
2894 /*
2895 * now copy the mappings: note addresses are the same in both
2896 * main_map and new_map
2897 */
2898 pmap_copy(new_map->pmap, main_map->pmap, new_entry->start,
2899 (new_entry->end - new_entry->start), new_entry->start);
2900
2901 /* just like non-share case */
2902 if (!UVM_ET_ISCOPYONWRITE(share_entry)) {
2903 pmap_protect(new_map->pmap, new_entry->start, new_entry->end,
2904 new_entry->protection & ~VM_PROT_WRITE);
2905 }
2906 }
2907
2908 /* next entry in share map, please */
2909 share_entry = share_entry->next;
2910
2911 }
2912 /* done! */
2913 }
2914
2915 #if defined(DDB)
2916
2917 /*
2918 * DDB hooks
2919 */
2920
2921 /*
2922 * uvm_map_print: print out a map
2923 */
2924
2925 void uvm_map_print(map, full)
2926
2927 vm_map_t map;
2928 boolean_t full;
2929
2930 {
2931 uvm_map_printit(map, full, printf);
2932 }
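
/*
 * usage sketch [not compiled]: dumping the kernel map and all of its
 * entries from the debugger. kernel_map is assumed to be the usual
 * global.
 */
#if 0
	uvm_map_print(kernel_map, TRUE);
#endif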
2933
2934 /*
2935 * uvm_map_printit: actually prints the map
2936 */
2937
2938 void uvm_map_printit(map, full, pr)
2939
2940 vm_map_t map;
2941 boolean_t full;
2942 void (*pr) __P((const char *, ...));
2943
2944 {
2945 vm_map_entry_t entry;
2946 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset, map->max_offset);
2947 (*pr)("\tpmap=%p, #ent=%d, sz=%d, ref=%d, main=%c, version=%d\n",
2948 map->pmap, map->nentries, map->size, map->ref_count,
2949 (map->is_main_map) ? 'T' : 'F', map->timestamp);
2950 if (!full) return;
2951 for (entry = map->header.next; entry != &map->header; entry = entry->next) {
2952 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%x, amap=%p/%d\n",
2953 entry, entry->start, entry->end, entry->object.uvm_obj, entry->offset,
2954 entry->aref.ar_amap, entry->aref.ar_slotoff);
2955 (*pr)(
2956 "\tmap=%c, submap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, wc=%d, adv=%d\n",
2957 (entry->etype & UVM_ET_MAP) ? 'T' : 'F',
2958 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
2959 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
2960 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
2961 entry->protection, entry->max_protection, entry->inheritance,
2962 entry->wired_count, entry->advice);
2963 }
2964 }
2965
2966 /*
2967 * uvm_object_print: print out an object
2968 */
2969
2970 void uvm_object_print(uobj, full)
2971
2972 struct uvm_object *uobj;
2973 boolean_t full;
2974
2975 {
2976 uvm_object_printit(uobj, full, printf);
2977 }
2978
2979 /*
2980 * uvm_object_printit: actually prints the object
2981 */
2982
2983 void uvm_object_printit(uobj, full, pr)
2984
2985 struct uvm_object *uobj;
2986 boolean_t full;
2987 void (*pr) __P((const char *, ...));
2988
2989 {
2990 struct vm_page *pg;
2991 int cnt = 0;
2992
2993 (*pr)("OBJECT %p: pgops=%p, npages=%d, ", uobj, uobj->pgops, uobj->uo_npages);
2994 if (uobj->uo_refs == UVM_OBJ_KERN)
2995 (*pr)("refs=<SYSTEM>\n");
2996 else
2997 (*pr)("refs=%d\n", uobj->uo_refs);
2998
2999 if (!full) return;
3000 (*pr)(" PAGES <pg,offset>:\n ");
3001 for (pg = uobj->memq.tqh_first ; pg ; pg = pg->listq.tqe_next, cnt++) {
3002 (*pr)("<%p,0x%lx> ", pg, pg->offset);
3003 if ((cnt % 3) == 2) (*pr)("\n ");
3004 }
3005 if ((cnt % 3) != 2) (*pr)("\n");
3006 }
3007
3008 /*
3009 * uvm_page_print: print out a page
3010 */
3011
3012 void uvm_page_print(pg, full)
3013
3014 struct vm_page *pg;
3015 boolean_t full;
3016
3017 {
3018 uvm_page_printit(pg, full, printf);
3019 }
3020
3021 /*
3022 * uvm_page_printit: actually print the page
3023 */
3024
3025 void uvm_page_printit(pg, full, pr)
3026
3027 struct vm_page *pg;
3028 boolean_t full;
3029 void (*pr) __P((const char *, ...));
3030
3031 {
3032 struct vm_page *lcv;
3033 struct uvm_object *uobj;
3034 struct pglist *pgl;
3035
3036 (*pr)("PAGE %p:\n", pg);
3037 (*pr)(" flags=0x%x, pqflags=0x%x, vers=%d, wire_count=%d, pa=0x%lx\n",
3038 pg->flags, pg->pqflags, pg->version, pg->wire_count, pg->phys_addr);
3039 (*pr)(" uobject=%p, uanon=%p, offset=0x%lx loan_count=%d\n",
3040 pg->uobject, pg->uanon, pg->offset, pg->loan_count);
3041 #if defined(UVM_PAGE_TRKOWN)
3042 if (pg->flags & PG_BUSY)
3043 (*pr)(" owning process = %d, tag=%s\n", pg->owner, pg->owner_tag);
3044 else
3045 (*pr)(" page not busy, no owner\n");
3046 #else
3047 (*pr)(" [page ownership tracking disabled]\n");
3048 #endif
3049
3050 if (!full) return;
3051
3052 /* cross-verify object/anon */
3053 if ((pg->pqflags & PQ_FREE) == 0) {
3054 if (pg->pqflags & PQ_ANON) {
3055 if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
3056 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
3057 (pg->uanon) ? pg->uanon->u.an_page : NULL);
3058 else
3059 (*pr)(" anon backpointer is OK\n");
3060 } else {
3061 uobj = pg->uobject;
3062 if (uobj) {
3063 (*pr)(" checking object list\n");
3064 for (lcv = uobj->memq.tqh_first ; lcv ; lcv = lcv->listq.tqe_next) {
3065 if (lcv == pg) break;
3066 }
3067 if (lcv)
3068 (*pr)(" page found on object list\n");
3069 else
3070 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
3071 }
3072 }
3073 }
3074
3075 /* cross-verify page queue */
3076 if (pg->pqflags & PQ_FREE)
3077 pgl = &uvm.page_free;
3078 else if (pg->pqflags & PQ_INACTIVE)
3079 pgl = (pg->pqflags & PQ_SWAPBACKED) ?
3080 &uvm.page_inactive_swp : &uvm.page_inactive_obj;
3081 else if (pg->pqflags & PQ_ACTIVE)
3082 pgl = &uvm.page_active;
3083 else
3084 pgl = NULL;
3085
3086 if (pgl) {
3087 (*pr)(" checking pageq list\n");
3088 for (lcv = pgl->tqh_first ; lcv ; lcv = lcv->pageq.tqe_next) {
3089 if (lcv == pg) break;
3090 }
3091 if (lcv)
3092 (*pr)(" page found on pageq list\n");
3093 else
3094 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
3095 }
3096 }
3097 #endif
3098