/*	$NetBSD: uvm_glue.c,v 1.99 2007/02/15 20:21:13 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.99 2007/02/15 20:21:13 ad Exp $");

#include "opt_coredump.h"
#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>

/*
 * local prototypes
 */

static void uvm_swapout(struct lwp *);

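/*
 * Cache of recently freed u-areas: free u-areas are chained through a
 * pointer stored at the start of each one (UAREA_NEXTFREE), protected
 * by uvm_uareas_slock and capped at UVM_NUAREA_MAX entries.
 */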
#define	UVM_NUAREA_MAX	16
static vaddr_t uvm_uareas;
static int uvm_nuarea;
static struct simplelock uvm_uareas_slock = SIMPLELOCK_INITIALIZER;
#define	UAREA_NEXTFREE(uarea)	(*(vaddr_t *)(UAREA_TO_USER(uarea)))

static void uvm_uarea_free(vaddr_t);

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

boolean_t
uvm_kernacc(caddr_t addr, size_t len, int rw)
{
	boolean_t rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return(rv);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(caddr_t addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, boolean_t shared)
{

	if (shared == TRUE) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *	than just hang
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	int error;

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in l->l_flag's
	 * L_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.  If we reused a cached U-area,
	 * L_INMEM will already be set and we don't need to do anything.
	 *
	 * Note the kernel stack gets read/write accesses right off the bat.
	 */

	if ((l2->l_flag & L_INMEM) == 0) {
		vaddr_t uarea = USER_TO_UAREA(l2->l_addr);

		error = uvm_fault_wire(kernel_map, uarea,
		    uarea + USPACE, VM_PROT_READ | VM_PROT_WRITE, 0);
		if (error)
			panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error);
#ifdef PMAP_UAREA
		/* Tell the pmap this is a u-area mapping */
		PMAP_UAREA(uarea);
#endif
		l2->l_flag |= L_INMEM;
	}

#ifdef KSTACK_CHECK_MAGIC
	/*
	 * fill stack with magic number
	 */
	kstack_setup_magic(l2);
#endif

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}

/*
 * uvm_uarea_alloc: allocate a u-area
 *
 * - returns TRUE if the u-area was taken from the free-list cache,
 *   FALSE if it had to be freshly allocated (and is still pageable).
 */

boolean_t
uvm_uarea_alloc(vaddr_t *uaddrp)
{
	vaddr_t uaddr;

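/* USPACE_ALIGN may be provided by MD code; default to no particular alignment. */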
#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

	simple_lock(&uvm_uareas_slock);
	if (uvm_nuarea > 0) {
		uaddr = uvm_uareas;
		uvm_uareas = UAREA_NEXTFREE(uaddr);
		uvm_nuarea--;
		simple_unlock(&uvm_uareas_slock);
		*uaddrp = uaddr;
		return TRUE;
	} else {
		simple_unlock(&uvm_uareas_slock);
		*uaddrp = uvm_km_alloc(kernel_map, USPACE, USPACE_ALIGN,
		    UVM_KMF_PAGEABLE);
		return FALSE;
	}
}

/*
 * uvm_uarea_free: free a u-area; never blocks
 */

static inline void
uvm_uarea_free(vaddr_t uaddr)
{
	simple_lock(&uvm_uareas_slock);
	UAREA_NEXTFREE(uaddr) = uvm_uareas;
	uvm_uareas = uaddr;
	uvm_nuarea++;
	simple_unlock(&uvm_uareas_slock);
}

/*
 * uvm_uarea_drain: return the memory of u-areas over the limit
 * back to the system
 */

void
uvm_uarea_drain(boolean_t empty)
{
	int leave = empty ? 0 : UVM_NUAREA_MAX;
	vaddr_t uaddr;

	if (uvm_nuarea <= leave)
		return;

	simple_lock(&uvm_uareas_slock);
	while (uvm_nuarea > leave) {
		uaddr = uvm_uareas;
		uvm_uareas = UAREA_NEXTFREE(uaddr);
		uvm_nuarea--;
		simple_unlock(&uvm_uareas_slock);
		uvm_km_free(kernel_map, uaddr, USPACE, UVM_KMF_PAGEABLE);
		simple_lock(&uvm_uareas_slock);
	}
	simple_unlock(&uvm_uareas_slock);
}

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp;	/* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = USER_TO_UAREA(l->l_addr);

	l->l_flag &= ~L_INMEM;
	uvm_uarea_free(va);
	l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 */

void
uvm_swapin(struct lwp *l)
{
	vaddr_t addr;
	int error;

	addr = USER_TO_UAREA(l->l_addr);
	/* make L_INMEM true */
	error = uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (error) {
		panic("uvm_swapin: rewiring stack failed: %d", error);
	}

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(l);
	lwp_lock(l);
	if (l->l_stat == LSRUN)
		setrunqueue(l);
	l->l_flag |= L_INMEM;
	l->l_swtime = 0;
	lwp_unlock(l);
	++uvmexp.swapins;
}

/*
 * uvm_kick_scheduler: kick the scheduler into action if not running.
 *
 * - called when swapped out processes have been awoken.
 */

void
uvm_kick_scheduler(void)
{

	mutex_enter(&uvm.scheduler_mutex);
	uvm.scheduler_kicked = TRUE;
	cv_signal(&uvm.scheduler_cv);
	mutex_exit(&uvm.scheduler_mutex);
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *   priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler(void)
{
	struct lwp *l, *ll;
	int pri;
	int ppri;

	l = curlwp;
	lwp_lock(l);
	lwp_changepri(l, PVM);
	lwp_unlock(l);

	for (;;) {
#ifdef DEBUG
		mutex_enter(&uvm.scheduler_mutex);
		while (!enableswap)
			cv_wait(&uvm.scheduler_cv, &uvm.scheduler_mutex);
		mutex_exit(&uvm.scheduler_mutex);
#endif
		ll = NULL;		/* process to choose */
		ppri = INT_MIN;		/* its priority */

		mutex_enter(&proclist_mutex);
		LIST_FOREACH(l, &alllwp, l_list) {
			/* is it a runnable swapped out process? */
			if (l->l_stat == LSRUN && (l->l_flag & L_INMEM) == 0) {
				pri = l->l_swtime + l->l_slptime -
				    (l->l_proc->p_nice - NZERO) * 8;
				if (pri > ppri) {   /* higher priority? */
					ll = l;
					ppri = pri;
				}
			}
		}
		mutex_exit(&proclist_mutex);
#ifdef DEBUG
		if (swapdebug & SDB_FOLLOW)
			printf("scheduler: running, procp %p pri %d\n", ll,
			    ppri);
#endif
		/*
		 * Nothing to do, back to sleep
		 */
		if ((l = ll) == NULL) {
			mutex_enter(&uvm.scheduler_mutex);
			if (uvm.scheduler_kicked == FALSE)
				cv_wait(&uvm.scheduler_cv,
				    &uvm.scheduler_mutex);
			uvm.scheduler_kicked = FALSE;
			mutex_exit(&uvm.scheduler_mutex);
			continue;
		}

		/*
		 * we have found a swapped-out process which we would like
		 * to bring back in.
		 *
		 * XXX: this part is really bogus cuz we could deadlock
		 * on memory despite our feeble check
		 */
		if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
			if (swapdebug & SDB_SWAPIN)
				printf("swapin: pid %d(%s)@%p, pri %d "
				    "free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, l->l_addr, ppri,
				    uvmexp.free);
#endif
			uvm_swapin(l);
		} else {
			/*
			 * not enough memory, jab the pageout daemon and
			 * wait til the coast is clear
			 */
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("scheduler: no room for pid %d(%s),"
				    " free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, uvmexp.free);
#endif
			uvm_wait("schedpwait");
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("scheduler: room again, free %d\n",
				    uvmexp.free);
#endif
		}
	}
}

/*
 * swappable: is LWP "l" swappable?
 */

#define	swappable(l)							\
	(((l)->l_flag & (L_INMEM)) &&					\
	 ((((l)->l_flag) & (L_SYSTEM | L_WEXIT)) == 0) &&		\
	 (l)->l_holdcnt == 0)

/*
 * uvm_swapout_threads: find threads that can be swapped and unwire their
 *	u-areas.
 *
 * - called by the pagedaemon
 * - try to swap out at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads(void)
{
	struct lwp *l;
	struct lwp *outl, *outl2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
	 * outl2/outpri2: the longest resident thread (its swap time)
	 */
	outl = outl2 = NULL;
	outpri = outpri2 = 0;
	mutex_enter(&proclist_mutex);	/* XXXSMP */
	LIST_FOREACH(l, &alllwp, l_list) {
		KASSERT(l->l_proc != NULL);
		lwp_lock(l);
		if (!swappable(l)) {
			lwp_unlock(l);
			continue;
		}
		switch (l->l_stat) {
		case LSONPROC:
			break;

		case LSRUN:
			if (l->l_swtime > outpri2) {
				outl2 = l;
				outpri2 = l->l_swtime;
			}
			break;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_slptime >= maxslp) {
				/* uvm_swapout() will release the lock. */
				uvm_swapout(l);
				didswap++;
				continue;
			} else if (l->l_slptime > outpri) {
				outl = l;
				outpri = l->l_slptime;
			}
			break;
		}
		lwp_unlock(l);
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((l = outl) == NULL)
			l = outl2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %p\n", l);
#endif
		if (l) {
			/* uvm_swapout() will release the lock. */
			lwp_lock(l);
			uvm_swapout(l);
		}
	}

	mutex_exit(&proclist_mutex);

}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - must be called with the LWP locked, and will release the lock.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(struct lwp *l)
{
	vaddr_t addr;
	struct proc *p = l->l_proc;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: lid %d.%d(%s)@%p, stat %x pri %d free %d\n",
		    p->p_pid, l->l_lid, p->p_comm, l->l_addr, l->l_stat,
		    l->l_slptime, uvmexp.free);
#endif

	/*
	 * Mark it as (potentially) swapped out.
	 */
	if (l->l_stat == LSONPROC) {
		KDASSERT(l->l_cpu != curcpu());
		lwp_unlock(l);
		return;
	}
	l->l_flag &= ~L_INMEM;
	l->l_swtime = 0;
	if (l->l_stat == LSRUN)
		remrunqueue(l);
	lwp_unlock(l);
	p->p_stats->p_ru.ru_nswap++;	/* XXXSMP */
	++uvmexp.swapouts;

	mutex_exit(&proclist_mutex);	/* XXXSMP */

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(l);

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	addr = USER_TO_UAREA(l->l_addr);
	uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !L_INMEM */
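	/* Let the pmap release resources for this address space while the LWP is swapped out. */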
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));

	mutex_enter(&proclist_mutex);	/* XXXSMP */
}

#ifdef COREDUMP
/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 *   a core file.
 */

int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	int error;

	entry = NULL;
	vm_map_lock_read(map);
	state.end = 0;
	for (;;) {
		if (entry == NULL)
			entry = map->header.next;
		else if (!uvm_map_lookup_entry(map, state.end, &entry))
			entry = entry->next;
		if (entry == &map->header)
			break;

		state.cookie = cookie;
		if (state.end > entry->start) {
			state.start = state.end;
		} else {
			state.start = entry->start;
		}
		state.realend = entry->end;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

		/*
		 * Dump the region unless one of the following is true:
		 *
		 * (1) the region has neither object nor amap behind it
		 *     (ie. it has never been accessed).
		 *
		 * (2) the region has no amap and is read-only
		 *     (eg. an executable text section).
		 *
		 * (3) the region's object is a device.
		 *
		 * (4) the region is unreadable by the process.
		 */

		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(state.start < VM_MAXUSER_ADDRESS);
		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
		if (entry->object.uvm_obj == NULL &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_WRITE) == 0 &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_READ) == 0) {
			state.realend = state.start;
		} else {
			if (state.start >= (vaddr_t)vm->vm_maxsaddr)
				state.flags |= UVM_COREDUMP_STACK;

			/*
			 * If this is an anonymous entry, only dump
			 * instantiated pages.
			 */
			if (entry->object.uvm_obj == NULL) {
				vaddr_t end;

				amap_lock(entry->aref.ar_amap);
				for (end = state.start;
				     end < state.end; end += PAGE_SIZE) {
					struct vm_anon *anon;
					anon = amap_lookup(&entry->aref,
					    end - entry->start);
					/*
					 * If we have already encountered an
					 * uninstantiated page, stop at the
					 * first instantiated page.
					 */
					if (anon != NULL &&
					    state.realend != state.end) {
						state.end = end;
						break;
					}

					/*
					 * If this page is the first
					 * uninstantiated page, mark this as
					 * the real ending point.  Continue
					 * counting uninstantiated pages.
					 */
					if (anon == NULL &&
					    state.realend == state.end) {
						state.realend = end;
					}
				}
				amap_unlock(entry->aref.ar_amap);
			}
		}


		vm_map_unlock_read(map);
		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
		vm_map_lock_read(map);
	}
	vm_map_unlock_read(map);

	return (0);
}
#endif /* COREDUMP */