/*	$NetBSD: uvm_glue.c,v 1.89.2.8 2008/02/04 09:25:09 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.89.2.8 2008/02/04 09:25:09 yamt Exp $");

#include "opt_coredump.h"
#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static void uvm_swapout(struct lwp *);

#define	UVM_NUAREA_HIWAT	20
#define	UVM_NUAREA_LOWAT	16

#define	UAREA_NEXTFREE(uarea)	(*(vaddr_t *)(UAREA_TO_USER(uarea)))

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
	bool rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return(rv);
}
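
/*
 * Example (illustrative sketch only, not part of this file): a /dev/kmem
 * style read routine would typically validate the kernel range with
 * uvm_kernacc() before copying, roughly along these lines.  "kva" and
 * "iov" are hypothetical locals.
 *
 *	if (!uvm_kernacc((void *)kva, iov->iov_len, B_READ))
 *		return EFAULT;
 *	error = uiomove((void *)kva, iov->iov_len, uio);
 */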

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif
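
/*
 * Example (illustrative sketch only): a kernel debugger planting a
 * breakpoint in read-only text would bracket the write with
 * uvm_chgkprot(), e.g.:
 *
 *	uvm_chgkprot(pc, sizeof(bkpt_insn), B_WRITE);
 *	memcpy(pc, &bkpt_insn, sizeof(bkpt_insn));
 *	uvm_chgkprot(pc, sizeof(bkpt_insn), B_READ);
 *
 * "pc" and "bkpt_insn" are hypothetical names, not part of this file.
 */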

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}
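
/*
 * Example (illustrative sketch only): a physio-style caller wires the
 * user buffer before starting the transfer and unwires it afterwards,
 * roughly:
 *
 *	error = uvm_vslock(p->p_vmspace, buf, len,
 *	    (flags & B_READ) ? VM_PROT_WRITE : VM_PROT_READ);
 *	if (error)
 *		return error;
 *	... perform the I/O on the now-wired pages ...
 *	uvm_vsunlock(p->p_vmspace, buf, len);
 *
 * Note that a read from the device writes into the buffer, hence
 * VM_PROT_WRITE for B_READ.  "p", "buf", "len" and "flags" are
 * hypothetical locals, not part of this file.
 */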

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	int error;

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in l->l_flag's
	 * LW_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.  If we reused a cached
	 * U-area, LW_INMEM will already be set and we don't need to do
	 * anything.
	 *
	 * Note the kernel stack gets read/write accesses right off the bat.
	 */

	if ((l2->l_flag & LW_INMEM) == 0) {
		vaddr_t uarea = USER_TO_UAREA(l2->l_addr);

		error = uvm_fault_wire(kernel_map, uarea,
		    uarea + USPACE, VM_PROT_READ | VM_PROT_WRITE, 0);
		if (error)
			panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error);
#ifdef PMAP_UAREA
		/* Tell the pmap this is a u-area mapping */
		PMAP_UAREA(uarea);
#endif
		l2->l_flag |= LW_INMEM;
	}

#ifdef KSTACK_CHECK_MAGIC
	/*
	 * fill stack with magic number
	 */
	kstack_setup_magic(l2);
#endif

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}

/*
 * uvm_cpu_attach: initialize per-CPU data structures.
 */

void
uvm_cpu_attach(struct cpu_info *ci)
{

}

static int
uarea_swapin(vaddr_t addr)
{

	return uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
}

static void
uarea_swapout(vaddr_t addr)
{

	uvm_fault_unwire(kernel_map, addr, addr + USPACE);
}

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;

static int
uarea_ctor(void *arg, void *obj, int flags)
{

	KASSERT((flags & PR_WAITOK) != 0);
	return uarea_swapin((vaddr_t)obj);
}

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_PAGEABLE |
	    ((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{

	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_PAGEABLE);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

void
uvm_uarea_init(void)
{

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0,
#if USPACE_ALIGN == 0
	    PR_NOALIGN |
#endif
	    PR_NOTOUCH,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
}

/*
 * uvm_uarea_alloc: allocate a u-area
 */

bool
uvm_uarea_alloc(vaddr_t *uaddrp)
{

	*uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
	return true;
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
{

	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}
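
/*
 * Example (illustrative sketch only): an LWP creation path would take a
 * u-area from the cache and record it in the new LWP, and give it back
 * on destruction, roughly:
 *
 *	vaddr_t uaddr;
 *
 *	uvm_uarea_alloc(&uaddr);
 *	l2->l_addr = UAREA_TO_USER(uaddr);
 *	l2->l_flag |= LW_INMEM;
 *	...
 *	uvm_uarea_free(USER_TO_UAREA(l2->l_addr), l2->l_cpu);
 *
 * Setting LW_INMEM is justified because uarea_ctor() wires every u-area
 * when it first enters the cache, so cached u-areas are always resident.
 * "l2" is a hypothetical struct lwp pointer; the real callers live in the
 * LWP/fork code elsewhere in the kernel.
 */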

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp;		/* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = USER_TO_UAREA(l->l_addr);

	l->l_flag &= ~LW_INMEM;
	uvm_uarea_free(va, l->l_cpu);
	l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 *
 * - must be called with the LWP's swap lock held.
 * - naturally, must not be called with l == curlwp
 */

void
uvm_swapin(struct lwp *l)
{
	int error;

	/* XXXSMP notyet KASSERT(mutex_owned(&l->l_swaplock)); */
	KASSERT(l != curlwp);

	error = uarea_swapin(USER_TO_UAREA(l->l_addr));
	if (error) {
		panic("uvm_swapin: rewiring stack failed: %d", error);
	}

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(l);
	lwp_lock(l);
	if (l->l_stat == LSRUN)
		sched_enqueue(l, false);
	l->l_flag |= LW_INMEM;
	l->l_swtime = 0;
	lwp_unlock(l);
	++uvmexp.swapins;
}

/*
 * uvm_kick_scheduler: kick the scheduler into action if not running.
 *
 * - called when swapped out processes have been awoken.
 */

void
uvm_kick_scheduler(void)
{

	if (uvm.swap_running == false)
		return;

	mutex_enter(&uvm_scheduler_mutex);
	uvm.scheduler_kicked = true;
	cv_signal(&uvm.scheduler_cv);
	mutex_exit(&uvm_scheduler_mutex);
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swapin every swapped-out, runnable process in order of
 *   priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler(void)
{
	struct lwp *l, *ll;
	int pri;
	int ppri;

	l = curlwp;
	lwp_lock(l);
	l->l_priority = PRI_VM;
	l->l_class = SCHED_FIFO;
	lwp_unlock(l);

	for (;;) {
#ifdef DEBUG
		mutex_enter(&uvm_scheduler_mutex);
		while (!enableswap)
			cv_wait(&uvm.scheduler_cv, &uvm_scheduler_mutex);
		mutex_exit(&uvm_scheduler_mutex);
#endif
		ll = NULL;		/* process to choose */
		ppri = INT_MIN;		/* its priority */

		mutex_enter(&proclist_lock);
		LIST_FOREACH(l, &alllwp, l_list) {
			/* is it a runnable swapped out process? */
			if (l->l_stat == LSRUN && !(l->l_flag & LW_INMEM)) {
				pri = l->l_swtime + l->l_slptime -
				    (l->l_proc->p_nice - NZERO) * 8;
				if (pri > ppri) {   /* higher priority? */
					ll = l;
					ppri = pri;
				}
			}
		}
#ifdef DEBUG
		if (swapdebug & SDB_FOLLOW)
			printf("scheduler: running, procp %p pri %d\n", ll,
			    ppri);
#endif
		/*
		 * Nothing to do, back to sleep
		 */
		if ((l = ll) == NULL) {
			mutex_exit(&proclist_lock);
			mutex_enter(&uvm_scheduler_mutex);
			if (uvm.scheduler_kicked == false)
				cv_wait(&uvm.scheduler_cv,
				    &uvm_scheduler_mutex);
			uvm.scheduler_kicked = false;
			mutex_exit(&uvm_scheduler_mutex);
			continue;
		}

		/*
		 * We have found a swapped-out process which we would like
		 * to bring back in.
		 *
		 * XXX: this part is really bogus cuz we could deadlock
		 * on memory despite our feeble check
		 */
		if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
			if (swapdebug & SDB_SWAPIN)
				printf("swapin: pid %d(%s)@%p, pri %d "
				    "free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, l->l_addr, ppri,
				    uvmexp.free);
#endif
			mutex_enter(&l->l_swaplock);
			mutex_exit(&proclist_lock);
			uvm_swapin(l);
			mutex_exit(&l->l_swaplock);
			continue;
		} else {
			/*
			 * not enough memory, jab the pageout daemon and
			 * wait til the coast is clear
			 */
			mutex_exit(&proclist_lock);
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("scheduler: no room for pid %d(%s),"
				    " free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, uvmexp.free);
#endif
			uvm_wait("schedpwait");
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("scheduler: room again, free %d\n",
				    uvmexp.free);
#endif
		}
	}
}
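
/*
 * Note on the swap-in score computed above (explanatory sketch, not code
 * from this file): each swapped-out, runnable candidate is scored as
 *
 *	pri = l_swtime + l_slptime - (p_nice - NZERO) * 8
 *
 * so an LWP that has been swapped out for 20 seconds and asleep for 5,
 * at the default nice value (p_nice == NZERO), scores 25, while the same
 * LWP niced by +10 scores 25 - 80 = -55.  Long-swapped, long-sleeping,
 * low-nice (high priority) LWPs are therefore brought back in first.
 */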

/*
 * swappable: is LWP "l" swappable?
 */

static bool
swappable(struct lwp *l)
{

	if ((l->l_flag & (LW_INMEM|LW_RUNNING|LW_SYSTEM|LW_WEXIT)) != LW_INMEM)
		return false;
	if (l->l_holdcnt != 0)
		return false;
	if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
		return false;
	return true;
}

/*
 * swapout_threads: find threads that can be swapped and unwire their
 * u-areas.
 *
 * - called by the pagedaemon
 * - try to swap out at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads(void)
{
	struct lwp *l;
	struct lwp *outl, *outl2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	bool gotit;

	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
	 * outl2/outpri2: the longest resident thread (its swap time)
	 */
	outl = outl2 = NULL;
	outpri = outpri2 = 0;

 restart:
	mutex_enter(&proclist_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		KASSERT(l->l_proc != NULL);
		if (!mutex_tryenter(&l->l_swaplock))
			continue;
		if (!swappable(l)) {
			mutex_exit(&l->l_swaplock);
			continue;
		}
		switch (l->l_stat) {
		case LSONPROC:
			break;

		case LSRUN:
			if (l->l_swtime > outpri2) {
				outl2 = l;
				outpri2 = l->l_swtime;
			}
			break;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_slptime >= maxslp) {
				mutex_exit(&proclist_lock);
				uvm_swapout(l);
				/*
				 * Locking in the wrong direction -
				 * try to prevent the LWP from exiting.
				 */
				gotit = mutex_tryenter(&proclist_lock);
				mutex_exit(&l->l_swaplock);
				didswap++;
				if (!gotit)
					goto restart;
				continue;
			} else if (l->l_slptime > outpri) {
				outl = l;
				outpri = l->l_slptime;
			}
			break;
		}
		mutex_exit(&l->l_swaplock);
	}

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((l = outl) == NULL)
			l = outl2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %p\n", l);
#endif
		if (l) {
			mutex_enter(&l->l_swaplock);
			mutex_exit(&proclist_lock);
			if (swappable(l))
				uvm_swapout(l);
			mutex_exit(&l->l_swaplock);
			return;
		}
	}

	mutex_exit(&proclist_lock);
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - must be called with l->l_swaplock held.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(&l->l_swaplock));

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: lid %d.%d(%s)@%p, stat %x pri %d free %d\n",
		    p->p_pid, l->l_lid, p->p_comm, l->l_addr, l->l_stat,
		    l->l_slptime, uvmexp.free);
#endif

	/*
	 * Mark it as (potentially) swapped out.
	 */
	lwp_lock(l);
	if (!swappable(l)) {
		KDASSERT(l->l_cpu != curcpu());
		lwp_unlock(l);
		return;
	}
	l->l_flag &= ~LW_INMEM;
	l->l_swtime = 0;
	if (l->l_stat == LSRUN)
		sched_dequeue(l);
	lwp_unlock(l);
	p->p_stats->p_ru.ru_nswap++;	/* XXXSMP */
	++uvmexp.swapouts;

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(l);

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	uarea_swapout(USER_TO_UAREA(l->l_addr));
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
}

/*
 * uvm_lwp_hold: prevent lwp "l" from being swapped out, and bring it
 * back into memory if it is currently swapped out.
 */

void
uvm_lwp_hold(struct lwp *l)
{

	if (l == curlwp) {
		atomic_inc_uint(&l->l_holdcnt);
	} else {
		mutex_enter(&l->l_swaplock);
		if (atomic_inc_uint_nv(&l->l_holdcnt) == 1 &&
		    (l->l_flag & LW_INMEM) == 0)
			uvm_swapin(l);
		mutex_exit(&l->l_swaplock);
	}
}

/*
 * uvm_lwp_rele: release a hold on lwp "l".  When the hold count
 * drops to zero, it is eligible to be swapped out again.
 */

void
uvm_lwp_rele(struct lwp *l)
{

	KASSERT(l->l_holdcnt != 0);

	atomic_dec_uint(&l->l_holdcnt);
}
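
/*
 * Example (illustrative sketch only): code that needs another LWP's
 * u-area (PCB, kernel stack) to stay resident while it is examined,
 * e.g. a ptrace/procfs style register fetch, would bracket the access
 * with a hold:
 *
 *	uvm_lwp_hold(t);
 *	error = process_read_regs(t, &regs);
 *	uvm_lwp_rele(t);
 *
 * "t" and "regs" are hypothetical locals; process_read_regs() is the
 * usual machine-dependent helper for reading a stopped LWP's registers.
 */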

#ifdef COREDUMP
/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 * a core file.
 */

int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	int error;

	entry = NULL;
	vm_map_lock_read(map);
	state.end = 0;
	for (;;) {
		if (entry == NULL)
			entry = map->header.next;
		else if (!uvm_map_lookup_entry(map, state.end, &entry))
			entry = entry->next;
		if (entry == &map->header)
			break;

		state.cookie = cookie;
		if (state.end > entry->start) {
			state.start = state.end;
		} else {
			state.start = entry->start;
		}
		state.realend = entry->end;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

		/*
		 * Dump the region unless one of the following is true:
		 *
		 * (1) the region has neither object nor amap behind it
		 *     (ie. it has never been accessed).
		 *
		 * (2) the region has no amap and is read-only
		 *     (eg. an executable text section).
		 *
		 * (3) the region's object is a device.
		 *
		 * (4) the region is unreadable by the process.
		 */

		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(state.start < VM_MAXUSER_ADDRESS);
		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
		if (entry->object.uvm_obj == NULL &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_WRITE) == 0 &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_READ) == 0) {
			state.realend = state.start;
		} else {
			if (state.start >= (vaddr_t)vm->vm_maxsaddr)
				state.flags |= UVM_COREDUMP_STACK;

			/*
			 * If this is an anonymous entry, only dump
			 * instantiated pages.
			 */
			if (entry->object.uvm_obj == NULL) {
				vaddr_t end;

				amap_lock(entry->aref.ar_amap);
				for (end = state.start;
				     end < state.end; end += PAGE_SIZE) {
					struct vm_anon *anon;
					anon = amap_lookup(&entry->aref,
					    end - entry->start);
					/*
					 * If we have already encountered an
					 * uninstantiated page, stop at the
					 * first instantiated page.
					 */
					if (anon != NULL &&
					    state.realend != state.end) {
						state.end = end;
						break;
					}

					/*
					 * If this page is the first
					 * uninstantiated page, mark this as
					 * the real ending point.  Continue
					 * counting uninstantiated pages.
					 */
					if (anon == NULL &&
					    state.realend == state.end) {
						state.realend = end;
					}
				}
				amap_unlock(entry->aref.ar_amap);
			}
		}

		vm_map_unlock_read(map);
		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
		vm_map_lock_read(map);
	}
	vm_map_unlock_read(map);

	return (0);
}
#endif /* COREDUMP */
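
/*
 * Example (illustrative sketch only): a core dump routine passes a
 * callback that is invoked once per dumpable region; the callback sees
 * the region's bounds and protection in the uvm_coredump_state and
 * writes (or merely counts) the corresponding segment.  Roughly:
 *
 *	static int
 *	count_segment(struct proc *p, void *iocookie,
 *	    struct uvm_coredump_state *us)
 *	{
 *		int *nsegs = us->cookie;
 *
 *		if (us->start < us->realend)
 *			(*nsegs)++;
 *		return 0;
 *	}
 *
 *	...
 *	error = uvm_coredump_walkmap(p, iocookie, count_segment, &nsegs);
 *
 * "count_segment" and "nsegs" are hypothetical; the kernel's ELF/a.out
 * core dump code uses callbacks of this shape to size and then write
 * the core file segments.
 */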