/*	$NetBSD: uvm_glue.c,v 1.123 2008/04/11 15:31:37 christos Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.123 2008/04/11 15:31:37 christos Exp $");

#include "opt_coredump.h"
#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static void uvm_swapout(struct lwp *);
static int uarea_swapin(vaddr_t);

#define	UVM_NUAREA_HIWAT	20
#define	UVM_NUAREA_LOWAT	16

#define	UAREA_NEXTFREE(uarea)	(*(vaddr_t *)(UAREA_TO_USER(uarea)))

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
	bool rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return(rv);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	int error;

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in l->l_flag's
	 * L_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.  If we reused a cached U-area,
	 * L_INMEM will already be set and we don't need to do anything.
	 *
	 * Note the kernel stack gets read/write accesses right off the bat.
	 */

	if ((l2->l_flag & LW_INMEM) == 0) {
		vaddr_t uarea = USER_TO_UAREA(l2->l_addr);

		if ((error = uarea_swapin(uarea)) != 0)
			panic("%s: uvm_fault_wire failed: %d", __func__, error);
#ifdef PMAP_UAREA
		/* Tell the pmap this is a u-area mapping */
		PMAP_UAREA(uarea);
#endif
		l2->l_flag |= LW_INMEM;
	}

#ifdef KSTACK_CHECK_MAGIC
	/*
	 * fill stack with magic number
	 */
	kstack_setup_magic(l2);
#endif

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}

/*
 * uvm_cpu_attach: initialize per-CPU data structures.
 */

void
uvm_cpu_attach(struct cpu_info *ci)
{

}

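/*
 * uarea_swapin: wire a u-area (PCB plus kernel stack) into physical
 * memory so it can be used as a kernel stack.
 */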
static int
uarea_swapin(vaddr_t addr)
{

	return uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
}

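/*
 * uarea_swapout: unwire a u-area, allowing its pages to be paged out.
 */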
static void
uarea_swapout(vaddr_t addr)
{

	uvm_fault_unwire(kernel_map, addr, addr + USPACE);
}

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;

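/*
 * uarea_ctor: pool cache constructor for u-areas; wires the newly
 * allocated u-area.  Callers must be able to sleep (PR_WAITOK).
 */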
static int
uarea_ctor(void *arg, void *obj, int flags)
{

	KASSERT((flags & PR_WAITOK) != 0);
	return uarea_swapin((vaddr_t)obj);
}

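/*
 * uarea_poolpage_alloc: back the u-area pool with pageable kernel
 * virtual address space allocated from kernel_map.
 */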
static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_PAGEABLE |
	    ((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

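/*
 * uarea_poolpage_free: release the kernel virtual address space
 * backing a u-area pool page.
 */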
static void
uarea_poolpage_free(struct pool *pp, void *addr)
{

	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_PAGEABLE);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

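/*
 * uvm_uarea_init: create the pool cache from which u-areas are
 * allocated and to which they are returned.
 */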
void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
}

/*
 * uvm_uarea_alloc: allocate a u-area
 */

bool
uvm_uarea_alloc(vaddr_t *uaddrp)
{

	*uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
	return true;
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
{

	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp;		/* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);

	uvmspace_free(ovm);
}

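/*
 * uvm_lwp_exit: release an exiting lwp's u-area back to the cache.
 */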
void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = USER_TO_UAREA(l->l_addr);

	l->l_flag &= ~LW_INMEM;
	uvm_uarea_free(va, l->l_cpu);
	l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 *
 * - must be called with the LWP's swap lock held.
 * - naturally, must not be called with l == curlwp
 */

void
uvm_swapin(struct lwp *l)
{
	int error;

	/* XXXSMP notyet KASSERT(mutex_owned(&l->l_swaplock)); */
	KASSERT(l != curlwp);

	error = uarea_swapin(USER_TO_UAREA(l->l_addr));
	if (error) {
		panic("%s: rewiring stack failed: %d", __func__, error);
	}

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(l);
	lwp_lock(l);
	if (l->l_stat == LSRUN)
		sched_enqueue(l, false);
	l->l_flag |= LW_INMEM;
	l->l_swtime = 0;
	lwp_unlock(l);
	++uvmexp.swapins;
}

/*
 * uvm_kick_scheduler: kick the scheduler into action if not running.
 *
 * - called when swapped out processes have been awoken.
 */

void
uvm_kick_scheduler(void)
{

	if (uvm.swap_running == false)
		return;

	mutex_enter(&uvm_scheduler_mutex);
	uvm.scheduler_kicked = true;
	cv_signal(&uvm.scheduler_cv);
	mutex_exit(&uvm_scheduler_mutex);
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swapin every swapped-out, runnable process in order of
 *	priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler(void)
{
	struct lwp *l, *ll;
	int pri;
	int ppri;

	l = curlwp;
	lwp_lock(l);
	l->l_priority = PRI_VM;
	l->l_class = SCHED_FIFO;
	lwp_unlock(l);

	for (;;) {
#ifdef DEBUG
		mutex_enter(&uvm_scheduler_mutex);
		while (!enableswap)
			cv_wait(&uvm.scheduler_cv, &uvm_scheduler_mutex);
		mutex_exit(&uvm_scheduler_mutex);
#endif
		ll = NULL;		/* process to choose */
		ppri = INT_MIN;		/* its priority */

		mutex_enter(&proclist_lock);
		LIST_FOREACH(l, &alllwp, l_list) {
			/* is it a runnable swapped out process? */
			if (l->l_stat == LSRUN && !(l->l_flag & LW_INMEM)) {
				pri = l->l_swtime + l->l_slptime -
				    (l->l_proc->p_nice - NZERO) * 8;
				if (pri > ppri) {   /* higher priority? */
					ll = l;
					ppri = pri;
				}
			}
		}
#ifdef DEBUG
		if (swapdebug & SDB_FOLLOW)
			printf("%s: running, procp %p pri %d\n", __func__, ll,
			    ppri);
#endif
		/*
		 * Nothing to do, back to sleep
		 */
		if ((l = ll) == NULL) {
			mutex_exit(&proclist_lock);
			mutex_enter(&uvm_scheduler_mutex);
			if (uvm.scheduler_kicked == false)
				cv_wait(&uvm.scheduler_cv,
				    &uvm_scheduler_mutex);
			uvm.scheduler_kicked = false;
			mutex_exit(&uvm_scheduler_mutex);
			continue;
		}

		/*
		 * we have found a swapped out process which we would like
		 * to bring back in.
		 *
		 * XXX: this part is really bogus cuz we could deadlock
		 * on memory despite our feeble check
		 */
		if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
			if (swapdebug & SDB_SWAPIN)
				printf("swapin: pid %d(%s)@%p, pri %d "
				    "free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, l->l_addr, ppri,
				    uvmexp.free);
#endif
			mutex_enter(&l->l_swaplock);
			mutex_exit(&proclist_lock);
			uvm_swapin(l);
			mutex_exit(&l->l_swaplock);
			continue;
		} else {
			/*
			 * not enough memory, jab the pageout daemon and
			 * wait til the coast is clear
			 */
			mutex_exit(&proclist_lock);
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("%s: no room for pid %d(%s),"
				    " free %d\n", __func__, l->l_proc->p_pid,
				    l->l_proc->p_comm, uvmexp.free);
#endif
			uvm_wait("schedpwait");
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("%s: room again, free %d\n", __func__,
				    uvmexp.free);
#endif
		}
	}
}

/*
 * swappable: is LWP "l" swappable?
 */

static bool
swappable(struct lwp *l)
{

	if ((l->l_flag & (LW_INMEM|LW_RUNNING|LW_SYSTEM|LW_WEXIT)) != LW_INMEM)
		return false;
	if (l->l_holdcnt != 0)
		return false;
	if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
		return false;
	return true;
}

/*
 * uvm_swapout_threads: find threads that can be swapped and unwire their
 * u-areas.
 *
 * - called by the pagedaemon
 * - try and swap at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads(void)
{
	struct lwp *l;
	struct lwp *outl, *outl2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	bool gotit;

	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
	 * outl2/outpri2: the longest resident thread (its swap time)
	 */
	outl = outl2 = NULL;
	outpri = outpri2 = 0;

 restart:
	mutex_enter(&proclist_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		KASSERT(l->l_proc != NULL);
		if (!mutex_tryenter(&l->l_swaplock))
			continue;
		if (!swappable(l)) {
			mutex_exit(&l->l_swaplock);
			continue;
		}
		switch (l->l_stat) {
		case LSONPROC:
			break;

		case LSRUN:
			if (l->l_swtime > outpri2) {
				outl2 = l;
				outpri2 = l->l_swtime;
			}
			break;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_slptime >= maxslp) {
				mutex_exit(&proclist_lock);
				uvm_swapout(l);
				/*
				 * Locking in the wrong direction -
				 * try to prevent the LWP from exiting.
				 */
				gotit = mutex_tryenter(&proclist_lock);
				mutex_exit(&l->l_swaplock);
				didswap++;
				if (!gotit)
					goto restart;
				continue;
			} else if (l->l_slptime > outpri) {
				outl = l;
				outpri = l->l_slptime;
			}
			break;
		}
		mutex_exit(&l->l_swaplock);
	}

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((l = outl) == NULL)
			l = outl2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("%s: no duds, try procp %p\n", __func__, l);
#endif
		if (l) {
			mutex_enter(&l->l_swaplock);
			mutex_exit(&proclist_lock);
			if (swappable(l))
				uvm_swapout(l);
			mutex_exit(&l->l_swaplock);
			return;
		}
	}

	mutex_exit(&proclist_lock);
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - must be called with l->l_swaplock held.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(struct lwp *l)
{
	KASSERT(mutex_owned(&l->l_swaplock));

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("%s: lid %d.%d(%s)@%p, stat %x pri %d free %d\n",
		    __func__, l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
		    l->l_addr, l->l_stat, l->l_slptime, uvmexp.free);
#endif

	/*
	 * Mark it as (potentially) swapped out.
	 */
	lwp_lock(l);
	if (!swappable(l)) {
		KDASSERT(l->l_cpu != curcpu());
		lwp_unlock(l);
		return;
	}
	l->l_flag &= ~LW_INMEM;
	l->l_swtime = 0;
	if (l->l_stat == LSRUN)
		sched_dequeue(l);
	lwp_unlock(l);
	l->l_ru.ru_nswap++;
	++uvmexp.swapouts;

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(l);

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	uarea_swapout(USER_TO_UAREA(l->l_addr));
	pmap_collect(vm_map_pmap(&l->l_proc->p_vmspace->vm_map));
}

/*
 * uvm_lwp_hold: prevent lwp "l" from being swapped out, and bring
 * back into memory if it is currently swapped.
 */

void
uvm_lwp_hold(struct lwp *l)
{

	if (l == curlwp) {
		atomic_inc_uint(&l->l_holdcnt);
	} else {
		mutex_enter(&l->l_swaplock);
		if (atomic_inc_uint_nv(&l->l_holdcnt) == 1 &&
		    (l->l_flag & LW_INMEM) == 0)
			uvm_swapin(l);
		mutex_exit(&l->l_swaplock);
	}
}

/*
 * uvm_lwp_rele: release a hold on lwp "l".  when the holdcount
 * drops to zero, it's eligible to be swapped.
 */

void
uvm_lwp_rele(struct lwp *l)
{

	KASSERT(l->l_holdcnt != 0);

	atomic_dec_uint(&l->l_holdcnt);
}

#ifdef COREDUMP
/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 * a core file.
 */

int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	int error;

	entry = NULL;
	vm_map_lock_read(map);
	state.end = 0;
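	/*
	 * Walk the map entries.  The map is unlocked around each call to
	 * the dump function, so the entry is re-looked-up by the last end
	 * address (state.end) on every iteration.
	 */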
	for (;;) {
		if (entry == NULL)
			entry = map->header.next;
		else if (!uvm_map_lookup_entry(map, state.end, &entry))
			entry = entry->next;
		if (entry == &map->header)
			break;

		state.cookie = cookie;
		if (state.end > entry->start) {
			state.start = state.end;
		} else {
			state.start = entry->start;
		}
		state.realend = entry->end;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

		/*
		 * Dump the region unless one of the following is true:
		 *
		 * (1) the region has neither object nor amap behind it
		 *     (ie. it has never been accessed).
		 *
		 * (2) the region has no amap and is read-only
		 *     (eg. an executable text section).
		 *
		 * (3) the region's object is a device.
		 *
		 * (4) the region is unreadable by the process.
		 */

		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(state.start < VM_MAXUSER_ADDRESS);
		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
		if (entry->object.uvm_obj == NULL &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_WRITE) == 0 &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_READ) == 0) {
			state.realend = state.start;
		} else {
			if (state.start >= (vaddr_t)vm->vm_maxsaddr)
				state.flags |= UVM_COREDUMP_STACK;

			/*
			 * If this is an anonymous entry, only dump
			 * instantiated pages.
			 */
			if (entry->object.uvm_obj == NULL) {
				vaddr_t end;

				amap_lock(entry->aref.ar_amap);
				for (end = state.start;
				     end < state.end; end += PAGE_SIZE) {
					struct vm_anon *anon;
					anon = amap_lookup(&entry->aref,
					    end - entry->start);
					/*
					 * If we have already encountered an
					 * uninstantiated page, stop at the
					 * first instantiated page.
					 */
					if (anon != NULL &&
					    state.realend != state.end) {
						state.end = end;
						break;
					}

					/*
					 * If this page is the first
					 * uninstantiated page, mark this as
					 * the real ending point.  Continue
					 * counting uninstantiated pages.
					 */
					if (anon == NULL &&
					    state.realend == state.end) {
						state.realend = end;
					}
				}
				amap_unlock(entry->aref.ar_amap);
			}
		}

		vm_map_unlock_read(map);
		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
		vm_map_lock_read(map);
	}
	vm_map_unlock_read(map);

	return (0);
}
#endif /* COREDUMP */