/*	$NetBSD: uvm_glue.c,v 1.116 2008/02/07 12:21:24 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.116 2008/02/07 12:21:24 yamt Exp $");

#include "opt_coredump.h"
#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static void uvm_swapout(struct lwp *);

#define	UVM_NUAREA_HIWAT	20
#define	UVM_NUAREA_LOWAT	16

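/*
 * UAREA_NEXTFREE threads free u-areas together through the first word
 * of the "user" structure they contain; with u-areas now backed by a
 * pool_cache, this appears to be a leftover of the old free-list
 * scheme, as are the HIWAT/LOWAT thresholds above.
 */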
#define	UAREA_NEXTFREE(uarea)	(*(vaddr_t *)(UAREA_TO_USER(uarea)))

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
	bool rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return(rv);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

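	/*
	 * A shared fork borrows the parent's vmspace by reference via
	 * uvmspace_share() (the vfork(2) style of fork); otherwise the
	 * child gets a copy, honouring the map's inheritance values.
	 */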
	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *   [filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *   stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *   process, and thus addresses of automatic variables may be invalid
 *   after cpu_lwp_fork returns in the child process.  We do nothing here
 *   after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	int error;

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in l->l_flag's
	 * LW_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.  If we reused a cached U-area,
	 * LW_INMEM will already be set and we don't need to do anything.
	 *
	 * Note the kernel stack gets read/write accesses right off the bat.
	 */

	if ((l2->l_flag & LW_INMEM) == 0) {
		vaddr_t uarea = USER_TO_UAREA(l2->l_addr);

		error = uvm_fault_wire(kernel_map, uarea,
		    uarea + USPACE, VM_PROT_READ | VM_PROT_WRITE, 0);
		if (error)
			panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error);
#ifdef PMAP_UAREA
		/* Tell the pmap this is a u-area mapping */
		PMAP_UAREA(uarea);
#endif
		l2->l_flag |= LW_INMEM;
	}

#ifdef KSTACK_CHECK_MAGIC
	/*
	 * fill stack with magic number
	 */
	kstack_setup_magic(l2);
#endif

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}

/*
 * uvm_cpu_attach: initialize per-CPU data structures.
 */

void
uvm_cpu_attach(struct cpu_info *ci)
{

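	/*
	 * Nothing to do here at present: u-areas are cached in a global
	 * pool_cache rather than per-CPU lists, so this is an empty hook
	 * for future per-CPU state.
	 */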
}

static int
uarea_swapin(vaddr_t addr)
{

	return uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
}

static void
uarea_swapout(vaddr_t addr)
{

	uvm_fault_unwire(kernel_map, addr, addr + USPACE);
}

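/*
 * USPACE_ALIGN may be defined by machine-dependent code to request a
 * specific alignment for u-areas; the default of 0 asks uvm_km_alloc()
 * for no particular alignment.
 */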
#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;

static int
uarea_ctor(void *arg, void *obj, int flags)
{

	KASSERT((flags & PR_WAITOK) != 0);
	return uarea_swapin((vaddr_t)obj);
}

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_PAGEABLE |
	    ((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{

	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_PAGEABLE);
}

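/*
 * Each pool "page" handed out by this allocator is exactly one
 * USPACE-sized, pageable u-area in kernel_map; wiring is done by
 * uarea_ctor() when an object is first constructed.
 */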
static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

void
uvm_uarea_init(void)
{

	/*
	 * specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0,
#if (USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) || (USPACE_ALIGN % USPACE) != 0
	    PR_NOALIGN |
#endif
	    PR_NOTOUCH,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
}
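
/*
 * Sketch of the intended alloc/free pairing (cf. uvm_lwp_exit() below;
 * the allocation side lives in the MI fork code, not in this file):
 *
 *	vaddr_t uarea;
 *
 *	(void)uvm_uarea_alloc(&uarea);	   (always succeeds; ctor wires it)
 *	l2->l_addr = UAREA_TO_USER(uarea); (install in the new LWP)
 *	...
 *	uvm_uarea_free(USER_TO_UAREA(l->l_addr), l->l_cpu);
 */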

/*
 * uvm_uarea_alloc: allocate a u-area
 */

bool
uvm_uarea_alloc(vaddr_t *uaddrp)
{

	*uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
	return true;
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
{

	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp;		/* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = USER_TO_UAREA(l->l_addr);

	l->l_flag &= ~LW_INMEM;
	uvm_uarea_free(va, l->l_cpu);
	l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}
#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 *
 * - must be called with the LWP's swap lock held.
 * - naturally, must not be called with l == curlwp
 */

void
uvm_swapin(struct lwp *l)
{
	int error;

	/* XXXSMP notyet KASSERT(mutex_owned(&l->l_swaplock)); */
	KASSERT(l != curlwp);

	error = uarea_swapin(USER_TO_UAREA(l->l_addr));
	if (error) {
		panic("uvm_swapin: rewiring stack failed: %d", error);
	}

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(l);
	lwp_lock(l);
	if (l->l_stat == LSRUN)
		sched_enqueue(l, false);
	l->l_flag |= LW_INMEM;
	l->l_swtime = 0;
	lwp_unlock(l);
	++uvmexp.swapins;
}

/*
 * uvm_kick_scheduler: kick the scheduler into action if not running.
 *
 * - called when swapped-out processes have been awoken.
 */

void
uvm_kick_scheduler(void)
{

	if (uvm.swap_running == false)
		return;

	mutex_enter(&uvm_scheduler_mutex);
	uvm.scheduler_kicked = true;
	cv_signal(&uvm.scheduler_cv);
	mutex_exit(&uvm_scheduler_mutex);
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *   priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler(void)
{
	struct lwp *l, *ll;
	int pri;
	int ppri;

	l = curlwp;
	lwp_lock(l);
	l->l_priority = PRI_VM;
	l->l_class = SCHED_FIFO;
	lwp_unlock(l);

	for (;;) {
#ifdef DEBUG
		mutex_enter(&uvm_scheduler_mutex);
		while (!enableswap)
			cv_wait(&uvm.scheduler_cv, &uvm_scheduler_mutex);
		mutex_exit(&uvm_scheduler_mutex);
#endif
		ll = NULL;		/* process to choose */
		ppri = INT_MIN;		/* its priority */

		mutex_enter(&proclist_lock);
		LIST_FOREACH(l, &alllwp, l_list) {
			/* is it a runnable swapped out process? */
			if (l->l_stat == LSRUN && !(l->l_flag & LW_INMEM)) {
				pri = l->l_swtime + l->l_slptime -
				    (l->l_proc->p_nice - NZERO) * 8;
				if (pri > ppri) {   /* higher priority? */
					ll = l;
					ppri = pri;
				}
			}
		}
#ifdef DEBUG
		if (swapdebug & SDB_FOLLOW)
			printf("scheduler: running, procp %p pri %d\n", ll,
			    ppri);
#endif
		/*
		 * Nothing to do, back to sleep
		 */
		if ((l = ll) == NULL) {
			mutex_exit(&proclist_lock);
			mutex_enter(&uvm_scheduler_mutex);
			if (uvm.scheduler_kicked == false)
				cv_wait(&uvm.scheduler_cv,
				    &uvm_scheduler_mutex);
			uvm.scheduler_kicked = false;
			mutex_exit(&uvm_scheduler_mutex);
			continue;
		}

		/*
		 * we have found a swapped-out process which we would like
		 * to bring back in.
		 *
		 * XXX: this part is really bogus cuz we could deadlock
		 * on memory despite our feeble check
		 */
		if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
			if (swapdebug & SDB_SWAPIN)
				printf("swapin: pid %d(%s)@%p, pri %d "
				    "free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, l->l_addr, ppri,
				    uvmexp.free);
#endif
			mutex_enter(&l->l_swaplock);
			mutex_exit(&proclist_lock);
			uvm_swapin(l);
			mutex_exit(&l->l_swaplock);
			continue;
		} else {
			/*
			 * not enough memory, jab the pageout daemon and
			 * wait til the coast is clear
			 */
			mutex_exit(&proclist_lock);
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("scheduler: no room for pid %d(%s),"
				    " free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, uvmexp.free);
#endif
			uvm_wait("schedpwait");
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("scheduler: room again, free %d\n",
				    uvmexp.free);
#endif
		}
	}
}

/*
 * swappable: is LWP "l" swappable?
 */

static bool
swappable(struct lwp *l)
{

	if ((l->l_flag & (LW_INMEM|LW_RUNNING|LW_SYSTEM|LW_WEXIT)) != LW_INMEM)
		return false;
	if (l->l_holdcnt != 0)
		return false;
	if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
		return false;
	return true;
}

/*
 * swapout_threads: find threads that can be swapped and unwire their
 * u-areas.
 *
 * - called by the pagedaemon
 * - try and swap at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads(void)
{
	struct lwp *l;
	struct lwp *outl, *outl2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	bool gotit;

	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
	 * outl2/outpri2: the longest resident thread (its swap time)
	 */
	outl = outl2 = NULL;
	outpri = outpri2 = 0;

 restart:
	mutex_enter(&proclist_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		KASSERT(l->l_proc != NULL);
		if (!mutex_tryenter(&l->l_swaplock))
			continue;
		if (!swappable(l)) {
			mutex_exit(&l->l_swaplock);
			continue;
		}
		switch (l->l_stat) {
		case LSONPROC:
			break;

		case LSRUN:
			if (l->l_swtime > outpri2) {
				outl2 = l;
				outpri2 = l->l_swtime;
			}
			break;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_slptime >= maxslp) {
				mutex_exit(&proclist_lock);
				uvm_swapout(l);
				/*
				 * Locking in the wrong direction -
				 * try to prevent the LWP from exiting.
				 */
				gotit = mutex_tryenter(&proclist_lock);
				mutex_exit(&l->l_swaplock);
				didswap++;
				if (!gotit)
					goto restart;
				continue;
			} else if (l->l_slptime > outpri) {
				outl = l;
				outpri = l->l_slptime;
			}
			break;
		}
		mutex_exit(&l->l_swaplock);
	}

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((l = outl) == NULL)
			l = outl2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %p\n", l);
#endif
		if (l) {
			mutex_enter(&l->l_swaplock);
			mutex_exit(&proclist_lock);
			if (swappable(l))
				uvm_swapout(l);
			mutex_exit(&l->l_swaplock);
			return;
		}
	}

	mutex_exit(&proclist_lock);
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - must be called with l->l_swaplock held.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(&l->l_swaplock));

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: lid %d.%d(%s)@%p, stat %x pri %d free %d\n",
		    p->p_pid, l->l_lid, p->p_comm, l->l_addr, l->l_stat,
		    l->l_slptime, uvmexp.free);
#endif

	/*
	 * Mark it as (potentially) swapped out.
	 */
	lwp_lock(l);
	if (!swappable(l)) {
		KDASSERT(l->l_cpu != curcpu());
		lwp_unlock(l);
		return;
	}
	l->l_flag &= ~LW_INMEM;
	l->l_swtime = 0;
	if (l->l_stat == LSRUN)
		sched_dequeue(l);
	lwp_unlock(l);
	p->p_stats->p_ru.ru_nswap++;	/* XXXSMP */
	++uvmexp.swapouts;

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(l);

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	uarea_swapout(USER_TO_UAREA(l->l_addr));
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
}

/*
 * uvm_lwp_hold: prevent lwp "l" from being swapped out, and bring
 * it back into memory if it is currently swapped out.
 */

void
uvm_lwp_hold(struct lwp *l)
{

	if (l == curlwp) {
		atomic_inc_uint(&l->l_holdcnt);
	} else {
		mutex_enter(&l->l_swaplock);
		if (atomic_inc_uint_nv(&l->l_holdcnt) == 1 &&
		    (l->l_flag & LW_INMEM) == 0)
			uvm_swapin(l);
		mutex_exit(&l->l_swaplock);
	}
}

/*
 * uvm_lwp_rele: release a hold on lwp "l".  When the hold count
 * drops to zero, it becomes eligible to be swapped again.
 */

void
uvm_lwp_rele(struct lwp *l)
{

	KASSERT(l->l_holdcnt != 0);

	atomic_dec_uint(&l->l_holdcnt);
}

#ifdef COREDUMP
/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 * a core file.
 */

int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	int error;

	entry = NULL;
	vm_map_lock_read(map);
	state.end = 0;
	for (;;) {
		if (entry == NULL)
			entry = map->header.next;
		else if (!uvm_map_lookup_entry(map, state.end, &entry))
			entry = entry->next;
		if (entry == &map->header)
			break;

		state.cookie = cookie;
		if (state.end > entry->start) {
			state.start = state.end;
		} else {
			state.start = entry->start;
		}
		state.realend = entry->end;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

		/*
		 * Dump the region unless one of the following is true:
		 *
		 * (1) the region has neither object nor amap behind it
		 *     (ie. it has never been accessed).
		 *
		 * (2) the region has no amap and is read-only
		 *     (eg. an executable text section).
		 *
		 * (3) the region's object is a device.
		 *
		 * (4) the region is unreadable by the process.
		 */

		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(state.start < VM_MAXUSER_ADDRESS);
		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
		if (entry->object.uvm_obj == NULL &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_WRITE) == 0 &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_READ) == 0) {
			state.realend = state.start;
		} else {
			if (state.start >= (vaddr_t)vm->vm_maxsaddr)
				state.flags |= UVM_COREDUMP_STACK;

			/*
			 * If this is an anonymous entry, only dump
			 * instantiated pages.
			 */
			if (entry->object.uvm_obj == NULL) {
				vaddr_t end;

				amap_lock(entry->aref.ar_amap);
				for (end = state.start;
				     end < state.end; end += PAGE_SIZE) {
					struct vm_anon *anon;
					anon = amap_lookup(&entry->aref,
					    end - entry->start);
					/*
					 * If we have already encountered an
					 * uninstantiated page, stop at the
					 * first instantiated page.
					 */
					if (anon != NULL &&
					    state.realend != state.end) {
						state.end = end;
						break;
					}

					/*
					 * If this page is the first
					 * uninstantiated page, mark this as
					 * the real ending point.  Continue
					 * counting uninstantiated pages.
					 */
					if (anon == NULL &&
					    state.realend == state.end) {
						state.realend = end;
					}
				}
				amap_unlock(entry->aref.ar_amap);
			}
		}

		vm_map_unlock_read(map);
		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
		vm_map_lock_read(map);
	}
	vm_map_unlock_read(map);

	return (0);
}
#endif /* COREDUMP */