/*	$NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
	bool rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return(rv);
}

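/*
 * Illustrative use (a sketch, not part of the original file): the
 * /dev/kmem handler in mem.c checks a range before moving data, along
 * these lines:
 *
 *	if (!uvm_kernacc((void *)v, c, B_READ))
 *		return (EFAULT);
 *	error = uiomove((void *)v, c, uio);
 */
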
#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

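/*
 * Illustrative use of uvm_chgkprot() (a sketch, assuming a KGDB stub
 * that patches kernel text to plant a breakpoint):
 *
 *	uvm_chgkprot(addr, size, B_WRITE);
 *	... overwrite the instruction at addr ...
 *	uvm_chgkprot(addr, size, B_READ);
 */
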
/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}

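/*
 * Typical pairing of the two functions above (a sketch modelled on the
 * physio path; the transfer itself is elided, and VM_PROT_WRITE would
 * be used when the device stores into the user buffer):
 *
 *	error = uvm_vslock(p->p_vmspace, buf, todo, VM_PROT_READ);
 *	if (error)
 *		return error;
 *	... start the I/O and wait for completion ...
 *	uvm_vsunlock(p->p_vmspace, buf, todo);
 */
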
/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}

/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{

	/* Fill stack with magic number. */
	kstack_setup_magic(l2);

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);

	/* Inactive emap for new LWP. */
	l2->l_emap_gen = UVM_EMAP_INACTIVE;
}

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{
#if defined(PMAP_MAP_POOLPAGE)
	if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
		struct vm_page *pg;
		vaddr_t va;

		pg = uvm_pagealloc(NULL, 0, NULL,
		    ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
		if (pg == NULL)
			return NULL;
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		if (va == 0)
			uvm_pagefree(pg);
		return (void *)va;
	}
#endif
	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_WIRED |
	    ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{
#if defined(PMAP_MAP_POOLPAGE)
	if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
		KASSERT(pa != 0);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif
	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_WIRED);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

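/*
 * Note on uvm_uarea_allocator: when the u-area is a single page with no
 * special alignment requirement and the port provides PMAP_MAP_POOLPAGE,
 * pool pages come straight from the page allocator and are mapped
 * through the pmap pool-page hook; otherwise the backend falls back to
 * wired kernel_map memory, honouring USPACE_ALIGN.
 */
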
void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL);
}

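/*
 * Worked example (illustrative): on a port where USPACE is two pages
 * and USPACE_ALIGN is 0, the backend only guarantees page alignment,
 * so PR_NOALIGN is set above and the pool will not assume items are
 * USPACE-aligned.  When USPACE == PAGE_SIZE and USPACE_ALIGN == 0,
 * natural page alignment already suffices and PR_NOALIGN is not set.
 */
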
/*
 * uvm_uarea_alloc: allocate a u-area
 */

vaddr_t
uvm_uarea_alloc(void)
{

	return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr)
{

	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

vaddr_t
uvm_lwp_getuarea(lwp_t *l)
{

	return (vaddr_t)l->l_addr - UAREA_USER_OFFSET;
}

void
uvm_lwp_setuarea(lwp_t *l, vaddr_t addr)
{

	l->l_addr = (void *)(addr + UAREA_USER_OFFSET);
}

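/*
 * Expected life cycle of a u-area (a sketch; in-tree callers are LWP
 * creation and uvm_lwp_exit() below):
 *
 *	vaddr_t uaddr = uvm_uarea_alloc();
 *	uvm_lwp_setuarea(l, uaddr);
 *	...
 *	uvm_uarea_free(uvm_lwp_getuarea(l));
 *
 * uvm_lwp_getuarea() simply inverts uvm_lwp_setuarea(): l_addr stores
 * the u-area base biased by UAREA_USER_OFFSET.
 */
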
/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp; /* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	KPREEMPT_DISABLE(l);
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);
	KPREEMPT_ENABLE(l);

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = uvm_lwp_getuarea(l);

	uvm_uarea_free(va);
#ifdef DIAGNOSTIC
	uvm_lwp_setuarea(l, (vaddr_t)NULL);
#endif
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN(
	    VM_MAXUSER_ADDRESS, ctob((rlim_t)uvmexp.free));
}

/*
 * uvm_scheduler: process zero main loop.
 */
void
uvm_scheduler(void)
{
	lwp_t *l = curlwp;

	lwp_lock(l);
	l->l_priority = PRI_VM;
	l->l_class = SCHED_FIFO;
	lwp_unlock(l);

	for (;;) {
		/* XXX/TODO: move some workload to this LWP? */
		(void)kpause("uvm", false, 0, NULL);
	}
}