/*	$NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
	bool rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return rv;
}
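
/*
 * Example (an illustrative sketch only, not code from mem.c): a
 * /dev/kmem-style read path would gate its copy on uvm_kernacc().
 * The uio-based framing below is an assumption for illustration.
 */
#if 0
	vaddr_t v = (vaddr_t)uio->uio_offset;
	size_t c = MIN(uio->uio_resid, PAGE_SIZE);

	if (!uvm_kernacc((void *)v, c,
	    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
		return EFAULT;	/* no mapping with the required protection */
	error = uiomove((void *)v, c, uio);
#endif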

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect(), a change to allow writing would be
 * lazily applied, meaning we would still take a protection fault,
 * something we really don't want to do.  It would also fragment
 * the kernel map unnecessarily.  We cannot use pmap_protect()
 * since it also won't enforce a write-enable request.  Using
 * pmap_enter() is the only way we can ensure the change takes
 * place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract the physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}
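
/*
 * Example (a sketch modelled on the physio pattern; "bp" and "todo"
 * are hypothetical names): wire the user buffer for the duration of
 * the transfer, then unwire it.  Note that a device *reading* into
 * user memory needs VM_PROT_WRITE on that memory.
 */
#if 0
	error = uvm_vslock(p->p_vmspace, bp->b_data, todo,
	    (bp->b_flags & B_READ) != 0 ? VM_PROT_WRITE : VM_PROT_READ);
	if (error)
		return error;
	/* ... start the transfer and wait for it to complete ... */
	uvm_vsunlock(p->p_vmspace, bp->b_data, todo);
#endif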

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}

/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *   [filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *   stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *   process, and thus addresses of automatic variables may be invalid
 *   after cpu_lwp_fork returns in the child process.  We do nothing here
 *   after cpu_lwp_fork returns.
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{

	/* Fill stack with magic number. */
	kstack_setup_magic(l2);

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first
	 * time slice and will not return here.  If this is a kernel
	 * thread, the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);

	/* Inactive emap for new LWP. */
	l2->l_emap_gen = UVM_EMAP_INACTIVE;
}
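
/*
 * Example (sketch): the NOTE above means nothing forked through this
 * path may rely on the parent's stack frame once cpu_lwp_fork() has
 * run; a kernel-thread entry point should receive all of its state
 * through the "arg" cookie.  "my_worker" and "my_softc" are
 * hypothetical names.
 */
#if 0
static void
my_worker(void *arg)
{
	struct my_softc *sc = arg;	/* state passed explicitly */

	for (;;) {
		/* ... service sc; take no addresses of parent locals ... */
	}
}
#endif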

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{
#if defined(PMAP_MAP_POOLPAGE)
	if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
		struct vm_page *pg;
		vaddr_t va;

		pg = uvm_pagealloc(NULL, 0, NULL,
		    ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
		if (pg == NULL)
			return NULL;
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		if (va == 0)
			uvm_pagefree(pg);
		return (void *)va;
	}
#endif
	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_WIRED |
	    ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{
#if defined(PMAP_MAP_POOLPAGE)
	if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE((vaddr_t)addr);
		KASSERT(pa != 0);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif
	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_WIRED);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL);
}

/*
 * uvm_uarea_alloc: allocate a u-area
 */

vaddr_t
uvm_uarea_alloc(void)
{

	return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr)
{

	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

vaddr_t
uvm_lwp_getuarea(lwp_t *l)
{

	return (vaddr_t)l->l_addr - UAREA_USER_OFFSET;
}

void
uvm_lwp_setuarea(lwp_t *l, vaddr_t addr)
{

	l->l_addr = (void *)(addr + UAREA_USER_OFFSET);
}
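
/*
 * Example (sketch): the two accessors above are inverses of each
 * other, offset by UAREA_USER_OFFSET, and LWP creation pairs them
 * with the pool cache allocator; "l" stands for a freshly allocated
 * lwp here.
 */
#if 0
	vaddr_t uaddr = uvm_uarea_alloc();

	uvm_lwp_setuarea(l, uaddr);
	KASSERT(uvm_lwp_getuarea(l) == uaddr);
	/* ... run the lwp ... */
	uvm_lwp_exit(l);	/* returns the u-area to the cache */
#endif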

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp; /* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	KPREEMPT_DISABLE(l);
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);
	KPREEMPT_ENABLE(l);

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = uvm_lwp_getuarea(l);

	uvm_uarea_free(va);
#ifdef DIAGNOSTIC
	uvm_lwp_setuarea(l, (vaddr_t)NULL);
#endif
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN(
	    VM_MAXUSER_ADDRESS, ctob((rlim_t)uvmexp.free));
}
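
/*
 * Worked example (assuming 4 KB pages): with uvmexp.free == 65536
 * free pages, ctob((rlim_t)uvmexp.free) is 65536 * 4096 bytes, i.e.
 * 256 MB, so the default RSS soft limit becomes
 * min(VM_MAXUSER_ADDRESS, 256 MB).
 */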

/*
 * uvm_scheduler: process zero main loop.
 */
void
uvm_scheduler(void)
{
	lwp_t *l = curlwp;

	lwp_lock(l);
	l->l_priority = PRI_VM;
	l->l_class = SCHED_FIFO;
	lwp_unlock(l);

	for (;;) {
		/* XXX/TODO: move some workload to this LWP? */
		(void)kpause("uvm", false, 0, NULL);
	}
}