/*	$NetBSD: vm.c,v 1.164 2015/04/17 12:43:16 pooka Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation and the Research Foundation of
 * The Helsinki University of Technology.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.164 2015/04/17 12:43:16 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/null.h>
#include <sys/vnode.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_device.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

kmutex_t uvm_pageqlock;		/* non-free page lock */
kmutex_t uvm_fpageqlock;	/* free page lock, non-gpl license */
kmutex_t uvm_swap_data_lock;

struct uvmexp uvmexp;
struct uvm uvm;

#ifdef __uvmexp_pagesize
const int * const uvmexp_pagesize = &uvmexp.pagesize;
const int * const uvmexp_pagemask = &uvmexp.pagemask;
const int * const uvmexp_pageshift = &uvmexp.pageshift;
#endif

struct vm_map rump_vmmap;

static struct vm_map kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store;

static struct vm_map module_map_store;
extern struct vm_map *module_map;

static struct pmap pmap_kernel;
struct pmap rump_pmap_local;
struct pmap *const kernel_pmap_ptr = &pmap_kernel;

vmem_t *kmem_arena;
vmem_t *kmem_va_arena;

static unsigned int pdaemon_waiters;
static kmutex_t pdaemonmtx;
static kcondvar_t pdaemoncv, oomwait;

/* all local non-proc0 processes share this vmspace */
struct vmspace *rump_vmspace_local;

unsigned long rump_physmemlimit = RUMPMEM_UNLIMITED;
static unsigned long pdlimit = RUMPMEM_UNLIMITED; /* page daemon memlimit */
static unsigned long curphysmem;
static unsigned long dddlim;		/* 90% of memory limit used */
#define NEED_PAGEDAEMON() \
    (rump_physmemlimit != RUMPMEM_UNLIMITED && curphysmem > dddlim)
#define PDRESERVE (2*MAXPHYS)

/*
 * Try to free two pages worth of pages from objects.
 * If this successfully frees a full page cache page, we'll
 * free the released page plus PAGE_SIZE/sizeof(vm_page).
 */
#define PAGEDAEMON_OBJCHUNK (2*PAGE_SIZE / sizeof(struct vm_page))
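
/*
 * Worked numbers (illustrative assumptions, not defined here): with
 * MAXPHYS at its customary 64kB, PDRESERVE is 128kB.  With a 4kB page,
 * PAGEDAEMON_OBJCHUNK is 8192/sizeof(struct vm_page) vm_page structs,
 * i.e. a few dozen pages per hunting round in uvm_pageout().
 */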

/*
 * Keep a list of least recently used pages.  Since the only way a
 * rump kernel can "access" a page is via lookup, we put the page
 * at the back of queue every time a lookup for it is done.  If the
 * page is in front of this global queue and we're short of memory,
 * it's a candidate for pageout.
 */
static struct pglist vmpage_lruqueue;
static unsigned vmpage_onqueue;

static int
pg_compare_key(void *ctx, const void *n, const void *key)
{
	voff_t a = ((const struct vm_page *)n)->offset;
	voff_t b = *(const voff_t *)key;

	if (a < b)
		return -1;
	else if (a > b)
		return 1;
	else
		return 0;
}

static int
pg_compare_nodes(void *ctx, const void *n1, const void *n2)
{

	return pg_compare_key(ctx, n1, &((const struct vm_page *)n2)->offset);
}

const rb_tree_ops_t uvm_page_tree_ops = {
	.rbto_compare_nodes = pg_compare_nodes,
	.rbto_compare_key = pg_compare_key,
	.rbto_node_offset = offsetof(struct vm_page, rb_node),
	.rbto_context = NULL
};
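
/*
 * Illustrative sketch, never compiled: with the ops above, an object's
 * pages are indexed by their offset.  The tree lives in each uvm_object
 * and is initialized elsewhere during object setup; this hypothetical
 * helper shows the resulting insert/lookup pattern.
 */
#if 0
static struct vm_page *
example_insert_and_find(struct uvm_object *uobj, struct vm_page *pg,
	voff_t off)
{

	/* node is keyed by pg->offset via pg_compare_nodes() */
	(void)rb_tree_insert_node(&uobj->rb_tree, pg);

	/* lookups pass a pointer to the offset, cf. pg_compare_key() */
	return rb_tree_find_node(&uobj->rb_tree, &off);
}
#endif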

/*
 * vm pages
 */

static int
pgctor(void *arg, void *obj, int flags)
{
	struct vm_page *pg = obj;

	memset(pg, 0, sizeof(*pg));
	pg->uanon = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
	    (flags & PR_WAITOK) == PR_WAITOK, "pgalloc");
	return pg->uanon == NULL;
}

static void
pgdtor(void *arg, void *obj)
{
	struct vm_page *pg = obj;

	rump_hyperfree(pg->uanon, PAGE_SIZE);
}

static struct pool_cache pagecache;
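
/*
 * Note: the cache above is bootstrapped in uvm_init().  Each
 * constructed vm_page carries PAGE_SIZE worth of hypervisor memory
 * in pg->uanon (cf. the XXX comment at the top of this file), so
 * reclaiming the pool cache is what actually returns page memory
 * to the hypervisor.
 */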

/*
 * Called with the object locked.  We don't support anons.
 */
struct vm_page *
uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
	int flags, int strat, int free_list)
{
	struct vm_page *pg;

	KASSERT(uobj && mutex_owned(uobj->vmobjlock));
	KASSERT(anon == NULL);

	pg = pool_cache_get(&pagecache, PR_NOWAIT);
	if (__predict_false(pg == NULL)) {
		return NULL;
	}

	pg->offset = off;
	pg->uobject = uobj;

	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;
	if (flags & UVM_PGA_ZERO) {
		uvm_pagezero(pg);
	}

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	(void)rb_tree_insert_node(&uobj->rb_tree, pg);

	/*
	 * Don't put anons on the LRU page queue.  We can't flush them
	 * (there's no concept of swap in a rump kernel), so no reason
	 * to bother with them.
	 */
	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_inc_uint(&vmpage_onqueue);
		mutex_enter(&uvm_pageqlock);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&uvm_pageqlock);
	}

	uobj->uo_npages++;

	return pg;
}
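
/*
 * Illustrative caller sketch, never compiled: allocation normally goes
 * through the standard uvm_pagealloc() wrapper with the object locked,
 * and a caller which may sleep retries via uvm_wait().  "uobj", "off"
 * and the "rumpga" wchan name are hypothetical.
 */
#if 0
	mutex_enter(uobj->vmobjlock);
	while ((pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO)) == NULL) {
		mutex_exit(uobj->vmobjlock);
		uvm_wait("rumpga");
		mutex_enter(uobj->vmobjlock);
	}
	mutex_exit(uobj->vmobjlock);
#endif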

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	KASSERT(mutex_owned(&uvm_pageqlock));
	KASSERT(mutex_owned(uobj->vmobjlock));

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);

	uobj->uo_npages--;
	rb_tree_remove_node(&uobj->rb_tree, pg);

	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		atomic_dec_uint(&vmpage_onqueue);
	}

	pool_cache_put(&pagecache, pg);
}

void
uvm_pagezero(struct vm_page *pg)
{

	pg->flags &= ~PG_CLEAN;
	memset((void *)pg->uanon, 0, PAGE_SIZE);
}

/*
 * uvm_page_locked_p: return true if object associated with page is
 * locked.  this is a weak check for runtime assertions only.
 */
bool
uvm_page_locked_p(struct vm_page *pg)
{

	return mutex_owned(pg->uobject->vmobjlock);
}

/*
 * Misc routines
 */

static kmutex_t pagermtx;

void
uvm_init(void)
{
	char buf[64];

	if (rumpuser_getparam("RUMP_MEMLIMIT", buf, sizeof(buf)) == 0) {
		unsigned long tmp;
		char *ep;
		int mult;

		tmp = strtoul(buf, &ep, 10);
		if (strlen(ep) > 1)
			panic("uvm_init: invalid RUMP_MEMLIMIT: %s", buf);

		/* mini-dehumanize-number */
		mult = 1;
		switch (*ep) {
		case 'k':
			mult = 1024;
			break;
		case 'm':
			mult = 1024*1024;
			break;
		case 'g':
			mult = 1024*1024*1024;
			break;
		case 0:
			break;
		default:
			panic("uvm_init: invalid RUMP_MEMLIMIT: %s", buf);
		}
		rump_physmemlimit = tmp * mult;

		if (rump_physmemlimit / mult != tmp)
			panic("uvm_init: RUMP_MEMLIMIT overflow: %s", buf);

		/* reserve some memory for the pager */
		if (rump_physmemlimit <= PDRESERVE)
			panic("uvm_init: system reserves %d bytes of mem, "
			    "only %lu bytes given",
			    PDRESERVE, rump_physmemlimit);
		pdlimit = rump_physmemlimit;
		rump_physmemlimit -= PDRESERVE;

		if (pdlimit < 1024*1024)
			printf("uvm_init: WARNING: <1MB RAM limit, "
			    "hope you know what you're doing\n");

#define HUMANIZE_BYTES 9
		CTASSERT(sizeof(buf) >= HUMANIZE_BYTES);
		format_bytes(buf, HUMANIZE_BYTES, rump_physmemlimit);
#undef HUMANIZE_BYTES
		dddlim = 9 * (rump_physmemlimit / 10);
	} else {
		strlcpy(buf, "unlimited (host limit)", sizeof(buf));
	}
	aprint_verbose("total memory = %s\n", buf);
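
	/*
	 * Worked example (assumed input): RUMP_MEMLIMIT=16m parses to
	 * tmp=16, mult=1024*1024, so pdlimit becomes 16MB while
	 * rump_physmemlimit is 16MB minus PDRESERVE, and dddlim ends
	 * up at 90% of the latter.
	 */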

	TAILQ_INIT(&vmpage_lruqueue);

	if (rump_physmemlimit == RUMPMEM_UNLIMITED) {
		uvmexp.npages = physmem;
	} else {
		uvmexp.npages = pdlimit >> PAGE_SHIFT;
		uvmexp.reserve_pagedaemon = PDRESERVE >> PAGE_SHIFT;
		uvmexp.freetarg = (rump_physmemlimit-dddlim) >> PAGE_SHIFT;
	}
	/*
	 * uvmexp.free is not used internally or updated.  The reason is
	 * that the memory hypercall allocator is allowed to allocate
	 * non-page sized chunks.  We use a byte count in curphysmem
	 * instead.
	 */
	uvmexp.free = uvmexp.npages;

#ifndef __uvmexp_pagesize
	uvmexp.pagesize = PAGE_SIZE;
	uvmexp.pagemask = PAGE_MASK;
	uvmexp.pageshift = PAGE_SHIFT;
#else
#define FAKE_PAGE_SHIFT 12
	uvmexp.pageshift = FAKE_PAGE_SHIFT;
	uvmexp.pagesize = 1<<FAKE_PAGE_SHIFT;
	uvmexp.pagemask = (1<<FAKE_PAGE_SHIFT)-1;
#undef FAKE_PAGE_SHIFT
#endif

	mutex_init(&pagermtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, IPL_NONE);

	/* just to appease linkage */
	mutex_init(&uvm_fpageqlock, MUTEX_SPIN, IPL_VM);

	mutex_init(&pdaemonmtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pdaemoncv, "pdaemon");
	cv_init(&oomwait, "oomwait");

	module_map = &module_map_store;

	kernel_map->pmap = pmap_kernel();

	pool_subsystem_init();

	kmem_arena = vmem_create("kmem", 0, 1024*1024, PAGE_SIZE,
	    NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	vmem_subsystem_init(kmem_arena);

	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, kmem_arena,
	    8 * PAGE_SIZE, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	pool_cache_bootstrap(&pagecache, sizeof(struct vm_page), 0, 0, 0,
	    "page$", NULL, IPL_NONE, pgctor, pgdtor, NULL);

	/* create vmspace used by local clients */
	rump_vmspace_local = kmem_zalloc(sizeof(*rump_vmspace_local), KM_SLEEP);
	uvmspace_init(rump_vmspace_local, &rump_pmap_local, 0, 0, false);
}

void
uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax,
	bool topdown)
{

	vm->vm_map.pmap = pmap;
	vm->vm_refcnt = 1;
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

/* where's your schmonz now? */
#define PUNLIMIT(a)	\
    p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
void
uvm_init_limits(struct proc *p)
{

#ifndef DFLSSIZ
#define DFLSSIZ (16*1024*1024)
#endif
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	PUNLIMIT(RLIMIT_DATA);
	PUNLIMIT(RLIMIT_RSS);
	PUNLIMIT(RLIMIT_AS);
	/* nice, cascade */
}
#undef PUNLIMIT

/*
 * This satisfies the "disgusting mmap hack" used by proplib.
 */
int
uvm_mmap_anon(struct proc *p, void **addrp, size_t size)
{
	int error;

	/* no reason in particular, but cf. uvm_default_mapaddr() */
	if (*addrp != NULL)
		panic("uvm_mmap() variant unsupported");

	if (RUMP_LOCALPROC_P(curproc)) {
		error = rumpuser_anonmmap(NULL, size, 0, 0, addrp);
	} else {
		error = rump_sysproxy_anonmmap(p->p_vmspace->vm_map.pmap,
		    size, addrp);
	}
	return error;
}

/*
 * Stubs for things referenced from vfs_vnode.c but not used.
 */
const dev_t zerodev;

struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	return NULL;
}

struct pagerinfo {
	vaddr_t pgr_kva;
	int pgr_npages;
	struct vm_page **pgr_pgs;
	bool pgr_read;

	LIST_ENTRY(pagerinfo) pgr_entries;
};
static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);

/*
 * Pager "map" in routine.  Instead of mapping, we allocate memory
 * and copy page contents there.  The reason for copying instead of
 * mapping is simple: we do not assume we are running on virtual
 * memory.  Even if we could emulate virtual memory in some envs
 * such as userspace, copying is much faster than trying to awkwardly
 * cope with remapping (see "Design and Implementation" pp. 95-98).
 * The downside of the approach is that the pager requires MAXPHYS
 * free memory to perform paging, but short of virtual memory or
 * making the pager do I/O in page-sized chunks we cannot do much
 * about that.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	/* allocate structures */
	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
	pgri->pgr_npages = npages;
	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;

	/* copy contents to "mapped" memory */
	for (i = 0, curkva = pgri->pgr_kva;
	    i < npages;
	    i++, curkva += PAGE_SIZE) {
		/*
		 * We need to copy the previous contents of the pages to
		 * the window even if we are reading from the
		 * device, since the device might not fill the contents of
		 * the full mapped range and we will end up corrupting
		 * data when we unmap the window.
		 */
		memcpy((void *)curkva, pgs[i]->uanon, PAGE_SIZE);
		pgri->pgr_pgs[i] = pgs[i];
	}

	mutex_enter(&pagermtx);
	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
	mutex_exit(&pagermtx);

	return pgri->pgr_kva;
}

/*
 * map out the pager window.  return contents from VA to page storage
 * and free structures.
 *
 * Note: does not currently support partial frees
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva == kva)
			break;
	}
	KASSERT(pgri);
	if (pgri->pgr_npages != npages)
		panic("uvm_pagermapout: partial unmapping not supported");
	LIST_REMOVE(pgri, pgr_entries);
	mutex_exit(&pagermtx);

	if (pgri->pgr_read) {
		for (i = 0, curkva = pgri->pgr_kva;
		    i < pgri->pgr_npages;
		    i++, curkva += PAGE_SIZE) {
			memcpy(pgri->pgr_pgs[i]->uanon,
			    (void *)curkva, PAGE_SIZE);
		}
	}

	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
	kmem_free((void *)pgri->pgr_kva, npages * PAGE_SIZE);
	kmem_free(pgri, sizeof(*pgri));
}
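
/*
 * Illustrative pager usage, never compiled: a getpages/putpages
 * implementation brackets its I/O with the mapin/mapout pair above.
 * "pgs", "npages" and do_io() are hypothetical stand-ins here.
 */
#if 0
	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_READ);
	error = do_io((void *)kva, npages << PAGE_SHIFT);
	uvm_pagermapout(kva, npages);	/* copies the data back to the pages */
#endif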

/*
 * convert va in pager window to page structure.
 * XXX: how expensive is this (global lock, list traversal)?
 */
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct pagerinfo *pgri;
	struct vm_page *pg = NULL;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva <= va
		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
			break;
	}
	if (pgri) {
		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
		pg = pgri->pgr_pgs[i];
	}
	mutex_exit(&pagermtx);

	return pg;
}

/*
 * Called with the vm object locked.
 *
 * Put vnode object pages at the end of the access queue to indicate
 * they have been recently accessed and should not be immediate
 * candidates for pageout.  Do not do this for lookups done by
 * the pagedaemon to mimic pmap_kentered mappings which don't track
 * access information.
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;
	bool ispagedaemon = curlwp == uvm.pagedaemon_lwp;

	pg = rb_tree_find_node(&uobj->rb_tree, &off);
	if (pg && !UVM_OBJ_IS_AOBJ(pg->uobject) && !ispagedaemon) {
		mutex_enter(&uvm_pageqlock);
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&uvm_pageqlock);
	}

	return pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	KASSERT(npgs > 0);
	KASSERT(mutex_owned(pgs[0]->uobject->vmobjlock));

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	if (map->flags & VM_MAP_WANTVA)
		return true;

	return false;
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	return EBUSY;
}

struct vm_page *
uvm_loanbreak(struct vm_page *pg)
{

	panic("%s: unimplemented", __func__);
}

void
ubc_purge(struct uvm_object *uobj)
{

}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return 0;
}

int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
	vm_prot_t prot, bool set_max)
{

	return EOPNOTSUPP;
}

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv, *desired = NULL;
	int alignbit, error;

#ifdef __x86_64__
	/*
	 * On amd64, allocate all module memory from the lowest 2GB.
	 * This is because NetBSD kernel modules are compiled
	 * with -mcmodel=kernel and reserve only 4 bytes for
	 * offsets.  If we load code compiled with -mcmodel=kernel
	 * anywhere except the lowest or highest 2GB, it will not
	 * work.  Since userspace does not have access to the highest
	 * 2GB, use the lowest 2GB.
	 *
	 * Note: this assumes the rump kernel resides in
	 * the lowest 2GB as well.
	 *
	 * Note2: yes, it's a quick hack, but since this is the only
	 * place where we care about the map we're allocating from,
	 * just use a simple "if" instead of coming up with a fancy
	 * generic solution.
	 */
	if (map == module_map) {
		desired = (void *)(0x80000000 - size);
	}
#endif

	if (__predict_false(map == module_map)) {
		alignbit = 0;
		if (align) {
			alignbit = ffs(align)-1;
		}
		error = rumpuser_anonmmap(desired, size, alignbit,
		    flags & UVM_KMF_EXEC, &rv);
	} else {
		error = rumpuser_malloc(size, align, &rv);
	}

	if (error) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	if (__predict_false(map == module_map))
		rumpuser_unmap((void *)vaddr, size);
	else
		rumpuser_free((void *)vaddr, size);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map *submap)
{

	return (struct vm_map *)417416;
}

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
	vmem_addr_t *addr)
{
	vaddr_t va;
	va = (vaddr_t)rump_hypermalloc(size, PAGE_SIZE,
	    (flags & VM_SLEEP), "kmalloc");

	if (va) {
		*addr = va;
		return 0;
	} else {
		return ENOMEM;
	}
}

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	rump_hyperfree((void *)addr, size);
}

/*
 * VM space locking routines.  We don't really have to do anything,
 * since the pages are always "wired" (both local and remote processes).
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

}

/*
 * For the local case the buffer mappers don't need to do anything.
 * For the remote case we need to reserve space and copy data in or
 * out, depending on B_READ/B_WRITE.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	int error = 0;

	bp->b_saveaddr = bp->b_data;

	/* remote case */
	if (!RUMP_LOCALPROC_P(curproc)) {
		bp->b_data = rump_hypermalloc(len, 0, true, "vmapbuf");
		if (BUF_ISWRITE(bp)) {
			error = copyin(bp->b_saveaddr, bp->b_data, len);
			if (error) {
				rump_hyperfree(bp->b_data, len);
				bp->b_data = bp->b_saveaddr;
				bp->b_saveaddr = 0;
			}
		}
	}

	return error;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	/* remote case */
	if (!RUMP_LOCALPROC_P(bp->b_proc)) {
		if (BUF_ISREAD(bp)) {
			bp->b_error = copyout_proc(bp->b_proc,
			    bp->b_data, bp->b_saveaddr, len);
		}
		rump_hyperfree(bp->b_data, len);
	}

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
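
/*
 * Illustrative call pattern, never compiled: physio-style code maps
 * the client buffer before I/O and unmaps it afterwards.  "bp", "len"
 * and the I/O step are hypothetical.
 */
#if 0
	if ((error = vmapbuf(bp, len)) != 0)
		return error;
	/* ... perform I/O through bp->b_data ... */
	vunmapbuf(bp, len);
#endif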

void
uvmspace_addref(struct vmspace *vm)
{

	/*
	 * No dynamically allocated vmspaces exist.
	 */
}

void
uvmspace_free(struct vmspace *vm)
{

	/* nothing for now */
}

/*
 * page life cycle stuff.  it really doesn't exist, so just stubs.
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedeactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedequeue(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageenqueue(struct vm_page *pg)
{

	/* nada */
}

void
uvmpdpol_anfree(struct vm_anon *an)
{

	/* nada */
}

/*
 * Physical address accessors.
 */

struct vm_page *
uvm_phys_to_vm_page(paddr_t pa)
{

	return NULL;
}

paddr_t
uvm_vm_page_to_phys(const struct vm_page *pg)
{

	return 0;
}

vaddr_t
uvm_uarea_alloc(void)
{

	/* non-zero */
	return (vaddr_t)11;
}

void
uvm_uarea_free(vaddr_t uarea)
{

	/* nata, so creamy */
}

/*
 * Routines related to the Page Baroness.
 */

void
uvm_wait(const char *msg)
{

	if (__predict_false(rump_threads == 0))
		panic("pagedaemon missing (RUMP_THREADS = 0)");

	if (curlwp == uvm.pagedaemon_lwp) {
		/* is it possible for us to later get memory? */
		if (!uvmexp.paging)
			panic("pagedaemon out of memory");
	}

	mutex_enter(&pdaemonmtx);
	pdaemon_waiters++;
	cv_signal(&pdaemoncv);
	cv_wait(&oomwait, &pdaemonmtx);
	mutex_exit(&pdaemonmtx);
}

void
uvm_pageout_start(int npages)
{

	mutex_enter(&pdaemonmtx);
	uvmexp.paging += npages;
	mutex_exit(&pdaemonmtx);
}

void
uvm_pageout_done(int npages)
{

	if (!npages)
		return;

	mutex_enter(&pdaemonmtx);
	KASSERT(uvmexp.paging >= npages);
	uvmexp.paging -= npages;

	if (pdaemon_waiters) {
		pdaemon_waiters = 0;
		cv_broadcast(&oomwait);
	}
	mutex_exit(&pdaemonmtx);
}
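
/*
 * Note on the accounting above: uvm_pageout_start()/uvm_pageout_done()
 * bracket asynchronous pageout I/O.  uvm_wait() consults uvmexp.paging
 * to decide whether the pagedaemon itself may block and hope for memory
 * to appear later, so the two must always be called in pairs.
 */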

static bool
processpage(struct vm_page *pg, bool *lockrunning)
{
	struct uvm_object *uobj;

	uobj = pg->uobject;
	if (mutex_tryenter(uobj->vmobjlock)) {
		if ((pg->flags & PG_BUSY) == 0) {
			mutex_exit(&uvm_pageqlock);
			uobj->pgops->pgo_put(uobj, pg->offset,
			    pg->offset + PAGE_SIZE,
			    PGO_CLEANIT|PGO_FREE);
			KASSERT(!mutex_owned(uobj->vmobjlock));
			return true;
		} else {
			mutex_exit(uobj->vmobjlock);
		}
	} else if (*lockrunning == false && ncpu > 1) {
		CPU_INFO_ITERATOR cii;
		struct cpu_info *ci;
		struct lwp *l;

		l = mutex_owner(uobj->vmobjlock);
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci->ci_curlwp == l) {
				*lockrunning = true;
				break;
			}
		}
	}

	return false;
}

/*
 * The Diabolical pageDaemon Director (DDD).
 *
 * This routine can always use better heuristics.
 */
void
uvm_pageout(void *arg)
{
	struct vm_page *pg;
	struct pool *pp, *pp_first;
	int cleaned, skip, skipped;
	bool succ;
	bool lockrunning;

	mutex_enter(&pdaemonmtx);
	for (;;) {
		if (!NEED_PAGEDAEMON()) {
			kernel_map->flags &= ~VM_MAP_WANTVA;
		}

		if (pdaemon_waiters) {
			pdaemon_waiters = 0;
			cv_broadcast(&oomwait);
		}

		cv_wait(&pdaemoncv, &pdaemonmtx);
		uvmexp.pdwoke++;

		/* tell the world that we are hungry */
		kernel_map->flags |= VM_MAP_WANTVA;
		mutex_exit(&pdaemonmtx);

		/*
		 * step one: reclaim the page cache.  this should give
		 * us the biggest earnings since whole pages are released
		 * into backing memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * Ok, so that didn't help.  Next, try to hunt memory
		 * by pushing out vnode pages.  The pages might contain
		 * useful cached data, but we need the memory.
		 */
		cleaned = 0;
		skip = 0;
		lockrunning = false;
 again:
		mutex_enter(&uvm_pageqlock);
		while (cleaned < PAGEDAEMON_OBJCHUNK) {
			skipped = 0;
			TAILQ_FOREACH(pg, &vmpage_lruqueue, pageq.queue) {

				/*
				 * skip over pages we _might_ have tried
				 * to handle earlier.  they might not be
				 * exactly the same ones, but I'm not too
				 * concerned.
				 */
				while (skipped++ < skip)
					continue;

				if (processpage(pg, &lockrunning)) {
					cleaned++;
					goto again;
				}

				skip++;
			}
			break;
		}
		mutex_exit(&uvm_pageqlock);

		/*
		 * Ok, someone is running with an object lock held.
		 * We want to yield the host CPU to make sure the
		 * thread is not parked on the host.  Since sched_yield()
		 * doesn't appear to do anything on NetBSD, nanosleep
		 * for the smallest possible time and hope we're back in
		 * the game soon.
		 */
		if (cleaned == 0 && lockrunning) {
			rumpuser_clock_sleep(RUMPUSER_CLOCK_RELWALL, 0, 1);

			lockrunning = false;
			skip = 0;

			/* and here we go again */
			goto again;
		}

		/*
		 * And of course we need to reclaim the page cache
		 * again to actually release memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * And then drain the pools.  Wipe them out ... all of them.
		 */
		for (pp_first = NULL;;) {
			rump_vfs_drainbufs(10 /* XXX: estimate! */);

			succ = pool_drain(&pp);
			if (succ || pp == pp_first)
				break;

			if (pp_first == NULL)
				pp_first = pp;
		}

		/*
		 * Need to use PYEC on our bag of tricks.
		 * Unfortunately, the wife just borrowed it.
		 */

		mutex_enter(&pdaemonmtx);
		if (!succ && cleaned == 0 && pdaemon_waiters &&
		    uvmexp.paging == 0) {
			rumpuser_dprintf("pagedaemoness: failed to reclaim "
			    "memory ... sleeping (deadlock?)\n");
			cv_timedwait(&pdaemoncv, &pdaemonmtx, hz);
		}
	}

	panic("you can swap out any time you like, but you can never leave");
}

void
uvm_kick_pdaemon(void)
{

	/*
	 * Wake up the diabolical pagedaemon director if we are over
	 * 90% of the memory limit.  This is a complete and utter
	 * stetson-harrison decision which you are allowed to finetune.
	 * Don't bother locking.  If we have some unflushed caches,
	 * other waker-uppers will deal with the issue.
	 */
	if (NEED_PAGEDAEMON()) {
		cv_signal(&pdaemoncv);
	}
}

void *
rump_hypermalloc(size_t howmuch, int alignment, bool waitok, const char *wmsg)
{
	const unsigned long thelimit =
	    curlwp == uvm.pagedaemon_lwp ? pdlimit : rump_physmemlimit;
	unsigned long newmem;
	void *rv;
	int error;

	uvm_kick_pdaemon(); /* ouch */

	/* first we must be within the limit */
 limitagain:
	if (thelimit != RUMPMEM_UNLIMITED) {
		newmem = atomic_add_long_nv(&curphysmem, howmuch);
		if (newmem > thelimit) {
			newmem = atomic_add_long_nv(&curphysmem, -howmuch);
			if (!waitok) {
				return NULL;
			}
			uvm_wait(wmsg);
			goto limitagain;
		}
	}

	/* second, we must get something from the backend */
 again:
	error = rumpuser_malloc(howmuch, alignment, &rv);
	if (__predict_false(error && waitok)) {
		uvm_wait(wmsg);
		goto again;
	}

	return rv;
}

void
rump_hyperfree(void *what, size_t size)
{

	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		atomic_add_long(&curphysmem, -size);
	}
	rumpuser_free(what, size);
}
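
/*
 * Illustrative usage, never compiled: hypercall memory is accounted
 * in bytes, so a free must quote the same size as the allocation.
 */
#if 0
	void *mem;

	mem = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE, true, "example");
	memset(mem, 0, PAGE_SIZE);
	rump_hyperfree(mem, PAGE_SIZE);
#endif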