/*	$NetBSD: vm.c,v 1.101 2010/11/17 19:54:09 pooka Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation and the Research Foundation of
 * The Helsinki University of Technology.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */
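
/*
 * Illustrative sketch: since pg->uanon doubles as the storage
 * pointer, touching a page's contents is a plain cast and copy,
 * exactly as uvm_pagezero() and the pager routines below do:
 *
 *	void *storage = (void *)pg->uanon;
 *	memset(storage, 0, PAGE_SIZE);
 */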

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.101 2010/11/17 19:54:09 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/null.h>
#include <sys/vnode.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

kmutex_t uvm_pageqlock;
kmutex_t uvm_swap_data_lock;

struct uvmexp uvmexp;
int *uvmexp_pagesize;
int *uvmexp_pagemask;
int *uvmexp_pageshift;
struct uvm uvm;

struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

static unsigned int pdaemon_waiters;
static kmutex_t pdaemonmtx;
static kcondvar_t pdaemoncv, oomwait;

unsigned long rump_physmemlimit = RUMPMEM_UNLIMITED;
static unsigned long curphysmem;
static unsigned long dddlim;		/* 90% of memory limit used */
#define NEED_PAGEDAEMON() \
    (rump_physmemlimit != RUMPMEM_UNLIMITED && curphysmem > dddlim)
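
/*
 * Worked example (hypothetical limit): booting with
 * RUMP_MEMLIMIT=10485760 (10MB) makes uvm_init() below set
 * dddlim = 9 * (10485760 / 10) = 9437184, so NEED_PAGEDAEMON()
 * starts firing once curphysmem exceeds roughly 9MB.
 */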

/*
 * Try to free two pages worth of pages from objects.
 * If this successfully frees a full page cache page, we'll
 * free the released page plus PAGE_SIZE/sizeof(vm_page).
 */
#define PAGEDAEMON_OBJCHUNK (2*PAGE_SIZE / sizeof(struct vm_page))

/*
 * Keep a list of least recently used pages.  Since the only way a
 * rump kernel can "access" a page is via lookup, we put the page
 * at the back of queue every time a lookup for it is done.  If the
 * page is in front of this global queue and we're short of memory,
 * it's a candidate for pageout.
 */
static struct pglist vmpage_lruqueue;
static unsigned vmpage_onqueue;

static int
pg_compare_key(void *ctx, const void *n, const void *key)
{
	voff_t a = ((const struct vm_page *)n)->offset;
	voff_t b = *(const voff_t *)key;

	if (a < b)
		return -1;
	else if (a > b)
		return 1;
	else
		return 0;
}

static int
pg_compare_nodes(void *ctx, const void *n1, const void *n2)
{

	return pg_compare_key(ctx, n1, &((const struct vm_page *)n2)->offset);
}

const rb_tree_ops_t uvm_page_tree_ops = {
	.rbto_compare_nodes = pg_compare_nodes,
	.rbto_compare_key = pg_compare_key,
	.rbto_node_offset = offsetof(struct vm_page, rb_node),
	.rbto_context = NULL
};
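
/*
 * Minimal usage sketch (assumed caller, not from this file): the
 * per-object page tree is initialized with these ops and then
 * queried by offset, cf. uvm_pagelookup() below:
 *
 *	rb_tree_init(&uobj->rb_tree, &uvm_page_tree_ops);
 *	pg = rb_tree_find_node(&uobj->rb_tree, &off);
 */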

/*
 * vm pages
 */

static int
pgctor(void *arg, void *obj, int flags)
{
	struct vm_page *pg = obj;

	memset(pg, 0, sizeof(*pg));
	pg->uanon = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE, true, "pgalloc");
	return 0;
}

static void
pgdtor(void *arg, void *obj)
{
	struct vm_page *pg = obj;

	rump_hyperfree(pg->uanon, PAGE_SIZE);
}

static struct pool_cache pagecache;

/*
 * Called with the object locked.  We don't support anons.
 */
struct vm_page *
uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
	int flags, int strat, int free_list)
{
	struct vm_page *pg;

	KASSERT(uobj && mutex_owned(&uobj->vmobjlock));
	KASSERT(anon == NULL);

	pg = pool_cache_get(&pagecache, PR_WAITOK);
	pg->offset = off;
	pg->uobject = uobj;

	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;
	if (flags & UVM_PGA_ZERO) {
		uvm_pagezero(pg);
	}

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	(void)rb_tree_insert_node(&uobj->rb_tree, pg);

	/*
	 * Don't put anons on the LRU page queue.  We can't flush them
	 * (there's no concept of swap in a rump kernel), so no reason
	 * to bother with them.
	 */
	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_inc_uint(&vmpage_onqueue);
		mutex_enter(&uvm_pageqlock);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&uvm_pageqlock);
	}

	uobj->uo_npages++;

	return pg;
}
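
/*
 * Caller sketch (illustrative): consumers normally go through the
 * uvm_pagealloc() macro, holding the object lock and passing no anon:
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
 *	mutex_exit(&uobj->vmobjlock);
 */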

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	KASSERT(mutex_owned(&uvm_pageqlock));
	KASSERT(mutex_owned(&uobj->vmobjlock));

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);

	uobj->uo_npages--;
	rb_tree_remove_node(&uobj->rb_tree, pg);

	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		atomic_dec_uint(&vmpage_onqueue);
	}

	pool_cache_put(&pagecache, pg);
}
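
/*
 * Lock sketch (assuming the usual UVM order of object lock before
 * page queue lock): a caller freeing a page holds both locks, per
 * the KASSERTs above:
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	mutex_enter(&uvm_pageqlock);
 *	uvm_pagefree(pg);
 *	mutex_exit(&uvm_pageqlock);
 *	mutex_exit(&uobj->vmobjlock);
 */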

void
uvm_pagezero(struct vm_page *pg)
{

	pg->flags &= ~PG_CLEAN;
	memset((void *)pg->uanon, 0, PAGE_SIZE);
}

/*
 * Misc routines
 */

static kmutex_t pagermtx;

void
uvm_init(void)
{
	char buf[64];
	int error;

	if (rumpuser_getenv("RUMP_MEMLIMIT", buf, sizeof(buf), &error) == 0) {
		rump_physmemlimit = strtoll(buf, NULL, 10);
		/* it's not like we'd get far with, say, 1 byte, but ... */
		if (rump_physmemlimit == 0)
			panic("uvm_init: no memory available");
#define HUMANIZE_BYTES 9
		CTASSERT(sizeof(buf) >= HUMANIZE_BYTES);
		format_bytes(buf, HUMANIZE_BYTES, rump_physmemlimit);
#undef HUMANIZE_BYTES
		dddlim = 9 * (rump_physmemlimit / 10);
	} else {
		strlcpy(buf, "unlimited (host limit)", sizeof(buf));
	}
	aprint_verbose("total memory = %s\n", buf);

	TAILQ_INIT(&vmpage_lruqueue);

	uvmexp.free = 1024*1024; /* XXX: arbitrary & not updated */

	mutex_init(&pagermtx, MUTEX_DEFAULT, 0);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, 0);
	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, 0);

	mutex_init(&pdaemonmtx, MUTEX_DEFAULT, 0);
	cv_init(&pdaemoncv, "pdaemon");
	cv_init(&oomwait, "oomwait");

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);

	pool_cache_bootstrap(&pagecache, sizeof(struct vm_page), 0, 0, 0,
	    "page$", NULL, IPL_NONE, pgctor, pgdtor, NULL);
}
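
/*
 * Usage sketch (hypothetical host program): the limit parsed above
 * is a plain byte count passed through the host environment before
 * the rump kernel boots, e.g. 33554432 for a 32MB limit:
 *
 *	setenv("RUMP_MEMLIMIT", "33554432", 1);
 *	rump_init();
 */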

void
uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
{

	vm->vm_map.pmap = pmap_kernel();
	vm->vm_refcnt = 1;
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

/* where's your schmonz now? */
#define PUNLIMIT(a)	\
    p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
void
uvm_init_limits(struct proc *p)
{

	PUNLIMIT(RLIMIT_STACK);
	PUNLIMIT(RLIMIT_DATA);
	PUNLIMIT(RLIMIT_RSS);
	PUNLIMIT(RLIMIT_AS);
	/* nice, cascade */
}
#undef PUNLIMIT

/*
 * This satisfies the "disgusting mmap hack" used by proplib.
 * We probably should grow some more assertables to make sure we're
 * not satisfying anything we shouldn't be satisfying.  At least we
 * should make sure it's the local machine we're mmapping ...
 */
int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{
	void *uaddr;
	int error;

	if (prot != (VM_PROT_READ | VM_PROT_WRITE))
		panic("uvm_mmap() variant unsupported");
	if (flags != (MAP_PRIVATE | MAP_ANON))
		panic("uvm_mmap() variant unsupported");

	/* no reason in particular, but cf. uvm_default_mapaddr() */
	if (*addr != 0)
		panic("uvm_mmap() variant unsupported");

	if (curproc->p_vmspace == vmspace_kernel()) {
		uaddr = rumpuser_anonmmap(NULL, size, 0, 0, &error);
	} else {
		error = rumpuser_sp_anonmmap(size, &uaddr);
	}
	if (uaddr == NULL)
		return error;

	*addr = (vaddr_t)uaddr;
	return 0;
}
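
/*
 * Illustrative call (mirroring the anonymous-memory usage this
 * stub exists to serve; names here are hypothetical):
 *
 *	vaddr_t va = 0;
 *	error = uvm_mmap(map, &va, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, NULL, 0, 0);
 */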

struct pagerinfo {
	vaddr_t pgr_kva;
	int pgr_npages;
	struct vm_page **pgr_pgs;
	bool pgr_read;

	LIST_ENTRY(pagerinfo) pgr_entries;
};
static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);

/*
 * Pager "map" in routine.  Instead of mapping, we allocate memory
 * and copy page contents there.  Not optimal or even strictly
 * correct (the caller might modify the page contents after mapping
 * them in), but what the heck.  Assumes UVMPAGER_MAPIN_WAITOK.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	/* allocate structures */
	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
	pgri->pgr_npages = npages;
	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;

	/* copy contents to "mapped" memory */
	for (i = 0, curkva = pgri->pgr_kva;
	    i < npages;
	    i++, curkva += PAGE_SIZE) {
		/*
		 * We need to copy the previous contents of the pages to
		 * the window even if we are reading from the
		 * device, since the device might not fill the contents of
		 * the full mapped range and we will end up corrupting
		 * data when we unmap the window.
		 */
		memcpy((void *)curkva, pgs[i]->uanon, PAGE_SIZE);
		pgri->pgr_pgs[i] = pgs[i];
	}

	mutex_enter(&pagermtx);
	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
	mutex_exit(&pagermtx);

	return pgri->pgr_kva;
}

/*
 * Map out the pager window.  Return contents from VA to page storage
 * and free structures.
 *
 * Note: does not currently support partial frees.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva == kva)
			break;
	}
	KASSERT(pgri);
	if (pgri->pgr_npages != npages)
		panic("uvm_pagermapout: partial unmapping not supported");
	LIST_REMOVE(pgri, pgr_entries);
	mutex_exit(&pagermtx);

	if (pgri->pgr_read) {
		for (i = 0, curkva = pgri->pgr_kva;
		    i < pgri->pgr_npages;
		    i++, curkva += PAGE_SIZE) {
			memcpy(pgri->pgr_pgs[i]->uanon,
			    (void *)curkva, PAGE_SIZE);
		}
	}

	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
	kmem_free((void *)pgri->pgr_kva, npages * PAGE_SIZE);
	kmem_free(pgri, sizeof(*pgri));
}
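
/*
 * Pairing sketch (illustrative): a pager maps pages in around I/O
 * and maps them back out afterwards, e.g.:
 *
 *	kva = uvm_pagermapin(pgs, npages,
 *	    UVMPAGER_MAPIN_WAITOK | UVMPAGER_MAPIN_READ);
 *	(perform I/O into the window at kva)
 *	uvm_pagermapout(kva, npages);
 */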

/*
 * Convert va in pager window to page structure.
 * XXX: how expensive is this (global lock, list traversal)?
 */
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct pagerinfo *pgri;
	struct vm_page *pg = NULL;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva <= va
		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
			break;
	}
	if (pgri) {
		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
		pg = pgri->pgr_pgs[i];
	}
	mutex_exit(&pagermtx);

	return pg;
}

/*
 * Called with the vm object locked.
 *
 * Put vnode object pages at the end of the access queue to indicate
 * they have been recently accessed and should not be immediate
 * candidates for pageout.  Do not do this for lookups done by
 * the pagedaemon to mimic pmap_kentered mappings which don't track
 * access information.
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;
	bool ispagedaemon = curlwp == uvm.pagedaemon_lwp;

	pg = rb_tree_find_node(&uobj->rb_tree, &off);
	if (pg && !UVM_OBJ_IS_AOBJ(pg->uobject) && !ispagedaemon) {
		mutex_enter(&uvm_pageqlock);
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&uvm_pageqlock);
	}

	return pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	KASSERT(npgs > 0);
	KASSERT(mutex_owned(&pgs[0]->uobject->vmobjlock));

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	if (map->flags & VM_MAP_WANTVA)
		return true;

	return false;
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	return EBUSY;
}

#ifdef DEBUGPRINT
void
uvm_object_printit(struct uvm_object *uobj, bool full,
    void (*pr)(const char *, ...))
{

	pr("VM OBJECT at %p, refs %d", uobj, uobj->uo_refs);
}
#endif

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return 0;
}

int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
	vm_prot_t prot, bool set_max)
{

	return EOPNOTSUPP;
}

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv, *desired = NULL;
	int alignbit, error;

#ifdef __x86_64__
	/*
	 * On amd64, allocate all module memory from the lowest 2GB.
	 * This is because NetBSD kernel modules are compiled
	 * with -mcmodel=kernel and reserve only 32 bits (4 bytes)
	 * for offsets.  If we load code compiled with -mcmodel=kernel
	 * anywhere except the lowest or highest 2GB, it will not
	 * work.  Since userspace does not have access to the highest
	 * 2GB, use the lowest 2GB.
	 *
	 * Note: this assumes the rump kernel resides in
	 * the lowest 2GB as well.
	 *
	 * Note2: yes, it's a quick hack, but since this is the only
	 * place where we care about the map we're allocating from,
	 * just use a simple "if" instead of coming up with a fancy
	 * generic solution.
	 */
	extern struct vm_map *module_map;
	if (map == module_map) {
		desired = (void *)(0x80000000 - size);
	}
#endif

	alignbit = 0;
	if (align) {
		alignbit = ffs(align)-1;
	}

	rv = rumpuser_anonmmap(desired, size, alignbit, flags & UVM_KMF_EXEC,
	    &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}
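
/*
 * Usage sketch (illustrative): allocate and release a zeroed chunk
 * of kernel memory; the flags mirror the regular UVM interface:
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_ZERO);
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, 0);
 */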

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
	    waitok, "kmalloc");
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rump_hyperfree((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{

	return uvm_km_alloc_poolpage(map, waitok);
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	uvm_km_free_poolpage(map, vaddr);
}

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{

	/* we eventually maybe want some model for available memory */
}

/*
 * Mapping and vm space locking routines.
 * XXX: these don't work for non-local vmspaces
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	KASSERT(vs == vmspace_kernel());
	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	KASSERT(vs == vmspace_kernel());
}

void
vmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_saveaddr = bp->b_data;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

void
uvmspace_addref(struct vmspace *vm)
{

	/*
	 * there is only vmspace0.  we're not planning on
	 * feeding it to the fishes.
	 */
}

void
uvmspace_free(struct vmspace *vm)
{

	/* nothing for now */
}

int
uvm_io(struct vm_map *map, struct uio *uio)
{

	/*
	 * just do direct uio for now.  but this needs some vmspace
	 * olympics for rump_sysproxy.
	 */
	return uiomove((void *)(vaddr_t)uio->uio_offset, uio->uio_resid, uio);
}

/*
 * page life cycle stuff.  it really doesn't exist, so just stubs.
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedeactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedequeue(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageenqueue(struct vm_page *pg)
{

	/* nada */
}

void
uvmpdpol_anfree(struct vm_anon *an)
{

	/* nada */
}

/*
 * Physical address accessors.
 */

struct vm_page *
uvm_phys_to_vm_page(paddr_t pa)
{

	return NULL;
}

paddr_t
uvm_vm_page_to_phys(const struct vm_page *pg)
{

	return 0;
}

/*
 * Routines related to the Page Baroness.
 */

void
uvm_wait(const char *msg)
{

	if (__predict_false(curlwp == uvm.pagedaemon_lwp))
		panic("pagedaemon out of memory");
	if (__predict_false(rump_threads == 0))
		panic("pagedaemon missing (RUMP_THREADS = 0)");

	mutex_enter(&pdaemonmtx);
	pdaemon_waiters++;
	cv_signal(&pdaemoncv);
	cv_wait(&oomwait, &pdaemonmtx);
	mutex_exit(&pdaemonmtx);
}

void
uvm_pageout_start(int npages)
{

	/* we don't have the heuristics */
}

void
uvm_pageout_done(int npages)
{

	/* could wakeup waiters, but just let the pagedaemon do it */
}

static bool
processpage(struct vm_page *pg)
{
	struct uvm_object *uobj;

	uobj = pg->uobject;
	if (mutex_tryenter(&uobj->vmobjlock)) {
		if ((pg->flags & PG_BUSY) == 0) {
			mutex_exit(&uvm_pageqlock);
			uobj->pgops->pgo_put(uobj, pg->offset,
			    pg->offset + PAGE_SIZE,
			    PGO_CLEANIT|PGO_FREE);
			KASSERT(!mutex_owned(&uobj->vmobjlock));
			return true;
		} else {
			mutex_exit(&uobj->vmobjlock);
		}
	}

	return false;
}

/*
 * The Diabolical pageDaemon Director (DDD).
 */
void
uvm_pageout(void *arg)
{
	struct vm_page *pg;
	struct pool *pp, *pp_first;
	uint64_t where;
	int timo = 0;
	int cleaned, skip, skipped;
	bool succ = false;

	mutex_enter(&pdaemonmtx);
	for (;;) {
		if (succ) {
			kernel_map->flags &= ~VM_MAP_WANTVA;
			kmem_map->flags &= ~VM_MAP_WANTVA;
			timo = 0;
			if (pdaemon_waiters) {
				pdaemon_waiters = 0;
				cv_broadcast(&oomwait);
			}
		}
		succ = false;

		cv_timedwait(&pdaemoncv, &pdaemonmtx, timo);
		uvmexp.pdwoke++;

		/* tell the world that we are hungry */
		kernel_map->flags |= VM_MAP_WANTVA;
		kmem_map->flags |= VM_MAP_WANTVA;

		if (pdaemon_waiters == 0 && !NEED_PAGEDAEMON())
			continue;
		mutex_exit(&pdaemonmtx);

		/*
		 * step one: reclaim the page cache.  this should give
		 * us the biggest earnings since whole pages are released
		 * into backing memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			succ = true;
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * Ok, so that didn't help.  Next, try to hunt memory
		 * by pushing out vnode pages.  The pages might contain
		 * useful cached data, but we need the memory.
		 */
		cleaned = 0;
		skip = 0;
 again:
		mutex_enter(&uvm_pageqlock);
		while (cleaned < PAGEDAEMON_OBJCHUNK) {
			skipped = 0;
			TAILQ_FOREACH(pg, &vmpage_lruqueue, pageq.queue) {

				/*
				 * skip over pages we _might_ have tried
				 * to handle earlier.  they might not be
				 * exactly the same ones, but I'm not too
				 * concerned.
				 */
				if (skipped++ < skip)
					continue;

				if (processpage(pg)) {
					cleaned++;
					goto again;
				}

				skip++;
			}
			break;
		}
		mutex_exit(&uvm_pageqlock);

		/*
		 * And of course we need to reclaim the page cache
		 * again to actually release memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			succ = true;
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * Still not there?  sleeves come off right about now.
		 * First: do reclaim on kernel/kmem map.
		 */
		callback_run_roundrobin(&kernel_map_store.vmk_reclaim_callback,
		    NULL);
		callback_run_roundrobin(&kmem_map_store.vmk_reclaim_callback,
		    NULL);

		/*
		 * And then drain the pools.  Wipe them out ... all of them.
		 */

		pool_drain_start(&pp_first, &where);
		pp = pp_first;
		for (;;) {
			rump_vfs_drainbufs(10 /* XXX: estimate better */);
			succ = pool_drain_end(pp, where);
			if (succ)
				break;
			pool_drain_start(&pp, &where);
			if (pp == pp_first) {
				succ = pool_drain_end(pp, where);
				break;
			}
		}

		/*
		 * Need to use PYEC on our bag of tricks.
		 * Unfortunately, the wife just borrowed it.
		 */

		if (!succ) {
			rumpuser_dprintf("pagedaemoness: failed to reclaim "
			    "memory ... sleeping (deadlock?)\n");
			timo = hz;
		}

		mutex_enter(&pdaemonmtx);
	}

	panic("you can swap out any time you like, but you can never leave");
}

void
uvm_kick_pdaemon()
{

	/*
	 * Wake up the diabolical pagedaemon director if we are over
	 * 90% of the memory limit.  This is a complete and utter
	 * stetson-harrison decision which you are allowed to finetune.
	 * Don't bother locking.  If we have some unflushed caches,
	 * other waker-uppers will deal with the issue.
	 */
	if (NEED_PAGEDAEMON()) {
		cv_signal(&pdaemoncv);
	}
}

void *
rump_hypermalloc(size_t howmuch, int alignment, bool waitok, const char *wmsg)
{
	unsigned long newmem;
	void *rv;

	uvm_kick_pdaemon(); /* ouch */

	/* first we must be within the limit */
 limitagain:
	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		newmem = atomic_add_long_nv(&curphysmem, howmuch);
		if (newmem > rump_physmemlimit) {
			newmem = atomic_add_long_nv(&curphysmem, -howmuch);
			if (!waitok)
				return NULL;
			uvm_wait(wmsg);
			goto limitagain;
		}
	}

	/* second, we must get something from the backend */
 again:
	rv = rumpuser_malloc(howmuch, alignment);
	if (__predict_false(rv == NULL && waitok)) {
		uvm_wait(wmsg);
		goto again;
	}

	return rv;
}

void
rump_hyperfree(void *what, size_t size)
{

	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		atomic_add_long(&curphysmem, -size);
	}
	rumpuser_free(what);
}
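
/*
 * Pairing sketch: since the accounting above is done in bytes,
 * hypervisor memory must be released with the same size it was
 * requested with:
 *
 *	void *mem = rump_hypermalloc(howmuch, 0, true, "example");
 *	...
 *	rump_hyperfree(mem, howmuch);
 */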