/*	$NetBSD: vm.c,v 1.70.4.3 2010/07/03 01:20:02 rmind Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation and the Research Foundation of
 * The Helsinki University of Technology.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + anon objects & pager
 *  + misc support routines
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

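/*
 * Illustrative sketch (not part of the build): given the pg->uanon
 * abuse described above, the backing storage of a page is reached by
 * a simple cast, e.g.:
 *
 *	void *va = (void *)pg->uanon;
 *	memset(va, 0, PAGE_SIZE);
 *
 * A pmap-backed kernel would instead map pg->phys_addr somewhere and
 * use that mapping.
 */
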
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.70.4.3 2010/07/03 01:20:02 rmind Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/null.h>
#include <sys/vnode.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include "rump_private.h"

static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

static unsigned int pdaemon_waiters;
static kmutex_t pdaemonmtx;
static kcondvar_t pdaemoncv, oomwait;

#define RUMPMEM_UNLIMITED ((unsigned long)-1)
static unsigned long physmemlimit = RUMPMEM_UNLIMITED;
static unsigned long curphysmem;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
	int flags, int strat, int free_list)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_alloc(PAGE_SIZE, KM_SLEEP);
	if (flags & UVM_PGA_ZERO)
		memset(pg->uanon, 0, PAGE_SIZE);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	uobj->uo_npages++;

	return pg;
}

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	uobj->uo_npages--;
	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

void
uvm_pagezero(struct vm_page *pg)
{

	pg->flags &= ~PG_CLEAN;
	memset((void *)pg->uanon, 0, PAGE_SIZE);
}

/*
 * Anon object stuff
 */

static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = uvm_pagealloc(uobj,
			    off + (i << PAGE_SHIFT), NULL, UVM_PGA_ZERO);
			pgs[i] = pg;
		}
	}
	mutex_exit(uobj->vmobjlock);

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(uobj->vmobjlock);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	mutex_obj_free(uobj->vmobjlock);
	kmem_free(uobj, sizeof(*uobj));
}

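/*
 * Illustrative usage sketch (assumptions, not part of the build):
 * the anon pager above is driven roughly like this, with the caller
 * holding the object lock across pgo_get as UVM requires (the lock
 * is released by pgo_get itself).
 *
 *	struct uvm_object *uobj = uao_create(PAGE_SIZE, 0);
 *	struct vm_page *pg = NULL;
 *	int npages = 1, error;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = uobj->pgops->pgo_get(uobj, 0, &pg, &npages, 0,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0, 0);
 *	if (error == 0) {
 *		mutex_enter(uobj->vmobjlock);
 *		uvm_page_unbusy(&pg, 1);
 *		mutex_exit(uobj->vmobjlock);
 *	}
 *	uao_detach(uobj);
 */
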
/*
 * Misc routines
 */

static kmutex_t pagermtx;

void
uvm_init(void)
{
	char buf[64];
	int error;

	if (rumpuser_getenv("RUMP_MEMLIMIT", buf, sizeof(buf), &error) == 0) {
		physmemlimit = strtoll(buf, NULL, 10);
		/* it's not like we'd get far with, say, 1 byte, but ... */
		if (physmemlimit == 0)
			panic("uvm_init: no memory available");
#define HUMANIZE_BYTES 9
		CTASSERT(sizeof(buf) >= HUMANIZE_BYTES);
		format_bytes(buf, HUMANIZE_BYTES, physmemlimit);
#undef HUMANIZE_BYTES
	} else {
		strlcpy(buf, "unlimited (host limit)", sizeof(buf));
	}
	aprint_verbose("total memory = %s\n", buf);

	uvmexp.free = 1024*1024; /* XXX: arbitrary & not updated */

	mutex_init(&pagermtx, MUTEX_DEFAULT, 0);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, 0);

	mutex_init(&pdaemonmtx, MUTEX_DEFAULT, 0);
	cv_init(&pdaemoncv, "pdaemon");
	cv_init(&oomwait, "oomwait");

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
}

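/*
 * Example (hedged; the program name is arbitrary): the limit parsed
 * above is given in bytes via the host environment before the rump
 * kernel starts, e.g. to cap a process at 16MB:
 *
 *	$ env RUMP_MEMLIMIT=16777216 ./rumptest
 *
 * Allocations beyond the limit then block in uvm_wait() until the
 * pagedaemon reclaims memory, or fail if they may not sleep.
 */
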
void
uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
{

	vm->vm_map.pmap = pmap_kernel();
	vm->vm_refcnt = 1;
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

/* where's your schmonz now? */
#define PUNLIMIT(a)	\
p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
void
uvm_init_limits(struct proc *p)
{

	PUNLIMIT(RLIMIT_STACK);
	PUNLIMIT(RLIMIT_DATA);
	PUNLIMIT(RLIMIT_RSS);
	PUNLIMIT(RLIMIT_AS);
	/* nice, cascade */
}
#undef PUNLIMIT

/*
 * This satisfies the "disgusting mmap hack" used by proplib.
 * We probably should grow some more assertables to make sure we're
 * not satisfying anything we shouldn't be satisfying.  At least we
 * should make sure it's the local machine we're mmapping ...
 */
int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{
	void *uaddr;
	int error;

	if (prot != (VM_PROT_READ | VM_PROT_WRITE))
		panic("uvm_mmap() variant unsupported");
	if (flags != (MAP_PRIVATE | MAP_ANON))
		panic("uvm_mmap() variant unsupported");
	/* no reason in particular, but cf. uvm_default_mapaddr() */
	if (*addr != 0)
		panic("uvm_mmap() variant unsupported");

	uaddr = rumpuser_anonmmap(NULL, size, 0, 0, &error);
	if (uaddr == NULL)
		return error;

	*addr = (vaddr_t)uaddr;
	return 0;
}

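/*
 * Sketch of the only call shape the routine above accepts (anything
 * else panics); variable names are illustrative:
 *
 *	vaddr_t va = 0;
 *	int error;
 *
 *	error = uvm_mmap(map, &va, round_page(len),
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, NULL, 0, 0);
 */
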
struct pagerinfo {
	vaddr_t pgr_kva;
	int pgr_npages;
	struct vm_page **pgr_pgs;
	bool pgr_read;

	LIST_ENTRY(pagerinfo) pgr_entries;
};
static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);

/*
 * Pager "map in" routine.  Instead of mapping, we allocate memory
 * and copy page contents there.  Not optimal or even strictly
 * correct (the caller might modify the page contents after mapping
 * them in), but what the heck.  Assumes UVMPAGER_MAPIN_WAITOK.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	/* allocate structures */
	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
	pgri->pgr_npages = npages;
	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;

	/* copy contents to "mapped" memory */
	for (i = 0, curkva = pgri->pgr_kva;
	    i < npages;
	    i++, curkva += PAGE_SIZE) {
		/*
		 * We need to copy the previous contents of the pages to
		 * the window even if we are reading from the
		 * device, since the device might not fill the contents of
		 * the full mapped range and we will end up corrupting
		 * data when we unmap the window.
		 */
		memcpy((void *)curkva, pgs[i]->uanon, PAGE_SIZE);
		pgri->pgr_pgs[i] = pgs[i];
	}

	mutex_enter(&pagermtx);
	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
	mutex_exit(&pagermtx);

	return pgri->pgr_kva;
}

/*
 * Map out the pager window.  Return contents from VA to page storage
 * and free structures.
 *
 * Note: does not currently support partial frees.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva == kva)
			break;
	}
	KASSERT(pgri);
	if (pgri->pgr_npages != npages)
		panic("uvm_pagermapout: partial unmapping not supported");
	LIST_REMOVE(pgri, pgr_entries);
	mutex_exit(&pagermtx);

	if (pgri->pgr_read) {
		for (i = 0, curkva = pgri->pgr_kva;
		    i < pgri->pgr_npages;
		    i++, curkva += PAGE_SIZE) {
			memcpy(pgri->pgr_pgs[i]->uanon,
			    (void *)curkva, PAGE_SIZE);
		}
	}

	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
	kmem_free((void *)pgri->pgr_kva, npages * PAGE_SIZE);
	kmem_free(pgri, sizeof(*pgri));
}

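/*
 * Round-trip sketch (illustrative; "pgs" would come from a pgo_get
 * call): a device driver sees a contiguous window, and anything it
 * writes there is copied back to the pages on map-out because the
 * window was entered with UVMPAGER_MAPIN_READ.
 *
 *	vaddr_t kva = uvm_pagermapin(pgs, npages,
 *	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
 *	... device transfers into (void *)kva ...
 *	uvm_pagermapout(kva, npages);
 */
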
/*
 * Convert va in pager window to page structure.
 * XXX: how expensive is this (global lock, list traversal)?
 */
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct pagerinfo *pgri;
	struct vm_page *pg = NULL;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva <= va
		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
			break;
	}
	if (pgri) {
		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
		pg = pgri->pgr_pgs[i];
	}
	mutex_exit(&pagermtx);

	return pg;
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	if (map->flags & VM_MAP_WANTVA)
		return true;

	return false;
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	return EBUSY;
}

#ifdef DEBUGPRINT
void
uvm_object_printit(struct uvm_object *uobj, bool full,
	void (*pr)(const char *, ...))
{

	pr("VM OBJECT at %p, refs %d", uobj, uobj->uo_refs);
}
#endif

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return 0;
}

int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
	vm_prot_t prot, bool set_max)
{

	return EOPNOTSUPP;
}

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv, *desired = NULL;
	int alignbit, error;

#ifdef __x86_64__
	/*
	 * On amd64, allocate all module memory from the lowest 2GB.
	 * This is because NetBSD kernel modules are compiled
	 * with -mcmodel=kernel and reserve only 4 bytes for
	 * offsets.  If we load code compiled with -mcmodel=kernel
	 * anywhere except the lowest or highest 2GB, it will not
	 * work.  Since userspace does not have access to the highest
	 * 2GB, use the lowest 2GB.
	 *
	 * Note: this assumes the rump kernel resides in
	 * the lowest 2GB as well.
	 *
	 * Note2: yes, it's a quick hack, but since this is the only
	 * place where we care about the map we're allocating from,
	 * just use a simple "if" instead of coming up with a fancy
	 * generic solution.
	 */
	extern struct vm_map *module_map;
	if (map == module_map) {
		desired = (void *)(0x80000000 - size);
	}
#endif

	alignbit = 0;
	if (align) {
		alignbit = ffs(align)-1;
	}

	rv = rumpuser_anonmmap(desired, size, alignbit, flags & UVM_KMF_EXEC,
	    &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

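/*
 * Usage sketch (illustrative): align must be a power of two, since
 * it is converted to a bit position with ffs(align)-1 above; e.g.
 * align = PAGE_SIZE (4096) yields alignbit 12.
 *
 *	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO);
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */
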
void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
	    waitok, "kmalloc");
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rump_hyperfree((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{

	return uvm_km_alloc_poolpage(map, waitok);
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	uvm_km_free_poolpage(map, vaddr);
}

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{

	/* we eventually maybe want some model for available memory */
}

/*
 * Mapping and vm space locking routines.
 * XXX: these don't work for non-local vmspaces
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	KASSERT(vs == &vmspace0);
	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	KASSERT(vs == &vmspace0);
}

void
vmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_saveaddr = bp->b_data;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

void
uvmspace_addref(struct vmspace *vm)
{

	/*
	 * there is only vmspace0.  we're not planning on
	 * feeding it to the fishes.
	 */
}

void
uvmspace_free(struct vmspace *vm)
{

	/* nothing for now */
}

int
uvm_io(struct vm_map *map, struct uio *uio)
{

	/*
	 * just do direct uio for now.  but this needs some vmspace
	 * olympics for rump_sysproxy.
	 */
	return uiomove((void *)(vaddr_t)uio->uio_offset, uio->uio_resid, uio);
}

/*
 * page life cycle stuff.  it really doesn't exist, so just stubs.
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedeactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedequeue(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageenqueue(struct vm_page *pg)
{

	/* nada */
}

/*
 * Routines related to the Page Baroness.
 */

void
uvm_wait(const char *msg)
{

	if (__predict_false(curlwp == uvm.pagedaemon_lwp))
		panic("pagedaemon out of memory");
	if (__predict_false(rump_threads == 0))
		panic("pagedaemon missing (RUMP_THREADS = 0)");

	mutex_enter(&pdaemonmtx);
	pdaemon_waiters++;
	cv_signal(&pdaemoncv);
	cv_wait(&oomwait, &pdaemonmtx);
	mutex_exit(&pdaemonmtx);
}

void
uvm_pageout_start(int npages)
{

	/* we don't have the heuristics */
}

void
uvm_pageout_done(int npages)
{

	/* could wakeup waiters, but just let the pagedaemon do it */
}

/*
 * Under-construction page mistress.  This is lacking vfs support, namely:
 *
 *  1) draining vfs buffers
 *  2) paging out pages in vm vnode objects
 *     (we will not page out anon memory on the basis that
 *     that's the task of the host)
 */

void
uvm_pageout(void *arg)
{
	struct pool *pp, *pp_first;
	uint64_t where;
	int timo = 0;
	bool succ;

	mutex_enter(&pdaemonmtx);
	for (;;) {
		cv_timedwait(&pdaemoncv, &pdaemonmtx, timo);
		uvmexp.pdwoke++;
		kernel_map->flags |= VM_MAP_WANTVA;
		mutex_exit(&pdaemonmtx);

		succ = false;
		pool_drain_start(&pp_first, &where);
		pp = pp_first;
		for (;;) {
			succ = pool_drain_end(pp, where);
			if (succ)
				break;
			pool_drain_start(&pp, &where);
			if (pp == pp_first) {
				succ = pool_drain_end(pp, where);
				break;
			}
		}
		mutex_enter(&pdaemonmtx);

		if (!succ) {
			rumpuser_dprintf("pagedaemoness: failed to reclaim "
			    "memory ... sleeping (deadlock?)\n");
			timo = hz;
			continue;
		}
		kernel_map->flags &= ~VM_MAP_WANTVA;
		timo = 0;

		if (pdaemon_waiters) {
			pdaemon_waiters = 0;
			cv_broadcast(&oomwait);
		}
	}

	panic("you can swap out any time you like, but you can never leave");
}

/*
 * In a regular kernel the pagedaemon is activated when memory becomes
 * low.  In a virtual rump kernel we do not know exactly how much memory
 * we have available -- it depends on the conditions on the host.
 * Therefore, we cannot preemptively kick the pagedaemon.  Rather, we
 * wait until things are desperate and we're forced to uvm_wait().
 *
 * The alternative would be to allocate a huge chunk of memory at
 * startup, but that solution has a number of problems, including
 * being a resource hog, failing anyway due to host memory overcommit
 * and core dump size.
 */

void
uvm_kick_pdaemon(void)
{

	/* nada */
}

void *
rump_hypermalloc(size_t howmuch, int alignment, bool waitok, const char *wmsg)
{
	unsigned long newmem;
	void *rv;

	/* first we must be within the limit */
 limitagain:
	if (physmemlimit != RUMPMEM_UNLIMITED) {
		newmem = atomic_add_long_nv(&curphysmem, howmuch);
		if (newmem > physmemlimit) {
			newmem = atomic_add_long_nv(&curphysmem, -howmuch);
			if (!waitok)
				return NULL;
			uvm_wait(wmsg);
			goto limitagain;
		}
	}

	/* second, we must get something from the backend */
 again:
	rv = rumpuser_malloc(howmuch, alignment);
	if (__predict_false(rv == NULL && waitok)) {
		uvm_wait(wmsg);
		goto again;
	}

	return rv;
}

void
rump_hyperfree(void *what, size_t size)
{

	if (physmemlimit != RUMPMEM_UNLIMITED) {
		atomic_add_long(&curphysmem, -size);
	}
	rumpuser_free(what);
}
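
/*
 * Usage sketch (illustrative): rump_hypermalloc() first charges the
 * request against the RUMP_MEMLIMIT accounting above, then asks the
 * host for the memory; both steps may sleep in uvm_wait() when waitok
 * is true.  The caller must free with the same size it allocated,
 * since rump_hyperfree() credits that many bytes back to curphysmem.
 *
 *	void *mem = rump_hypermalloc(len, 0, true, "example");
 *	...
 *	rump_hyperfree(mem, len);
 */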