      1  1.88    pooka /*	$NetBSD: vm.c,v 1.88 2010/09/06 20:10:20 pooka Exp $	*/
      2   1.1    pooka 
      3   1.1    pooka /*
      4  1.76    pooka  * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
      5   1.1    pooka  *
      6  1.76    pooka  * Development of this software was supported by
      7  1.76    pooka  * The Finnish Cultural Foundation and the Research Foundation of
      8  1.76    pooka  * The Helsinki University of Technology.
      9   1.1    pooka  *
     10   1.1    pooka  * Redistribution and use in source and binary forms, with or without
     11   1.1    pooka  * modification, are permitted provided that the following conditions
     12   1.1    pooka  * are met:
     13   1.1    pooka  * 1. Redistributions of source code must retain the above copyright
     14   1.1    pooka  *    notice, this list of conditions and the following disclaimer.
     15   1.1    pooka  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1    pooka  *    notice, this list of conditions and the following disclaimer in the
     17   1.1    pooka  *    documentation and/or other materials provided with the distribution.
     18   1.1    pooka  *
     19   1.1    pooka  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     20   1.1    pooka  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     21   1.1    pooka  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     22   1.1    pooka  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     23   1.1    pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24   1.1    pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     25   1.1    pooka  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26   1.1    pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27   1.1    pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28   1.1    pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29   1.1    pooka  * SUCH DAMAGE.
     30   1.1    pooka  */
     31   1.1    pooka 
     32   1.1    pooka /*
     33  1.88    pooka  * Virtual memory emulation routines.
     34   1.1    pooka  */
     35   1.1    pooka 
     36   1.1    pooka /*
     37   1.5    pooka  * XXX: we abuse pg->uanon for the virtual address of the storage
     38   1.1    pooka  * for each page.  phys_addr would fit the job description better,
     39   1.1    pooka  * except that it will create unnecessary lossage on some platforms
     40   1.1    pooka  * due to not being a pointer type.
     41   1.1    pooka  */
     42   1.1    pooka 
     43  1.48    pooka #include <sys/cdefs.h>
     44  1.88    pooka __KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.88 2010/09/06 20:10:20 pooka Exp $");
     45  1.48    pooka 
     46   1.1    pooka #include <sys/param.h>
     47  1.40    pooka #include <sys/atomic.h>
     48  1.80    pooka #include <sys/buf.h>
     49  1.80    pooka #include <sys/kernel.h>
     50  1.67    pooka #include <sys/kmem.h>
     51  1.69    pooka #include <sys/mman.h>
     52   1.1    pooka #include <sys/null.h>
     53   1.1    pooka #include <sys/vnode.h>
     54   1.1    pooka 
     55  1.34    pooka #include <machine/pmap.h>
     56  1.34    pooka 
     57  1.34    pooka #include <rump/rumpuser.h>
     58  1.34    pooka 
     59   1.1    pooka #include <uvm/uvm.h>
     60  1.56    pooka #include <uvm/uvm_ddb.h>
     61  1.88    pooka #include <uvm/uvm_pdpolicy.h>
     62   1.1    pooka #include <uvm/uvm_prot.h>
     63  1.58       he #include <uvm/uvm_readahead.h>
     64   1.1    pooka 
     65  1.13    pooka #include "rump_private.h"
     66   1.1    pooka 
     67  1.25       ad kmutex_t uvm_pageqlock;
     68  1.88    pooka kmutex_t uvm_swap_data_lock;
     69  1.25       ad 
     70   1.1    pooka struct uvmexp uvmexp;
     71   1.7    pooka struct uvm uvm;
     72   1.1    pooka 
     73   1.1    pooka struct vm_map rump_vmmap;
     74  1.50    pooka static struct vm_map_kernel kmem_map_store;
     75  1.50    pooka struct vm_map *kmem_map = &kmem_map_store.vmk_map;
     76  1.32       ad const struct rb_tree_ops uvm_page_tree_ops;
     77   1.1    pooka 
     78  1.35    pooka static struct vm_map_kernel kernel_map_store;
     79  1.35    pooka struct vm_map *kernel_map = &kernel_map_store.vmk_map;
     80  1.35    pooka 
     81  1.80    pooka static unsigned int pdaemon_waiters;
     82  1.80    pooka static kmutex_t pdaemonmtx;
     83  1.80    pooka static kcondvar_t pdaemoncv, oomwait;
     84  1.80    pooka 
     85  1.84    pooka #define RUMPMEM_UNLIMITED ((unsigned long)-1)
     86  1.84    pooka static unsigned long physmemlimit = RUMPMEM_UNLIMITED;
     87  1.84    pooka static unsigned long curphysmem;
     88  1.84    pooka 
     89   1.1    pooka /*
     90   1.1    pooka  * vm pages
     91   1.1    pooka  */
     92   1.1    pooka 
     93  1.22    pooka /* called with the object locked */
     94   1.1    pooka struct vm_page *
     95  1.76    pooka uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
     96  1.76    pooka 	int flags, int strat, int free_list)
     97   1.1    pooka {
     98   1.1    pooka 	struct vm_page *pg;
     99   1.1    pooka 
    100  1.27    pooka 	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
    101   1.1    pooka 	pg->offset = off;
    102   1.5    pooka 	pg->uobject = uobj;
    103   1.1    pooka 
    104  1.76    pooka 	pg->uanon = (void *)kmem_alloc(PAGE_SIZE, KM_SLEEP);
    105  1.76    pooka 	if (flags & UVM_PGA_ZERO)
    106  1.76    pooka 		memset(pg->uanon, 0, PAGE_SIZE);
    107  1.22    pooka 	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;
    108   1.1    pooka 
    109  1.31       ad 	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
    110  1.59    pooka 	uobj->uo_npages++;
    111  1.21    pooka 
    112   1.1    pooka 	return pg;
    113   1.1    pooka }
    114   1.1    pooka 
    115  1.21    pooka /*
    116  1.21    pooka  * Release a page.
    117  1.21    pooka  *
    118  1.22    pooka  * Called with the vm object locked.
    119  1.21    pooka  */
    120   1.1    pooka void
    121  1.22    pooka uvm_pagefree(struct vm_page *pg)
    122   1.1    pooka {
    123   1.5    pooka 	struct uvm_object *uobj = pg->uobject;
    124   1.1    pooka 
    125  1.22    pooka 	if (pg->flags & PG_WANTED)
    126  1.22    pooka 		wakeup(pg);
    127  1.22    pooka 
    128  1.59    pooka 	uobj->uo_npages--;
    129  1.31       ad 	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
    130  1.27    pooka 	kmem_free((void *)pg->uanon, PAGE_SIZE);
    131  1.27    pooka 	kmem_free(pg, sizeof(*pg));
    132   1.1    pooka }
    133   1.1    pooka 
    134  1.15    pooka void
    135  1.61    pooka uvm_pagezero(struct vm_page *pg)
    136  1.15    pooka {
    137  1.15    pooka 
    138  1.61    pooka 	pg->flags &= ~PG_CLEAN;
    139  1.61    pooka 	memset((void *)pg->uanon, 0, PAGE_SIZE);
    140  1.15    pooka }
    141  1.15    pooka 
    142   1.1    pooka /*
    143   1.1    pooka  * Misc routines
    144   1.1    pooka  */
    145   1.1    pooka 
    146  1.61    pooka static kmutex_t pagermtx;
    147  1.61    pooka 
    148   1.1    pooka void
    149  1.79    pooka uvm_init(void)
    150   1.1    pooka {
    151  1.84    pooka 	char buf[64];
    152  1.84    pooka 	int error;
    153  1.84    pooka 
    154  1.84    pooka 	if (rumpuser_getenv("RUMP_MEMLIMIT", buf, sizeof(buf), &error) == 0) {
    155  1.84    pooka 		physmemlimit = strtoll(buf, NULL, 10);
    156  1.84    pooka 		/* it's not like we'd get far with, say, 1 byte, but ... */
    157  1.84    pooka 		if (physmemlimit == 0)
    158  1.84    pooka 			panic("uvm_init: no memory available");
    159  1.84    pooka #define HUMANIZE_BYTES 9
    160  1.84    pooka 		CTASSERT(sizeof(buf) >= HUMANIZE_BYTES);
    161  1.84    pooka 		format_bytes(buf, HUMANIZE_BYTES, physmemlimit);
    162  1.84    pooka #undef HUMANIZE_BYTES
    163  1.84    pooka 	} else {
    164  1.84    pooka 		strlcpy(buf, "unlimited (host limit)", sizeof(buf));
    165  1.84    pooka 	}
    166  1.84    pooka 	aprint_verbose("total memory = %s\n", buf);
    167   1.1    pooka 
    168  1.84    pooka 	uvmexp.free = 1024*1024; /* XXX: arbitrary & not updated */
    169  1.21    pooka 
    170  1.61    pooka 	mutex_init(&pagermtx, MUTEX_DEFAULT, 0);
    171  1.25       ad 	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, 0);
    172  1.88    pooka 	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, 0);
    173  1.35    pooka 
    174  1.80    pooka 	mutex_init(&pdaemonmtx, MUTEX_DEFAULT, 0);
    175  1.80    pooka 	cv_init(&pdaemoncv, "pdaemon");
    176  1.80    pooka 	cv_init(&oomwait, "oomwait");
    177  1.80    pooka 
    178  1.50    pooka 	kernel_map->pmap = pmap_kernel();
    179  1.35    pooka 	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
    180  1.50    pooka 	kmem_map->pmap = pmap_kernel();
    181  1.50    pooka 	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
    182   1.1    pooka }
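
/*
 * Illustration, not part of the original source: RUMP_MEMLIMIT, read
 * above via rumpuser_getenv(), is parsed as a plain base-10 byte count.
 * A hypothetical way to cap the rump kernel at 16MB is to set the
 * variable in the environment of the hosting process (the program name
 * below is made up for the example):
 *
 *	$ env RUMP_MEMLIMIT=16777216 ./rumpserver
 */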
    183   1.1    pooka 
    184  1.83    pooka void
    185  1.83    pooka uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
    186  1.83    pooka {
    187  1.83    pooka 
    188  1.83    pooka 	vm->vm_map.pmap = pmap_kernel();
    189  1.83    pooka 	vm->vm_refcnt = 1;
    190  1.83    pooka }
    191   1.1    pooka 
    192   1.1    pooka void
    193   1.7    pooka uvm_pagewire(struct vm_page *pg)
    194   1.7    pooka {
    195   1.7    pooka 
    196   1.7    pooka 	/* nada */
    197   1.7    pooka }
    198   1.7    pooka 
    199   1.7    pooka void
    200   1.7    pooka uvm_pageunwire(struct vm_page *pg)
    201   1.7    pooka {
    202   1.7    pooka 
    203   1.7    pooka 	/* nada */
    204   1.7    pooka }
    205   1.7    pooka 
    206  1.83    pooka /* where's your schmonz now? */
    207  1.83    pooka #define PUNLIMIT(a)	\
    208  1.83    pooka p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
    209  1.83    pooka void
    210  1.83    pooka uvm_init_limits(struct proc *p)
    211  1.83    pooka {
    212  1.83    pooka 
    213  1.83    pooka 	PUNLIMIT(RLIMIT_STACK);
    214  1.83    pooka 	PUNLIMIT(RLIMIT_DATA);
    215  1.83    pooka 	PUNLIMIT(RLIMIT_RSS);
    216  1.83    pooka 	PUNLIMIT(RLIMIT_AS);
    217  1.83    pooka 	/* nice, cascade */
    218  1.83    pooka }
    219  1.83    pooka #undef PUNLIMIT
    220  1.83    pooka 
    221  1.69    pooka /*
    222  1.69    pooka  * This satisfies the "disgusting mmap hack" used by proplib.
    223  1.69    pooka  * We probably should grow some more assertables to make sure we're
    224  1.69    pooka  * not satisfying anything we shouldn't be satisfying.  At least we
    225  1.69    pooka  * should make sure it's the local machine we're mmapping ...
    226  1.69    pooka  */
    227  1.49    pooka int
    228  1.49    pooka uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    229  1.49    pooka 	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
    230  1.49    pooka {
    231  1.69    pooka 	void *uaddr;
    232  1.69    pooka 	int error;
    233  1.49    pooka 
    234  1.69    pooka 	if (prot != (VM_PROT_READ | VM_PROT_WRITE))
    235  1.69    pooka 		panic("uvm_mmap() variant unsupported");
    236  1.69    pooka 	if (flags != (MAP_PRIVATE | MAP_ANON))
    237  1.69    pooka 		panic("uvm_mmap() variant unsupported");
    238  1.69    pooka 	/* no reason in particular, but cf. uvm_default_mapaddr() */
    239  1.69    pooka 	if (*addr != 0)
    240  1.69    pooka 		panic("uvm_mmap() variant unsupported");
    241  1.69    pooka 
    242  1.81    pooka 	uaddr = rumpuser_anonmmap(NULL, size, 0, 0, &error);
    243  1.69    pooka 	if (uaddr == NULL)
    244  1.69    pooka 		return error;
    245  1.69    pooka 
    246  1.69    pooka 	*addr = (vaddr_t)uaddr;
    247  1.69    pooka 	return 0;
    248  1.49    pooka }
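
/*
 * A minimal sketch (hypothetical caller, not from the original source)
 * of the only call shape uvm_mmap() above accepts: zero address hint,
 * read/write protection, private anonymous mapping.  Other prot, flags
 * or address values are rejected with panic().  Names are illustrative.
 */
#if 0
	vaddr_t va = 0;
	size_t len = PAGE_SIZE;		/* arbitrary example length */
	int error;

	error = uvm_mmap(NULL, &va, round_page(len),
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, NULL, 0, 0);
#endif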
    249  1.49    pooka 
    250  1.61    pooka struct pagerinfo {
    251  1.61    pooka 	vaddr_t pgr_kva;
    252  1.61    pooka 	int pgr_npages;
    253  1.61    pooka 	struct vm_page **pgr_pgs;
    254  1.61    pooka 	bool pgr_read;
    255  1.61    pooka 
    256  1.61    pooka 	LIST_ENTRY(pagerinfo) pgr_entries;
    257  1.61    pooka };
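/* All currently mapped-in pager windows; protected by pagermtx. */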
    258  1.61    pooka static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);
    259  1.61    pooka 
    260  1.61    pooka /*
    261  1.61    pooka  * Pager "map" in routine.  Instead of mapping, we allocate memory
    262  1.61    pooka  * and copy page contents there.  Not optimal or even strictly
    263  1.61    pooka  * correct (the caller might modify the page contents after mapping
    264  1.61    pooka  * them in), but what the heck.  Assumes UVMPAGER_MAPIN_WAITOK.
    265  1.61    pooka  */
    266   1.7    pooka vaddr_t
    267  1.61    pooka uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
    268   1.7    pooka {
    269  1.61    pooka 	struct pagerinfo *pgri;
    270  1.61    pooka 	vaddr_t curkva;
    271  1.61    pooka 	int i;
    272  1.61    pooka 
    273  1.61    pooka 	/* allocate structures */
    274  1.61    pooka 	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
    275  1.61    pooka 	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
    276  1.61    pooka 	pgri->pgr_npages = npages;
    277  1.61    pooka 	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
    278  1.61    pooka 	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;
    279  1.61    pooka 
    280  1.61    pooka 	/* copy contents to "mapped" memory */
    281  1.61    pooka 	for (i = 0, curkva = pgri->pgr_kva;
    282  1.61    pooka 	    i < npages;
    283  1.61    pooka 	    i++, curkva += PAGE_SIZE) {
    284  1.61    pooka 		/*
    285  1.61    pooka 		 * We need to copy the previous contents of the pages to
    286  1.61    pooka 		 * the window even if we are reading from the
    287  1.61    pooka 		 * device, since the device might not fill the contents of
    288  1.61    pooka 		 * the full mapped range and we will end up corrupting
    289  1.61    pooka 		 * data when we unmap the window.
    290  1.61    pooka 		 */
    291  1.61    pooka 		memcpy((void*)curkva, pgs[i]->uanon, PAGE_SIZE);
    292  1.61    pooka 		pgri->pgr_pgs[i] = pgs[i];
    293  1.61    pooka 	}
    294  1.61    pooka 
    295  1.61    pooka 	mutex_enter(&pagermtx);
    296  1.61    pooka 	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
    297  1.61    pooka 	mutex_exit(&pagermtx);
    298   1.7    pooka 
    299  1.61    pooka 	return pgri->pgr_kva;
    300   1.7    pooka }
    301   1.7    pooka 
    302  1.61    pooka /*
    303  1.61    pooka  * map out the pager window.  return contents from VA to page storage
    304  1.61    pooka  * and free structures.
    305  1.61    pooka  *
    306  1.61    pooka  * Note: does not currently support partial frees
    307  1.61    pooka  */
    308  1.61    pooka void
    309  1.61    pooka uvm_pagermapout(vaddr_t kva, int npages)
    310   1.7    pooka {
    311  1.61    pooka 	struct pagerinfo *pgri;
    312  1.61    pooka 	vaddr_t curkva;
    313  1.61    pooka 	int i;
    314   1.7    pooka 
    315  1.61    pooka 	mutex_enter(&pagermtx);
    316  1.61    pooka 	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
    317  1.61    pooka 		if (pgri->pgr_kva == kva)
    318  1.61    pooka 			break;
    319  1.61    pooka 	}
    320  1.61    pooka 	KASSERT(pgri);
    321  1.61    pooka 	if (pgri->pgr_npages != npages)
    322  1.61    pooka 		panic("uvm_pagermapout: partial unmapping not supported");
    323  1.61    pooka 	LIST_REMOVE(pgri, pgr_entries);
    324  1.61    pooka 	mutex_exit(&pagermtx);
    325  1.61    pooka 
    326  1.61    pooka 	if (pgri->pgr_read) {
    327  1.61    pooka 		for (i = 0, curkva = pgri->pgr_kva;
    328  1.61    pooka 		    i < pgri->pgr_npages;
    329  1.61    pooka 		    i++, curkva += PAGE_SIZE) {
    330  1.61    pooka 			memcpy(pgri->pgr_pgs[i]->uanon,(void*)curkva,PAGE_SIZE);
    331  1.21    pooka 		}
    332  1.21    pooka 	}
    333  1.10    pooka 
    334  1.61    pooka 	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
    335  1.61    pooka 	kmem_free((void*)pgri->pgr_kva, npages * PAGE_SIZE);
    336  1.61    pooka 	kmem_free(pgri, sizeof(*pgri));
    337   1.7    pooka }
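
/*
 * A minimal sketch (hypothetical caller, not from the original source)
 * of the round trip through the emulated pager window: uvm_pagermapin()
 * copies the page contents into a freshly allocated window and
 * uvm_pagermapout() copies them back (for UVMPAGER_MAPIN_READ) and
 * frees the window.
 */
#if 0
static void
pager_window_example(struct vm_page **pgs, int npages)
{
	vaddr_t kva;

	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_READ);
	/* ... perform device I/O into the window at kva ... */
	uvm_pagermapout(kva, npages);	/* copy back, free the window */
}
#endif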
    338   1.7    pooka 
    339  1.61    pooka /*
    340  1.61    pooka  * convert va in pager window to page structure.
    341  1.61    pooka  * XXX: how expensive is this (global lock, list traversal)?
    342  1.61    pooka  */
    343  1.14    pooka struct vm_page *
    344  1.14    pooka uvm_pageratop(vaddr_t va)
    345  1.14    pooka {
    346  1.61    pooka 	struct pagerinfo *pgri;
    347  1.61    pooka 	struct vm_page *pg = NULL;
    348  1.61    pooka 	int i;
    349  1.14    pooka 
    350  1.61    pooka 	mutex_enter(&pagermtx);
    351  1.61    pooka 	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
    352  1.61    pooka 		if (pgri->pgr_kva <= va
    353  1.61    pooka 		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
    354  1.21    pooka 			break;
    355  1.61    pooka 	}
    356  1.61    pooka 	if (pgri) {
    357  1.61    pooka 		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
    358  1.61    pooka 		pg = pgri->pgr_pgs[i];
    359  1.61    pooka 	}
    360  1.61    pooka 	mutex_exit(&pagermtx);
    361  1.21    pooka 
    362  1.61    pooka 	return pg;
    363  1.61    pooka }
    364  1.15    pooka 
    365  1.61    pooka /* Called with the vm object locked */
    366  1.61    pooka struct vm_page *
    367  1.61    pooka uvm_pagelookup(struct uvm_object *uobj, voff_t off)
    368  1.61    pooka {
    369  1.61    pooka 	struct vm_page *pg;
    370  1.61    pooka 
    371  1.61    pooka 	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
    372  1.87  hannken 		if ((pg->flags & PG_MARKER) != 0)
    373  1.87  hannken 			continue;
    374  1.61    pooka 		if (pg->offset == off) {
    375  1.61    pooka 			return pg;
    376  1.61    pooka 		}
    377  1.61    pooka 	}
    378  1.61    pooka 
    379  1.61    pooka 	return NULL;
    380  1.14    pooka }
    381  1.14    pooka 
    382   1.7    pooka void
    383  1.22    pooka uvm_page_unbusy(struct vm_page **pgs, int npgs)
    384  1.22    pooka {
    385  1.22    pooka 	struct vm_page *pg;
    386  1.22    pooka 	int i;
    387  1.22    pooka 
    388  1.22    pooka 	for (i = 0; i < npgs; i++) {
    389  1.22    pooka 		pg = pgs[i];
    390  1.22    pooka 		if (pg == NULL)
    391  1.22    pooka 			continue;
    392  1.22    pooka 
    393  1.22    pooka 		KASSERT(pg->flags & PG_BUSY);
    394  1.22    pooka 		if (pg->flags & PG_WANTED)
    395  1.22    pooka 			wakeup(pg);
    396  1.36    pooka 		if (pg->flags & PG_RELEASED)
    397  1.36    pooka 			uvm_pagefree(pg);
    398  1.36    pooka 		else
    399  1.36    pooka 			pg->flags &= ~(PG_WANTED|PG_BUSY);
    400  1.22    pooka 	}
    401  1.22    pooka }
    402  1.22    pooka 
    403  1.22    pooka void
    404   1.7    pooka uvm_estimatepageable(int *active, int *inactive)
    405   1.7    pooka {
    406   1.7    pooka 
    407  1.19    pooka 	/* XXX: guessing game */
    408  1.19    pooka 	*active = 1024;
    409  1.19    pooka 	*inactive = 1024;
    410   1.7    pooka }
    411   1.7    pooka 
    412  1.39    pooka struct vm_map_kernel *
    413  1.39    pooka vm_map_to_kernel(struct vm_map *map)
    414  1.39    pooka {
    415  1.39    pooka 
    416  1.39    pooka 	return (struct vm_map_kernel *)map;
    417  1.39    pooka }
    418  1.39    pooka 
    419  1.41    pooka bool
    420  1.41    pooka vm_map_starved_p(struct vm_map *map)
    421  1.41    pooka {
    422  1.41    pooka 
    423  1.80    pooka 	if (map->flags & VM_MAP_WANTVA)
    424  1.80    pooka 		return true;
    425  1.80    pooka 
    426  1.41    pooka 	return false;
    427  1.41    pooka }
    428  1.41    pooka 
    429  1.41    pooka int
    430  1.41    pooka uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
    431  1.41    pooka {
    432  1.41    pooka 
    433  1.41    pooka 	panic("%s: unimplemented", __func__);
    434  1.41    pooka }
    435  1.41    pooka 
    436  1.41    pooka void
    437  1.41    pooka uvm_unloan(void *v, int npages, int flags)
    438  1.41    pooka {
    439  1.41    pooka 
    440  1.41    pooka 	panic("%s: unimplemented", __func__);
    441  1.41    pooka }
    442  1.41    pooka 
    443  1.43    pooka int
    444  1.43    pooka uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    445  1.43    pooka 	struct vm_page **opp)
    446  1.43    pooka {
    447  1.43    pooka 
    448  1.72    pooka 	return EBUSY;
    449  1.43    pooka }
    450  1.43    pooka 
    451  1.73    pooka #ifdef DEBUGPRINT
    452  1.56    pooka void
    453  1.56    pooka uvm_object_printit(struct uvm_object *uobj, bool full,
    454  1.56    pooka 	void (*pr)(const char *, ...))
    455  1.56    pooka {
    456  1.56    pooka 
    457  1.75    pooka 	pr("VM OBJECT at %p, refs %d", uobj, uobj->uo_refs);
    458  1.56    pooka }
    459  1.73    pooka #endif
    460  1.56    pooka 
    461  1.68    pooka vaddr_t
    462  1.68    pooka uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
    463  1.68    pooka {
    464  1.68    pooka 
    465  1.68    pooka 	return 0;
    466  1.68    pooka }
    467  1.68    pooka 
    468  1.71    pooka int
    469  1.71    pooka uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
    470  1.71    pooka 	vm_prot_t prot, bool set_max)
    471  1.71    pooka {
    472  1.71    pooka 
    473  1.71    pooka 	return EOPNOTSUPP;
    474  1.71    pooka }
    475  1.71    pooka 
    476   1.9    pooka /*
    477  1.12    pooka  * UVM km
    478  1.12    pooka  */
    479  1.12    pooka 
    480  1.12    pooka vaddr_t
    481  1.12    pooka uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    482  1.12    pooka {
    483  1.82    pooka 	void *rv, *desired = NULL;
    484  1.50    pooka 	int alignbit, error;
    485  1.50    pooka 
    486  1.82    pooka #ifdef __x86_64__
    487  1.82    pooka 	/*
    488  1.82    pooka 	 * On amd64, allocate all module memory from the lowest 2GB.
    489  1.82    pooka 	 * This is because NetBSD kernel modules are compiled
    490  1.82    pooka 	 * with -mcmodel=kernel and reserve only 4 bytes for
    491  1.82    pooka 	 * offsets.  If we load code compiled with -mcmodel=kernel
    492  1.82    pooka 	 * anywhere except the lowest or highest 2GB, it will not
    493  1.82    pooka 	 * work.  Since userspace does not have access to the highest
    494  1.82    pooka 	 * 2GB, use the lowest 2GB.
    495  1.82    pooka 	 *
    496  1.82    pooka 	 * Note: this assumes the rump kernel resides in
    497  1.82    pooka 	 * the lowest 2GB as well.
    498  1.82    pooka 	 *
     499  1.82    pooka 	 * Note2: yes, it's a quick hack, but since this is the only
    500  1.82    pooka 	 * place where we care about the map we're allocating from,
    501  1.82    pooka 	 * just use a simple "if" instead of coming up with a fancy
    502  1.82    pooka 	 * generic solution.
    503  1.82    pooka 	 */
    504  1.82    pooka 	extern struct vm_map *module_map;
    505  1.82    pooka 	if (map == module_map) {
    506  1.82    pooka 		desired = (void *)(0x80000000 - size);
    507  1.82    pooka 	}
    508  1.82    pooka #endif
    509  1.82    pooka 
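	/*
	 * Convert the requested byte alignment into the bit position
	 * (ffs(align)-1) passed to rumpuser_anonmmap() below; this
	 * assumes align is either 0 or a power of two.
	 */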
    510  1.50    pooka 	alignbit = 0;
    511  1.50    pooka 	if (align) {
    512  1.50    pooka 		alignbit = ffs(align)-1;
    513  1.50    pooka 	}
    514  1.50    pooka 
    515  1.82    pooka 	rv = rumpuser_anonmmap(desired, size, alignbit, flags & UVM_KMF_EXEC,
    516  1.81    pooka 	    &error);
    517  1.50    pooka 	if (rv == NULL) {
    518  1.50    pooka 		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
    519  1.50    pooka 			return 0;
    520  1.50    pooka 		else
    521  1.50    pooka 			panic("uvm_km_alloc failed");
    522  1.50    pooka 	}
    523  1.12    pooka 
    524  1.50    pooka 	if (flags & UVM_KMF_ZERO)
    525  1.12    pooka 		memset(rv, 0, size);
    526  1.12    pooka 
    527  1.12    pooka 	return (vaddr_t)rv;
    528  1.12    pooka }
    529  1.12    pooka 
    530  1.12    pooka void
    531  1.12    pooka uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
    532  1.12    pooka {
    533  1.12    pooka 
    534  1.50    pooka 	rumpuser_unmap((void *)vaddr, size);
    535  1.12    pooka }
    536  1.12    pooka 
    537  1.12    pooka struct vm_map *
    538  1.12    pooka uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
    539  1.12    pooka 	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
    540  1.12    pooka {
    541  1.12    pooka 
    542  1.12    pooka 	return (struct vm_map *)417416;
    543  1.12    pooka }
    544  1.40    pooka 
    545  1.40    pooka vaddr_t
    546  1.40    pooka uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
    547  1.40    pooka {
    548  1.40    pooka 
    549  1.80    pooka 	return (vaddr_t)rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
    550  1.80    pooka 	    waitok, "kmalloc");
    551  1.40    pooka }
    552  1.40    pooka 
    553  1.40    pooka void
    554  1.40    pooka uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
    555  1.40    pooka {
    556  1.40    pooka 
    557  1.84    pooka 	rump_hyperfree((void *)addr, PAGE_SIZE);
    558  1.50    pooka }
    559  1.50    pooka 
    560  1.50    pooka vaddr_t
    561  1.50    pooka uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
    562  1.50    pooka {
    563  1.50    pooka 
    564  1.77    pooka 	return uvm_km_alloc_poolpage(map, waitok);
    565  1.50    pooka }
    566  1.50    pooka 
    567  1.50    pooka void
    568  1.50    pooka uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
    569  1.50    pooka {
    570  1.50    pooka 
    571  1.77    pooka 	uvm_km_free_poolpage(map, vaddr);
    572  1.40    pooka }
    573  1.57    pooka 
    574  1.74    pooka void
    575  1.74    pooka uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
    576  1.74    pooka {
    577  1.74    pooka 
     578  1.74    pooka 	/* we may eventually want some model for available memory */
    579  1.74    pooka }
    580  1.74    pooka 
    581  1.57    pooka /*
    582  1.57    pooka  * Mapping and vm space locking routines.
    583  1.57    pooka  * XXX: these don't work for non-local vmspaces
    584  1.57    pooka  */
    585  1.57    pooka int
    586  1.57    pooka uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
    587  1.57    pooka {
    588  1.57    pooka 
    589  1.83    pooka 	KASSERT(vs == &vmspace0);
    590  1.57    pooka 	return 0;
    591  1.57    pooka }
    592  1.57    pooka 
    593  1.57    pooka void
    594  1.57    pooka uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
    595  1.57    pooka {
    596  1.57    pooka 
    597  1.83    pooka 	KASSERT(vs == &vmspace0);
    598  1.57    pooka }
    599  1.57    pooka 
    600  1.57    pooka void
    601  1.57    pooka vmapbuf(struct buf *bp, vsize_t len)
    602  1.57    pooka {
    603  1.57    pooka 
    604  1.57    pooka 	bp->b_saveaddr = bp->b_data;
    605  1.57    pooka }
    606  1.57    pooka 
    607  1.57    pooka void
    608  1.57    pooka vunmapbuf(struct buf *bp, vsize_t len)
    609  1.57    pooka {
    610  1.57    pooka 
    611  1.57    pooka 	bp->b_data = bp->b_saveaddr;
    612  1.57    pooka 	bp->b_saveaddr = 0;
    613  1.57    pooka }
    614  1.61    pooka 
    615  1.61    pooka void
    616  1.83    pooka uvmspace_addref(struct vmspace *vm)
    617  1.83    pooka {
    618  1.83    pooka 
    619  1.83    pooka 	/*
    620  1.83    pooka 	 * there is only vmspace0.  we're not planning on
    621  1.83    pooka 	 * feeding it to the fishes.
    622  1.83    pooka 	 */
    623  1.83    pooka }
    624  1.83    pooka 
    625  1.83    pooka void
    626  1.66    pooka uvmspace_free(struct vmspace *vm)
    627  1.66    pooka {
    628  1.66    pooka 
    629  1.66    pooka 	/* nothing for now */
    630  1.66    pooka }
    631  1.66    pooka 
    632  1.66    pooka int
    633  1.66    pooka uvm_io(struct vm_map *map, struct uio *uio)
    634  1.66    pooka {
    635  1.66    pooka 
    636  1.66    pooka 	/*
    637  1.66    pooka 	 * just do direct uio for now.  but this needs some vmspace
    638  1.66    pooka 	 * olympics for rump_sysproxy.
    639  1.66    pooka 	 */
    640  1.66    pooka 	return uiomove((void *)(vaddr_t)uio->uio_offset, uio->uio_resid, uio);
    641  1.66    pooka }
    642  1.66    pooka 
    643  1.61    pooka /*
    644  1.61    pooka  * page life cycle stuff.  it really doesn't exist, so just stubs.
    645  1.61    pooka  */
    646  1.61    pooka 
    647  1.61    pooka void
    648  1.61    pooka uvm_pageactivate(struct vm_page *pg)
    649  1.61    pooka {
    650  1.61    pooka 
    651  1.61    pooka 	/* nada */
    652  1.61    pooka }
    653  1.61    pooka 
    654  1.61    pooka void
    655  1.61    pooka uvm_pagedeactivate(struct vm_page *pg)
    656  1.61    pooka {
    657  1.61    pooka 
    658  1.61    pooka 	/* nada */
    659  1.61    pooka }
    660  1.61    pooka 
    661  1.61    pooka void
    662  1.61    pooka uvm_pagedequeue(struct vm_page *pg)
    663  1.61    pooka {
    664  1.61    pooka 
     665  1.61    pooka 	/* nada */
    666  1.61    pooka }
    667  1.61    pooka 
    668  1.61    pooka void
    669  1.61    pooka uvm_pageenqueue(struct vm_page *pg)
    670  1.61    pooka {
    671  1.61    pooka 
    672  1.61    pooka 	/* nada */
    673  1.61    pooka }
    674  1.80    pooka 
    675  1.88    pooka void
    676  1.88    pooka uvmpdpol_anfree(struct vm_anon *an)
    677  1.88    pooka {
    678  1.88    pooka 
    679  1.88    pooka 	/* nada */
    680  1.88    pooka }
    681  1.88    pooka 
    682  1.80    pooka /*
    683  1.80    pooka  * Routines related to the Page Baroness.
    684  1.80    pooka  */
    685  1.80    pooka 
    686  1.80    pooka void
    687  1.80    pooka uvm_wait(const char *msg)
    688  1.80    pooka {
    689  1.80    pooka 
    690  1.80    pooka 	if (__predict_false(curlwp == uvm.pagedaemon_lwp))
    691  1.80    pooka 		panic("pagedaemon out of memory");
    692  1.80    pooka 	if (__predict_false(rump_threads == 0))
    693  1.80    pooka 		panic("pagedaemon missing (RUMP_THREADS = 0)");
    694  1.80    pooka 
    695  1.80    pooka 	mutex_enter(&pdaemonmtx);
    696  1.80    pooka 	pdaemon_waiters++;
    697  1.80    pooka 	cv_signal(&pdaemoncv);
    698  1.80    pooka 	cv_wait(&oomwait, &pdaemonmtx);
    699  1.80    pooka 	mutex_exit(&pdaemonmtx);
    700  1.80    pooka }
    701  1.80    pooka 
    702  1.80    pooka void
    703  1.80    pooka uvm_pageout_start(int npages)
    704  1.80    pooka {
    705  1.80    pooka 
    706  1.80    pooka 	/* we don't have the heuristics */
    707  1.80    pooka }
    708  1.80    pooka 
    709  1.80    pooka void
    710  1.80    pooka uvm_pageout_done(int npages)
    711  1.80    pooka {
    712  1.80    pooka 
    713  1.80    pooka 	/* could wakeup waiters, but just let the pagedaemon do it */
    714  1.80    pooka }
    715  1.80    pooka 
    716  1.80    pooka /*
    717  1.80    pooka  * Under-construction page mistress.  This is lacking vfs support, namely:
    718  1.80    pooka  *
    719  1.80    pooka  *  1) draining vfs buffers
    720  1.80    pooka  *  2) paging out pages in vm vnode objects
    721  1.80    pooka  *     (we will not page out anon memory on the basis that
    722  1.80    pooka  *     that's the task of the host)
    723  1.80    pooka  */
    724  1.80    pooka 
    725  1.80    pooka void
    726  1.80    pooka uvm_pageout(void *arg)
    727  1.80    pooka {
    728  1.80    pooka 	struct pool *pp, *pp_first;
    729  1.80    pooka 	uint64_t where;
    730  1.80    pooka 	int timo = 0;
    731  1.80    pooka 	bool succ;
    732  1.80    pooka 
    733  1.80    pooka 	mutex_enter(&pdaemonmtx);
    734  1.80    pooka 	for (;;) {
    735  1.80    pooka 		cv_timedwait(&pdaemoncv, &pdaemonmtx, timo);
    736  1.80    pooka 		uvmexp.pdwoke++;
    737  1.80    pooka 		kernel_map->flags |= VM_MAP_WANTVA;
    738  1.80    pooka 		mutex_exit(&pdaemonmtx);
    739  1.80    pooka 
    740  1.80    pooka 		succ = false;
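		/*
		 * Walk the kernel's pools at most one full cycle:
		 * pool_drain_start() selects a pool to drain and
		 * pool_drain_end() releases its idle pages, returning
		 * true if anything was reclaimed.  Stop on the first
		 * successful drain or once we are back at the pool we
		 * started from.
		 */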
    741  1.80    pooka 		pool_drain_start(&pp_first, &where);
    742  1.80    pooka 		pp = pp_first;
    743  1.80    pooka 		for (;;) {
    744  1.80    pooka 			succ = pool_drain_end(pp, where);
    745  1.80    pooka 			if (succ)
    746  1.80    pooka 				break;
    747  1.80    pooka 			pool_drain_start(&pp, &where);
    748  1.80    pooka 			if (pp == pp_first) {
    749  1.80    pooka 				succ = pool_drain_end(pp, where);
    750  1.80    pooka 				break;
    751  1.80    pooka 			}
    752  1.80    pooka 		}
    753  1.80    pooka 		mutex_enter(&pdaemonmtx);
    754  1.80    pooka 
    755  1.80    pooka 		if (!succ) {
    756  1.80    pooka 			rumpuser_dprintf("pagedaemoness: failed to reclaim "
    757  1.80    pooka 			    "memory ... sleeping (deadlock?)\n");
    758  1.80    pooka 			timo = hz;
    759  1.80    pooka 			continue;
    760  1.80    pooka 		}
    761  1.80    pooka 		kernel_map->flags &= ~VM_MAP_WANTVA;
    762  1.80    pooka 		timo = 0;
    763  1.80    pooka 
    764  1.80    pooka 		if (pdaemon_waiters) {
    765  1.80    pooka 			pdaemon_waiters = 0;
    766  1.80    pooka 			cv_broadcast(&oomwait);
    767  1.80    pooka 		}
    768  1.80    pooka 	}
    769  1.80    pooka 
    770  1.80    pooka 	panic("you can swap out any time you like, but you can never leave");
    771  1.80    pooka }
    772  1.80    pooka 
    773  1.80    pooka /*
    774  1.80    pooka  * In a regular kernel the pagedaemon is activated when memory becomes
    775  1.80    pooka  * low.  In a virtual rump kernel we do not know exactly how much memory
    776  1.80    pooka  * we have available -- it depends on the conditions on the host.
    777  1.80    pooka  * Therefore, we cannot preemptively kick the pagedaemon.  Rather, we
     778  1.80    pooka  * wait until things are desperate and we're forced to uvm_wait().
    779  1.80    pooka  *
    780  1.80    pooka  * The alternative would be to allocate a huge chunk of memory at
    781  1.80    pooka  * startup, but that solution has a number of problems including
    782  1.80    pooka  * being a resource hog, failing anyway due to host memory overcommit
    783  1.80    pooka  * and core dump size.
    784  1.80    pooka  */
    785  1.80    pooka 
    786  1.80    pooka void
    787  1.80    pooka uvm_kick_pdaemon()
    788  1.80    pooka {
    789  1.80    pooka 
    790  1.80    pooka 	/* nada */
    791  1.80    pooka }
    792  1.80    pooka 
    793  1.80    pooka void *
    794  1.80    pooka rump_hypermalloc(size_t howmuch, int alignment, bool waitok, const char *wmsg)
    795  1.80    pooka {
    796  1.84    pooka 	unsigned long newmem;
    797  1.80    pooka 	void *rv;
    798  1.80    pooka 
    799  1.84    pooka 	/* first we must be within the limit */
    800  1.84    pooka  limitagain:
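	/*
	 * Optimistically charge the allocation against the limit and
	 * back it out if that took us over; then either fail (!waitok)
	 * or wait for the pagedaemon to reclaim memory and retry.
	 */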
    801  1.84    pooka 	if (physmemlimit != RUMPMEM_UNLIMITED) {
    802  1.84    pooka 		newmem = atomic_add_long_nv(&curphysmem, howmuch);
    803  1.84    pooka 		if (newmem > physmemlimit) {
    804  1.84    pooka 			newmem = atomic_add_long_nv(&curphysmem, -howmuch);
    805  1.84    pooka 			if (!waitok)
    806  1.84    pooka 				return NULL;
    807  1.84    pooka 			uvm_wait(wmsg);
    808  1.84    pooka 			goto limitagain;
    809  1.84    pooka 		}
    810  1.84    pooka 	}
    811  1.84    pooka 
    812  1.84    pooka 	/* second, we must get something from the backend */
    813  1.80    pooka  again:
    814  1.80    pooka 	rv = rumpuser_malloc(howmuch, alignment);
    815  1.80    pooka 	if (__predict_false(rv == NULL && waitok)) {
    816  1.80    pooka 		uvm_wait(wmsg);
    817  1.80    pooka 		goto again;
    818  1.80    pooka 	}
    819  1.80    pooka 
    820  1.80    pooka 	return rv;
    821  1.80    pooka }
    822  1.84    pooka 
    823  1.84    pooka void
    824  1.84    pooka rump_hyperfree(void *what, size_t size)
    825  1.84    pooka {
    826  1.84    pooka 
    827  1.84    pooka 	if (physmemlimit != RUMPMEM_UNLIMITED) {
    828  1.84    pooka 		atomic_add_long(&curphysmem, -size);
    829  1.84    pooka 	}
    830  1.84    pooka 	rumpuser_free(what);
    831  1.84    pooka }
    832