/*	$NetBSD: vm.c,v 1.70.2.4 2010/10/22 07:22:51 uebayasi Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation and the Research Foundation of
 * The Helsinki University of Technology.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */
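
/*
 * Illustrative sketch (not part of the original file): since
 * pg->uanon doubles as the host address of a page's backing storage,
 * a page's contents can be inspected with a plain
 *
 *	memcpy(buf, (void *)pg->uanon, PAGE_SIZE);
 *
 * as uvm_pagezero() and uvm_pagermapin() below do.
 */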

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.70.2.4 2010/10/22 07:22:51 uebayasi Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/null.h>
#include <sys/vnode.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

kmutex_t uvm_pageqlock;
kmutex_t uvm_swap_data_lock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

static unsigned int pdaemon_waiters;
static kmutex_t pdaemonmtx;
static kcondvar_t pdaemoncv, oomwait;

unsigned long rump_physmemlimit = RUMPMEM_UNLIMITED;
static unsigned long curphysmem;
static unsigned long dddlim;		/* 90% of memory limit used */
#define NEED_PAGEDAEMON() \
    (rump_physmemlimit != RUMPMEM_UNLIMITED && curphysmem > dddlim)
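
/*
 * Worked example (assumed numbers, not from the original source):
 * with RUMP_MEMLIMIT=10485760 (10MB), uvm_init() below sets dddlim
 * to 9 * (10485760 / 10) = 9437184 bytes, so NEED_PAGEDAEMON()
 * fires once allocations exceed roughly 9MB.
 */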

/*
 * Try to free two pages' worth of pages from objects.  If this
 * successfully frees a full page cache page, we'll free the released
 * page plus PAGE_SIZE/sizeof(vm_page).
 */
#define PAGEDAEMON_OBJCHUNK (2*PAGE_SIZE / sizeof(struct vm_page))
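
/*
 * Illustrative sizing (assumed numbers, not from the original source):
 * with PAGE_SIZE = 4096 and sizeof(struct vm_page) = 128, the
 * pagedaemon tries to clean 2*4096/128 = 64 object pages per pass.
 */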

/*
 * Keep a list of least recently used pages.  Since the only way a
 * rump kernel can "access" a page is via lookup, we put the page
 * at the back of queue every time a lookup for it is done.  If the
 * page is in front of this global queue and we're short of memory,
 * it's a candidate for pageout.
 */
static struct pglist vmpage_lruqueue;
static unsigned vmpage_onqueue;

static int
pg_compare_key(void *ctx, const void *n, const void *key)
{
	voff_t a = ((const struct vm_page *)n)->offset;
	voff_t b = *(const voff_t *)key;

	if (a < b)
		return -1;
	else if (a > b)
		return 1;
	else
		return 0;
}

static int
pg_compare_nodes(void *ctx, const void *n1, const void *n2)
{

	return pg_compare_key(ctx, n1, &((const struct vm_page *)n2)->offset);
}

const rb_tree_ops_t uvm_page_tree_ops = {
	.rbto_compare_nodes = pg_compare_nodes,
	.rbto_compare_key = pg_compare_key,
	.rbto_node_offset = offsetof(struct vm_page, rb_node),
	.rbto_context = NULL
};
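
/*
 * Illustrative sketch (not part of the original file): the ops above
 * are handed to rb_tree_init() when an object's page tree is set up
 * (that happens elsewhere in the kernel), after which pages are found
 * by offset.  The hypothetical helpers below only demonstrate the
 * calling convention; see uvm_pagelookup() for the real consumer.
 */
#ifdef notyet
static void
example_pagetree_init(struct uvm_object *uobj)
{

	rb_tree_init(&uobj->rb_tree, &uvm_page_tree_ops);
}

static struct vm_page *
example_pagetree_lookup(struct uvm_object *uobj, voff_t off)
{

	/* pg_compare_key() receives &off as the search key */
	return rb_tree_find_node(&uobj->rb_tree, &off);
}
#endif /* notyet */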

/*
 * vm pages
 */

static int
pgctor(void *arg, void *obj, int flags)
{
	struct vm_page *pg = obj;

	memset(pg, 0, sizeof(*pg));
	pg->uanon = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE, true, "pgalloc");
	return 0;
}

static void
pgdtor(void *arg, void *obj)
{
	struct vm_page *pg = obj;

	rump_hyperfree(pg->uanon, PAGE_SIZE);
}

static struct pool_cache pagecache;

/*
 * Called with the object locked.  We don't support anons.
 */
struct vm_page *
uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
	int flags, int strat, int free_list)
{
	struct vm_page *pg;

	KASSERT(uobj && mutex_owned(&uobj->vmobjlock));
	KASSERT(anon == NULL);

	pg = pool_cache_get(&pagecache, PR_WAITOK);
	pg->offset = off;
	pg->uobject = uobj;

	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;
	if (flags & UVM_PGA_ZERO) {
		uvm_pagezero(pg);
	}

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	(void)rb_tree_insert_node(&uobj->rb_tree, pg);

	/*
	 * Don't put anons on the LRU page queue.  We can't flush them
	 * (there's no concept of swap in a rump kernel), so no reason
	 * to bother with them.
	 */
	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_inc_uint(&vmpage_onqueue);
		mutex_enter(&uvm_pageqlock);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&uvm_pageqlock);
	}

	uobj->uo_npages++;

	return pg;
}
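
/*
 * Illustrative caller-side sketch (not part of the original file):
 * the object lock must be held across the allocation and the page
 * comes back PG_BUSY.  uvm_pagealloc() is the usual front-end macro
 * for uvm_pagealloc_strat(); the helper name below is hypothetical.
 */
#ifdef notyet
static struct vm_page *
example_pagealloc(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	mutex_enter(&uobj->vmobjlock);
	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
	if (pg != NULL)
		uvm_page_unbusy(&pg, 1);
	mutex_exit(&uobj->vmobjlock);

	return pg;
}
#endif /* notyet */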

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	KASSERT(mutex_owned(&uvm_pageqlock));
	KASSERT(mutex_owned(&uobj->vmobjlock));

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);

	uobj->uo_npages--;
	rb_tree_remove_node(&uobj->rb_tree, pg);

	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		atomic_dec_uint(&vmpage_onqueue);
	}

	pool_cache_put(&pagecache, pg);
}

void
uvm_pagezero(struct vm_page *pg)
{

	pg->flags &= ~PG_CLEAN;
	memset((void *)pg->uanon, 0, PAGE_SIZE);
}

/*
 * Misc routines
 */

static kmutex_t pagermtx;

void
uvm_init(void)
{
	char buf[64];
	int error;

	if (rumpuser_getenv("RUMP_MEMLIMIT", buf, sizeof(buf), &error) == 0) {
		rump_physmemlimit = strtoll(buf, NULL, 10);
		/* it's not like we'd get far with, say, 1 byte, but ... */
		if (rump_physmemlimit == 0)
			panic("uvm_init: no memory available");
#define HUMANIZE_BYTES 9
		CTASSERT(sizeof(buf) >= HUMANIZE_BYTES);
		format_bytes(buf, HUMANIZE_BYTES, rump_physmemlimit);
#undef HUMANIZE_BYTES
		dddlim = 9 * (rump_physmemlimit / 10);
	} else {
		strlcpy(buf, "unlimited (host limit)", sizeof(buf));
	}
	aprint_verbose("total memory = %s\n", buf);

	TAILQ_INIT(&vmpage_lruqueue);

	uvmexp.free = 1024*1024; /* XXX: arbitrary & not updated */

	mutex_init(&pagermtx, MUTEX_DEFAULT, 0);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, 0);
	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, 0);

	mutex_init(&pdaemonmtx, MUTEX_DEFAULT, 0);
	cv_init(&pdaemoncv, "pdaemon");
	cv_init(&oomwait, "oomwait");

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);

	pool_cache_bootstrap(&pagecache, sizeof(struct vm_page), 0, 0, 0,
	    "page$", NULL, IPL_NONE, pgctor, pgdtor, NULL);
}

void
uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
{

	vm->vm_map.pmap = pmap_kernel();
	vm->vm_refcnt = 1;
}
void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

/* where's your schmonz now? */
#define PUNLIMIT(a)	\
    p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
void
uvm_init_limits(struct proc *p)
{

	PUNLIMIT(RLIMIT_STACK);
	PUNLIMIT(RLIMIT_DATA);
	PUNLIMIT(RLIMIT_RSS);
	PUNLIMIT(RLIMIT_AS);
	/* nice, cascade */
}
#undef PUNLIMIT

/*
 * This satisfies the "disgusting mmap hack" used by proplib.
 * We probably should grow some more assertables to make sure we're
 * not satisfying anything we shouldn't be satisfying.  At least we
 * should make sure it's the local machine we're mmapping ...
 */
int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{
	void *uaddr;
	int error;

	if (prot != (VM_PROT_READ | VM_PROT_WRITE))
		panic("uvm_mmap() variant unsupported");
	if (flags != (MAP_PRIVATE | MAP_ANON))
		panic("uvm_mmap() variant unsupported");
	/* no reason in particular, but cf. uvm_default_mapaddr() */
	if (*addr != 0)
		panic("uvm_mmap() variant unsupported");

	uaddr = rumpuser_anonmmap(NULL, size, 0, 0, &error);
	if (uaddr == NULL)
		return error;

	*addr = (vaddr_t)uaddr;
	return 0;
}

struct pagerinfo {
	vaddr_t pgr_kva;
	int pgr_npages;
	struct vm_page **pgr_pgs;
	bool pgr_read;

	LIST_ENTRY(pagerinfo) pgr_entries;
};
static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);

/*
 * Pager "map" in routine.  Instead of mapping, we allocate memory
 * and copy page contents there.  Not optimal or even strictly
 * correct (the caller might modify the page contents after mapping
 * them in), but what the heck.  Assumes UVMPAGER_MAPIN_WAITOK.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	/* allocate structures */
	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
	pgri->pgr_npages = npages;
	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;

	/* copy contents to "mapped" memory */
	for (i = 0, curkva = pgri->pgr_kva;
	    i < npages;
	    i++, curkva += PAGE_SIZE) {
		/*
		 * We need to copy the previous contents of the pages to
		 * the window even if we are reading from the
		 * device, since the device might not fill the contents of
		 * the full mapped range and we will end up corrupting
		 * data when we unmap the window.
		 */
		memcpy((void *)curkva, pgs[i]->uanon, PAGE_SIZE);
		pgri->pgr_pgs[i] = pgs[i];
	}

	mutex_enter(&pagermtx);
	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
	mutex_exit(&pagermtx);

	return pgri->pgr_kva;
}

/*
 * Map out the pager window.  Return contents from VA to page storage
 * and free structures.
 *
 * Note: does not currently support partial frees.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva == kva)
			break;
	}
	KASSERT(pgri);
	if (pgri->pgr_npages != npages)
		panic("uvm_pagermapout: partial unmapping not supported");
	LIST_REMOVE(pgri, pgr_entries);
	mutex_exit(&pagermtx);

	if (pgri->pgr_read) {
		for (i = 0, curkva = pgri->pgr_kva;
		    i < pgri->pgr_npages;
		    i++, curkva += PAGE_SIZE) {
			memcpy(pgri->pgr_pgs[i]->uanon,
			    (void *)curkva, PAGE_SIZE);
		}
	}

	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
	kmem_free((void *)pgri->pgr_kva, npages * PAGE_SIZE);
	kmem_free(pgri, sizeof(*pgri));
}
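
/*
 * Illustrative life cycle of a pager window (not part of the original
 * file): page contents are copied into the window, the backend does
 * its i/o on the window, and mapping out copies read data back into
 * page storage.  The helper name below is hypothetical.
 */
#ifdef notyet
static void
example_pagerwindow(struct vm_page **pgs, int npages)
{
	vaddr_t kva;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	/* ... backend reads npages*PAGE_SIZE bytes into kva ... */
	uvm_pagermapout(kva, npages);
}
#endif /* notyet */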

/*
 * Convert va in pager window to page structure.
 * XXX: how expensive is this (global lock, list traversal)?
 */
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct pagerinfo *pgri;
	struct vm_page *pg = NULL;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva <= va
		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
			break;
	}
	if (pgri) {
		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
		pg = pgri->pgr_pgs[i];
	}
	mutex_exit(&pagermtx);

	return pg;
}

/*
 * Called with the vm object locked.
 *
 * Put vnode object pages at the end of the access queue to indicate
 * that they have been recently accessed and should not be immediate
 * candidates for pageout.  Do not do this for lookups done by
 * the pagedaemon, so as to mimic pmap_kentered mappings, which do
 * not track access information.
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;
	bool ispagedaemon = curlwp == uvm.pagedaemon_lwp;

	pg = rb_tree_find_node(&uobj->rb_tree, &off);
	if (pg && !UVM_OBJ_IS_AOBJ(pg->uobject) && !ispagedaemon) {
		mutex_enter(&uvm_pageqlock);
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&uvm_pageqlock);
	}

	return pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	KASSERT(npgs > 0);
	KASSERT(mutex_owned(&pgs[0]->uobject->vmobjlock));

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	if (map->flags & VM_MAP_WANTVA)
		return true;

	return false;
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	return EBUSY;
}

#ifdef DEBUGPRINT
void
uvm_object_printit(struct uvm_object *uobj, bool full,
	void (*pr)(const char *, ...))
{

	pr("VM OBJECT at %p, refs %d", uobj, uobj->uo_refs);
}
#endif

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return 0;
}

int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
	vm_prot_t prot, bool set_max)
{

	return EOPNOTSUPP;
}

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv, *desired = NULL;
	int alignbit, error;

#ifdef __x86_64__
	/*
	 * On amd64, allocate all module memory from the lowest 2GB.
	 * This is because NetBSD kernel modules are compiled
	 * with -mcmodel=kernel and use only 32-bit offsets.  If we
	 * load code compiled with -mcmodel=kernel anywhere except
	 * the lowest or highest 2GB, it will not work.  Since
	 * userspace does not have access to the highest 2GB, use
	 * the lowest 2GB.
	 *
	 * Note: this assumes the rump kernel resides in
	 * the lowest 2GB as well.
	 *
	 * Note2: yes, it's a quick hack, but since this is the only
	 * place where we care about the map we're allocating from,
	 * just use a simple "if" instead of coming up with a fancy
	 * generic solution.
	 */
	extern struct vm_map *module_map;
	if (map == module_map) {
		desired = (void *)(0x80000000 - size);
	}
#endif

	alignbit = 0;
	if (align) {
		alignbit = ffs(align)-1;
	}

	rv = rumpuser_anonmmap(desired, size, alignbit, flags & UVM_KMF_EXEC,
	    &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}
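
/*
 * Illustrative usage (not part of the original file): allocate and
 * release a zeroed chunk of kernel memory.  In this emulation the
 * map argument only matters for module_map on amd64.  The helper
 * name below is hypothetical.
 */
#ifdef notyet
static void
example_km(void)
{
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_ZERO);
	if (va != 0)
		uvm_km_free(kernel_map, va, PAGE_SIZE, 0);
}
#endif /* notyet */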

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
	    waitok, "kmalloc");
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rump_hyperfree((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{

	return uvm_km_alloc_poolpage(map, waitok);
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	uvm_km_free_poolpage(map, vaddr);
}

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{

	/* we may eventually want some model for available memory */
}

/*
 * Mapping and vm space locking routines.
 * XXX: these don't work for non-local vmspaces
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	KASSERT(vs == &vmspace0);
	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	KASSERT(vs == &vmspace0);
}

void
vmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_saveaddr = bp->b_data;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

void
uvmspace_addref(struct vmspace *vm)
{

	/*
	 * there is only vmspace0.  we're not planning on
	 * feeding it to the fishes.
	 */
}

void
uvmspace_free(struct vmspace *vm)
{

	/* nothing for now */
}

int
uvm_io(struct vm_map *map, struct uio *uio)
{

	/*
	 * just do direct uio for now.  but this needs some vmspace
	 * olympics for rump_sysproxy.
	 */
	return uiomove((void *)(vaddr_t)uio->uio_offset, uio->uio_resid, uio);
}

/*
 * page life cycle stuff.  it really doesn't exist, so just stubs.
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedeactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedequeue(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageenqueue(struct vm_page *pg)
{

	/* nada */
}

void
uvmpdpol_anfree(struct vm_anon *an)
{

	/* nada */
}

/*
 * Routines related to the Page Baroness.
 */

void
uvm_wait(const char *msg)
{

	if (__predict_false(curlwp == uvm.pagedaemon_lwp))
		panic("pagedaemon out of memory");
	if (__predict_false(rump_threads == 0))
		panic("pagedaemon missing (RUMP_THREADS = 0)");

	mutex_enter(&pdaemonmtx);
	pdaemon_waiters++;
	cv_signal(&pdaemoncv);
	cv_wait(&oomwait, &pdaemonmtx);
	mutex_exit(&pdaemonmtx);
}

void
uvm_pageout_start(int npages)
{

	/* we don't have the heuristics */
}

void
uvm_pageout_done(int npages)
{

	/* could wakeup waiters, but just let the pagedaemon do it */
}

static bool
processpage(struct vm_page *pg)
{
	struct uvm_object *uobj;

	uobj = pg->uobject;
	if (mutex_tryenter(&uobj->vmobjlock)) {
		if ((pg->flags & PG_BUSY) == 0) {
			mutex_exit(&uvm_pageqlock);
			uobj->pgops->pgo_put(uobj, pg->offset,
			    pg->offset + PAGE_SIZE,
			    PGO_CLEANIT|PGO_FREE);
			KASSERT(!mutex_owned(&uobj->vmobjlock));
			return true;
		} else {
			mutex_exit(&uobj->vmobjlock);
		}
	}

	return false;
}

/*
 * The Diabolical pageDaemon Director (DDD).
 */
void
uvm_pageout(void *arg)
{
	struct vm_page *pg;
	struct pool *pp, *pp_first;
	uint64_t where;
	int timo = 0;
	int cleaned, skip, skipped;
	bool succ = false;

	mutex_enter(&pdaemonmtx);
	for (;;) {
		if (succ) {
			kernel_map->flags &= ~VM_MAP_WANTVA;
			kmem_map->flags &= ~VM_MAP_WANTVA;
			timo = 0;
			if (pdaemon_waiters) {
				pdaemon_waiters = 0;
				cv_broadcast(&oomwait);
			}
		}
		succ = false;

		cv_timedwait(&pdaemoncv, &pdaemonmtx, timo);
		uvmexp.pdwoke++;

		/* tell the world that we are hungry */
		kernel_map->flags |= VM_MAP_WANTVA;
		kmem_map->flags |= VM_MAP_WANTVA;

		if (pdaemon_waiters == 0 && !NEED_PAGEDAEMON())
			continue;
		mutex_exit(&pdaemonmtx);

		/*
		 * step one: reclaim the page cache.  this should give
		 * us the biggest earnings since whole pages are released
		 * into backing memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			succ = true;
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * Ok, so that didn't help.  Next, try to hunt memory
		 * by pushing out vnode pages.  The pages might contain
		 * useful cached data, but we need the memory.
		 */
		cleaned = 0;
		skip = 0;
 again:
		mutex_enter(&uvm_pageqlock);
		while (cleaned < PAGEDAEMON_OBJCHUNK) {
			skipped = 0;
			TAILQ_FOREACH(pg, &vmpage_lruqueue, pageq.queue) {

				/*
				 * skip over pages we _might_ have tried
				 * to handle earlier.  they might not be
				 * exactly the same ones, but I'm not too
				 * concerned.
				 */
				if (skipped++ < skip)
					continue;

				if (processpage(pg)) {
					cleaned++;
					goto again;
				}

				skip++;
			}
			break;
		}
		mutex_exit(&uvm_pageqlock);

		/*
		 * And of course we need to reclaim the page cache
		 * again to actually release memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			succ = true;
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * Still not there?  sleeves come off right about now.
		 * First: do reclaim on kernel/kmem map.
		 */
		callback_run_roundrobin(&kernel_map_store.vmk_reclaim_callback,
		    NULL);
		callback_run_roundrobin(&kmem_map_store.vmk_reclaim_callback,
		    NULL);

		/*
		 * And then drain the pools.  Wipe them out ... all of them.
		 */

		pool_drain_start(&pp_first, &where);
		pp = pp_first;
		for (;;) {
			rump_vfs_drainbufs(10 /* XXX: estimate better */);
			succ = pool_drain_end(pp, where);
			if (succ)
				break;
			pool_drain_start(&pp, &where);
			if (pp == pp_first) {
				succ = pool_drain_end(pp, where);
				break;
			}
		}

		/*
		 * Need to use PYEC on our bag of tricks.
		 * Unfortunately, the wife just borrowed it.
		 */

		if (!succ) {
			rumpuser_dprintf("pagedaemoness: failed to reclaim "
			    "memory ... sleeping (deadlock?)\n");
			timo = hz;
		}

		mutex_enter(&pdaemonmtx);
	}

	panic("you can swap out any time you like, but you can never leave");
}

void
uvm_kick_pdaemon(void)
{

	/*
	 * Wake up the diabolical pagedaemon director if we are over
	 * 90% of the memory limit.  This is a complete and utter
	 * stetson-harrison decision which you are allowed to finetune.
	 * Don't bother locking.  If we have some unflushed caches,
	 * other waker-uppers will deal with the issue.
	 */
	if (NEED_PAGEDAEMON()) {
		cv_signal(&pdaemoncv);
	}
}

void *
rump_hypermalloc(size_t howmuch, int alignment, bool waitok, const char *wmsg)
{
	unsigned long newmem;
	void *rv;

	uvm_kick_pdaemon(); /* ouch */

	/* first we must be within the limit */
 limitagain:
	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		newmem = atomic_add_long_nv(&curphysmem, howmuch);
		if (newmem > rump_physmemlimit) {
			newmem = atomic_add_long_nv(&curphysmem, -howmuch);
			if (!waitok)
				return NULL;
			uvm_wait(wmsg);
			goto limitagain;
		}
	}

	/* second, we must get something from the backend */
 again:
	rv = rumpuser_malloc(howmuch, alignment);
	if (__predict_false(rv == NULL && waitok)) {
		uvm_wait(wmsg);
		goto again;
	}

	return rv;
}

void
rump_hyperfree(void *what, size_t size)
{

	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		atomic_add_long(&curphysmem, -size);
	}
	rumpuser_free(what);
}

paddr_t
uvm_vm_page_to_phys(const struct vm_page *pg)
{

	return 0;
}