      1 /*	$NetBSD: vm.c,v 1.173.14.1 2021/07/06 04:22:34 martin Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
      5  *
      6  * Development of this software was supported by
      7  * The Finnish Cultural Foundation and the Research Foundation of
      8  * The Helsinki University of Technology.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     20  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     21  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     22  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     25  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Virtual memory emulation routines.
     34  */
     35 
     36 /*
     37  * XXX: we abuse pg->uanon for the virtual address of the storage
     38  * for each page.  phys_addr would fit the job description better,
     39  * except that it will create unnecessary lossage on some platforms
     40  * due to not being a pointer type.
     41  */
     42 
     43 #include <sys/cdefs.h>
     44 __KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.173.14.1 2021/07/06 04:22:34 martin Exp $");
     45 
     46 #include <sys/param.h>
     47 #include <sys/atomic.h>
     48 #include <sys/buf.h>
     49 #include <sys/kernel.h>
     50 #include <sys/kmem.h>
     51 #include <sys/vmem.h>
     52 #include <sys/mman.h>
     53 #include <sys/null.h>
     54 #include <sys/vnode.h>
     55 
     56 #include <machine/pmap.h>
     57 
     58 #include <uvm/uvm.h>
     59 #include <uvm/uvm_ddb.h>
     60 #include <uvm/uvm_pdpolicy.h>
     61 #include <uvm/uvm_prot.h>
     62 #include <uvm/uvm_readahead.h>
     63 #include <uvm/uvm_device.h>
     64 
     65 #include <rump-sys/kern.h>
     66 #include <rump-sys/vfs.h>
     67 
     68 #include <rump/rumpuser.h>
     69 
     70 kmutex_t uvm_pageqlock; /* non-free page lock */
     71 kmutex_t uvm_fpageqlock; /* free page lock, non-gpl license */
     72 kmutex_t uvm_swap_data_lock;
     73 
     74 struct uvmexp uvmexp;
     75 struct uvm uvm;
     76 
     77 #ifdef __uvmexp_pagesize
     78 const int * const uvmexp_pagesize = &uvmexp.pagesize;
     79 const int * const uvmexp_pagemask = &uvmexp.pagemask;
     80 const int * const uvmexp_pageshift = &uvmexp.pageshift;
     81 #endif
     82 
     83 static struct vm_map kernel_map_store;
     84 struct vm_map *kernel_map = &kernel_map_store;
     85 
     86 static struct vm_map module_map_store;
     87 extern struct vm_map *module_map;
     88 
     89 static struct pmap pmap_kernel;
     90 struct pmap rump_pmap_local;
     91 struct pmap *const kernel_pmap_ptr = &pmap_kernel;
     92 
     93 vmem_t *kmem_arena;
     94 vmem_t *kmem_va_arena;
     95 
     96 static unsigned int pdaemon_waiters;
     97 static kmutex_t pdaemonmtx;
     98 static kcondvar_t pdaemoncv, oomwait;
     99 
    100 /* all local non-proc0 processes share this vmspace */
    101 struct vmspace *rump_vmspace_local;
    102 
    103 unsigned long rump_physmemlimit = RUMPMEM_UNLIMITED;
    104 static unsigned long pdlimit = RUMPMEM_UNLIMITED; /* page daemon memlimit */
    105 static unsigned long curphysmem;
    106 static unsigned long dddlim;		/* 90% of memory limit used */
    107 #define NEED_PAGEDAEMON() \
    108     (rump_physmemlimit != RUMPMEM_UNLIMITED && curphysmem > dddlim)
    109 #define PDRESERVE (2*MAXPHYS)
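        /*
         * The pagedaemon is considered necessary once allocations exceed
         * dddlim, i.e. 90% of the configured limit.  PDRESERVE is carved
         * out of that limit so the pager can still allocate I/O buffers
         * after regular allocations have hit the wall; with the common
         * 64kB MAXPHYS the reserve is 128kB (MAXPHYS is port-specific).
         */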
    110 
    111 /*
    112  * Try to free two pages' worth of pages from objects.
    113  * If this successfully frees a full page cache page, we'll
    114  * free the released page plus PAGE_SIZE/sizeof(vm_page).
    115  */
    116 #define PAGEDAEMON_OBJCHUNK (2*PAGE_SIZE / sizeof(struct vm_page))
    117 
    118 /*
    119  * Keep a list of least recently used pages.  Since the only way a
    120  * rump kernel can "access" a page is via lookup, we put the page
    121  * at the back of queue every time a lookup for it is done.  If the
    122  * page is in front of this global queue and we're short of memory,
    123  * it's a candidate for pageout.
    124  */
    125 static struct pglist vmpage_lruqueue;
    126 static unsigned vmpage_onqueue;
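        /*
         * The queue is manipulated only while holding uvm_pageqlock;
         * vmpage_onqueue counts the pages currently on it.
         */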
    127 
    128 static int
    129 pg_compare_key(void *ctx, const void *n, const void *key)
    130 {
    131 	voff_t a = ((const struct vm_page *)n)->offset;
    132 	voff_t b = *(const voff_t *)key;
    133 
    134 	if (a < b)
    135 		return -1;
    136 	else if (a > b)
    137 		return 1;
    138 	else
    139 		return 0;
    140 }
    141 
    142 static int
    143 pg_compare_nodes(void *ctx, const void *n1, const void *n2)
    144 {
    145 
    146 	return pg_compare_key(ctx, n1, &((const struct vm_page *)n2)->offset);
    147 }
    148 
    149 const rb_tree_ops_t uvm_page_tree_ops = {
    150 	.rbto_compare_nodes = pg_compare_nodes,
    151 	.rbto_compare_key = pg_compare_key,
    152 	.rbto_node_offset = offsetof(struct vm_page, rb_node),
    153 	.rbto_context = NULL
    154 };
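        /*
         * Each uvm_object keeps its pages in a red-black tree keyed by the
         * page's byte offset within the object; uvm_pagelookup() goes
         * through these ops to find a page.
         */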
    155 
    156 /*
    157  * vm pages
    158  */
    159 
    160 static int
    161 pgctor(void *arg, void *obj, int flags)
    162 {
    163 	struct vm_page *pg = obj;
    164 
    165 	memset(pg, 0, sizeof(*pg));
    166 	pg->uanon = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
    167 	    (flags & PR_WAITOK) == PR_WAITOK, "pgalloc");
    168 	return pg->uanon == NULL;
    169 }
    170 
    171 static void
    172 pgdtor(void *arg, void *obj)
    173 {
    174 	struct vm_page *pg = obj;
    175 
    176 	rump_hyperfree(pg->uanon, PAGE_SIZE);
    177 }
    178 
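        /*
         * vm_page structures come from the pool cache below.  The ctor
         * attaches a PAGE_SIZE chunk of hypervisor memory to pg->uanon,
         * so reclaiming the cache (as the pagedaemon does) releases whole
         * pages of backing memory back to the host.
         */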
    179 static struct pool_cache pagecache;
    180 
    181 /*
    182  * Called with the object locked.  We don't support anons.
    183  */
    184 struct vm_page *
    185 uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
    186 	int flags, int strat, int free_list)
    187 {
    188 	struct vm_page *pg;
    189 
    190 	KASSERT(uobj && mutex_owned(uobj->vmobjlock));
    191 	KASSERT(anon == NULL);
    192 
    193 	pg = pool_cache_get(&pagecache, PR_NOWAIT);
    194 	if (__predict_false(pg == NULL)) {
    195 		return NULL;
    196 	}
    197 
    198 	pg->offset = off;
    199 	pg->uobject = uobj;
    200 
    201 	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;
    202 	if (flags & UVM_PGA_ZERO) {
    203 		uvm_pagezero(pg);
    204 	}
    205 
    206 	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
    207 	(void)rb_tree_insert_node(&uobj->rb_tree, pg);
    208 
    209 	/*
    210 	 * Don't put pages of anonymous objects on the LRU page queue.
    211 	 * We can't flush them (there's no concept of swap in a rump
    212 	 * kernel), so there's no reason to bother with them.
    213 	 */
    214 	if (!UVM_OBJ_IS_AOBJ(uobj)) {
    215 		atomic_inc_uint(&vmpage_onqueue);
    216 		mutex_enter(&uvm_pageqlock);
    217 		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
    218 		mutex_exit(&uvm_pageqlock);
    219 	}
    220 
    221 	uobj->uo_npages++;
    222 
    223 	return pg;
    224 }
    225 
    226 /*
    227  * Release a page.
    228  *
    229  * Called with the vm object locked.
    230  */
    231 void
    232 uvm_pagefree(struct vm_page *pg)
    233 {
    234 	struct uvm_object *uobj = pg->uobject;
    235 
    236 	KASSERT(mutex_owned(&uvm_pageqlock));
    237 	KASSERT(mutex_owned(uobj->vmobjlock));
    238 
    239 	if (pg->flags & PG_WANTED)
    240 		wakeup(pg);
    241 
    242 	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
    243 
    244 	uobj->uo_npages--;
    245 	rb_tree_remove_node(&uobj->rb_tree, pg);
    246 
    247 	if (!UVM_OBJ_IS_AOBJ(uobj)) {
    248 		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
    249 		atomic_dec_uint(&vmpage_onqueue);
    250 	}
    251 
    252 	pool_cache_put(&pagecache, pg);
    253 }
    254 
    255 void
    256 uvm_pagezero(struct vm_page *pg)
    257 {
    258 
    259 	pg->flags &= ~PG_CLEAN;
    260 	memset((void *)pg->uanon, 0, PAGE_SIZE);
    261 }
    262 
    263 /*
    264  * uvm_page_locked_p: return true if object associated with page is
    265  * locked.  this is a weak check for runtime assertions only.
    266  */
    267 
    268 bool
    269 uvm_page_locked_p(struct vm_page *pg)
    270 {
    271 
    272 	return mutex_owned(pg->uobject->vmobjlock);
    273 }
    274 
    275 /*
    276  * Misc routines
    277  */
    278 
    279 static kmutex_t pagermtx;
    280 
    281 void
    282 uvm_init(void)
    283 {
    284 	char buf[64];
    285 
    286 	if (rumpuser_getparam("RUMP_MEMLIMIT", buf, sizeof(buf)) == 0) {
    287 		unsigned long tmp;
    288 		char *ep;
    289 		int mult;
    290 
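        		/*
        		 * RUMP_MEMLIMIT is a decimal byte count with an
        		 * optional k/m/g suffix, e.g. RUMP_MEMLIMIT=16m.
        		 */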
    291 		tmp = strtoul(buf, &ep, 10);
    292 		if (strlen(ep) > 1)
    293 			panic("uvm_init: invalid RUMP_MEMLIMIT: %s", buf);
    294 
    295 		/* mini-dehumanize-number */
    296 		mult = 1;
    297 		switch (*ep) {
    298 		case 'k':
    299 			mult = 1024;
    300 			break;
    301 		case 'm':
    302 			mult = 1024*1024;
    303 			break;
    304 		case 'g':
    305 			mult = 1024*1024*1024;
    306 			break;
    307 		case 0:
    308 			break;
    309 		default:
    310 			panic("uvm_init: invalid RUMP_MEMLIMIT: %s", buf);
    311 		}
    312 		rump_physmemlimit = tmp * mult;
    313 
    314 		if (rump_physmemlimit / mult != tmp)
    315 			panic("uvm_init: RUMP_MEMLIMIT overflow: %s", buf);
    316 
    317 		/* reserve some memory for the pager */
    318 		if (rump_physmemlimit <= PDRESERVE)
    319 			panic("uvm_init: system reserves %d bytes of mem, "
    320 			    "only %lu bytes given",
    321 			    PDRESERVE, rump_physmemlimit);
    322 		pdlimit = rump_physmemlimit;
    323 		rump_physmemlimit -= PDRESERVE;
    324 
    325 		if (pdlimit < 1024*1024)
    326 			printf("uvm_init: WARNING: <1MB RAM limit, "
    327 			    "hope you know what you're doing\n");
    328 
    329 #define HUMANIZE_BYTES 9
    330 		CTASSERT(sizeof(buf) >= HUMANIZE_BYTES);
    331 		format_bytes(buf, HUMANIZE_BYTES, rump_physmemlimit);
    332 #undef HUMANIZE_BYTES
    333 		dddlim = 9 * (rump_physmemlimit / 10);
    334 	} else {
    335 		strlcpy(buf, "unlimited (host limit)", sizeof(buf));
    336 	}
    337 	aprint_verbose("total memory = %s\n", buf);
    338 
    339 	TAILQ_INIT(&vmpage_lruqueue);
    340 
    341 	if (rump_physmemlimit == RUMPMEM_UNLIMITED) {
    342 		uvmexp.npages = physmem;
    343 	} else {
    344 		uvmexp.npages = pdlimit >> PAGE_SHIFT;
    345 		uvmexp.reserve_pagedaemon = PDRESERVE >> PAGE_SHIFT;
    346 		uvmexp.freetarg = (rump_physmemlimit-dddlim) >> PAGE_SHIFT;
    347 	}
    348 	/*
    349 	 * uvmexp.free is not used internally or updated.  The reason is
    350 	 * that the memory hypercall allocator is allowed to allocate
    351 	 * non-page sized chunks.  We use a byte count in curphysmem
    352 	 * instead.
    353 	 */
    354 	uvmexp.free = uvmexp.npages;
    355 
    356 #ifndef __uvmexp_pagesize
    357 	uvmexp.pagesize = PAGE_SIZE;
    358 	uvmexp.pagemask = PAGE_MASK;
    359 	uvmexp.pageshift = PAGE_SHIFT;
    360 #else
    361 #define FAKE_PAGE_SHIFT 12
    362 	uvmexp.pageshift = FAKE_PAGE_SHIFT;
    363 	uvmexp.pagesize = 1<<FAKE_PAGE_SHIFT;
    364 	uvmexp.pagemask = (1<<FAKE_PAGE_SHIFT)-1;
    365 #undef FAKE_PAGE_SHIFT
    366 #endif
    367 
    368 	mutex_init(&pagermtx, MUTEX_DEFAULT, IPL_NONE);
    369 	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);
    370 	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, IPL_NONE);
    371 
    372 	/* just to appease linkage */
    373 	mutex_init(&uvm_fpageqlock, MUTEX_SPIN, IPL_VM);
    374 
    375 	mutex_init(&pdaemonmtx, MUTEX_DEFAULT, IPL_NONE);
    376 	cv_init(&pdaemoncv, "pdaemon");
    377 	cv_init(&oomwait, "oomwait");
    378 
    379 	module_map = &module_map_store;
    380 
    381 	kernel_map->pmap = pmap_kernel();
    382 
    383 	pool_subsystem_init();
    384 
    385 	kmem_arena = vmem_create("kmem", 0, 1024*1024, PAGE_SIZE,
    386 	    NULL, NULL, NULL,
    387 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    388 
    389 	vmem_subsystem_init(kmem_arena);
    390 
    391 	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
    392 	    vmem_alloc, vmem_free, kmem_arena,
    393 	    8 * PAGE_SIZE, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    394 
    395 	pool_cache_bootstrap(&pagecache, sizeof(struct vm_page), 0, 0, 0,
    396 	    "page$", NULL, IPL_NONE, pgctor, pgdtor, NULL);
    397 
    398 	/* create vmspace used by local clients */
    399 	rump_vmspace_local = kmem_zalloc(sizeof(*rump_vmspace_local), KM_SLEEP);
    400 	uvmspace_init(rump_vmspace_local, &rump_pmap_local, 0, 0, false);
    401 }
    402 
    403 void
    404 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax,
    405     bool topdown)
    406 {
    407 
    408 	vm->vm_map.pmap = pmap;
    409 	vm->vm_refcnt = 1;
    410 }
    411 
    412 int
    413 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
    414     bool new_pageable, int lockflags)
    415 {
    416 	return 0;
    417 }
    418 
    419 void
    420 uvm_pagewire(struct vm_page *pg)
    421 {
    422 
    423 	/* nada */
    424 }
    425 
    426 void
    427 uvm_pageunwire(struct vm_page *pg)
    428 {
    429 
    430 	/* nada */
    431 }
    432 
    433 /* where's your schmonz now? */
    434 #define PUNLIMIT(a)	\
    435 p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
    436 void
    437 uvm_init_limits(struct proc *p)
    438 {
    439 
    440 #ifndef DFLSSIZ
    441 #define DFLSSIZ (16*1024*1024)
    442 #endif
    443 	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
    444 	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
    445 	PUNLIMIT(RLIMIT_DATA);
    446 	PUNLIMIT(RLIMIT_RSS);
    447 	PUNLIMIT(RLIMIT_AS);
    448 	/* nice, cascade */
    449 }
    450 #undef PUNLIMIT
    451 
    452 /*
    453  * This satisfies the "disgusting mmap hack" used by proplib.
    454  */
    455 int
    456 uvm_mmap_anon(struct proc *p, void **addrp, size_t size)
    457 {
    458 	int error;
    459 
    460 	/* no reason in particular, but cf. uvm_default_mapaddr() */
    461 	if (*addrp != NULL)
    462 		panic("uvm_mmap() variant unsupported");
    463 
    464 	if (RUMP_LOCALPROC_P(curproc)) {
    465 		error = rumpuser_anonmmap(NULL, size, 0, 0, addrp);
    466 	} else {
    467 		error = rump_sysproxy_anonmmap(RUMP_SPVM2CTL(p->p_vmspace),
    468 		    size, addrp);
    469 	}
    470 	return error;
    471 }
    472 
    473 /*
    474  * Stubs for things referenced from vfs_vnode.c but not used.
    475  */
    476 const dev_t zerodev;
    477 
    478 struct uvm_object *
    479 udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
    480 {
    481 	return NULL;
    482 }
    483 
    484 struct pagerinfo {
    485 	vaddr_t pgr_kva;
    486 	int pgr_npages;
    487 	struct vm_page **pgr_pgs;
    488 	bool pgr_read;
    489 
    490 	LIST_ENTRY(pagerinfo) pgr_entries;
    491 };
    492 static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);
    493 
    494 /*
    495  * Pager "map" in routine.  Instead of mapping, we allocate memory
    496  * and copy page contents there.  The reason for copying instead of
    497  * mapping is simple: we do not assume we are running on virtual
    498  * memory.  Even if we could emulate virtual memory in some envs
    499  * such as userspace, copying is much faster than trying to awkwardly
    500  * cope with remapping (see "Design and Implementation" pp.95-98).
    501  * The downside of the approach is that the pager requires MAXPHYS
    502  * free memory to perform paging, but short of virtual memory or
    503  * making the pager do I/O in page-sized chunks we cannot do much
    504  * about that.
    505  */
    506 vaddr_t
    507 uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
    508 {
    509 	struct pagerinfo *pgri;
    510 	vaddr_t curkva;
    511 	int i;
    512 
    513 	/* allocate structures */
    514 	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
    515 	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
    516 	pgri->pgr_npages = npages;
    517 	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
    518 	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;
    519 
    520 	/* copy contents to "mapped" memory */
    521 	for (i = 0, curkva = pgri->pgr_kva;
    522 	    i < npages;
    523 	    i++, curkva += PAGE_SIZE) {
    524 		/*
    525 		 * We need to copy the previous contents of the pages to
    526 		 * the window even if we are reading from the
    527 		 * device, since the device might not fill the contents of
    528 		 * the full mapped range and we will end up corrupting
    529 		 * data when we unmap the window.
    530 		 */
    531 		memcpy((void*)curkva, pgs[i]->uanon, PAGE_SIZE);
    532 		pgri->pgr_pgs[i] = pgs[i];
    533 	}
    534 
    535 	mutex_enter(&pagermtx);
    536 	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
    537 	mutex_exit(&pagermtx);
    538 
    539 	return pgri->pgr_kva;
    540 }
    541 
    542 /*
    543  * map out the pager window.  return contents from VA to page storage
    544  * and free structures.
    545  *
    546  * Note: does not currently support partial frees
    547  */
    548 void
    549 uvm_pagermapout(vaddr_t kva, int npages)
    550 {
    551 	struct pagerinfo *pgri;
    552 	vaddr_t curkva;
    553 	int i;
    554 
    555 	mutex_enter(&pagermtx);
    556 	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
    557 		if (pgri->pgr_kva == kva)
    558 			break;
    559 	}
    560 	KASSERT(pgri);
    561 	if (pgri->pgr_npages != npages)
    562 		panic("uvm_pagermapout: partial unmapping not supported");
    563 	LIST_REMOVE(pgri, pgr_entries);
    564 	mutex_exit(&pagermtx);
    565 
    566 	if (pgri->pgr_read) {
    567 		for (i = 0, curkva = pgri->pgr_kva;
    568 		    i < pgri->pgr_npages;
    569 		    i++, curkva += PAGE_SIZE) {
    570 			memcpy(pgri->pgr_pgs[i]->uanon,(void*)curkva,PAGE_SIZE);
    571 		}
    572 	}
    573 
    574 	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
    575 	kmem_free((void*)pgri->pgr_kva, npages * PAGE_SIZE);
    576 	kmem_free(pgri, sizeof(*pgri));
    577 }
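        /*
         * Typical use (sketch): a pager maps a run of pages in, does I/O
         * on the resulting contiguous buffer and maps it back out:
         *
         *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_READ);
         *	... read from backing store into (void *)kva ...
         *	uvm_pagermapout(kva, npages);
         *
         * For a read mapping the buffer contents are copied back into the
         * pages' storage on mapout.
         */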
    578 
    579 /*
    580  * convert va in pager window to page structure.
    581  * XXX: how expensive is this (global lock, list traversal)?
    582  */
    583 struct vm_page *
    584 uvm_pageratop(vaddr_t va)
    585 {
    586 	struct pagerinfo *pgri;
    587 	struct vm_page *pg = NULL;
    588 	int i;
    589 
    590 	mutex_enter(&pagermtx);
    591 	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
    592 		if (pgri->pgr_kva <= va
    593 		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
    594 			break;
    595 	}
    596 	if (pgri) {
    597 		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
    598 		pg = pgri->pgr_pgs[i];
    599 	}
    600 	mutex_exit(&pagermtx);
    601 
    602 	return pg;
    603 }
    604 
    605 /*
    606  * Called with the vm object locked.
    607  *
    608  * Put vnode object pages at the end of the access queue to indicate
    609  * they have been recently accessed and should not be immediate
    610  * candidates for pageout.  Do not do this for lookups done by
    611  * the pagedaemon to mimic pmap_kentered mappings which don't track
    612  * access information.
    613  */
    614 struct vm_page *
    615 uvm_pagelookup(struct uvm_object *uobj, voff_t off)
    616 {
    617 	struct vm_page *pg;
    618 	bool ispagedaemon = curlwp == uvm.pagedaemon_lwp;
    619 
    620 	pg = rb_tree_find_node(&uobj->rb_tree, &off);
    621 	if (pg && !UVM_OBJ_IS_AOBJ(pg->uobject) && !ispagedaemon) {
    622 		mutex_enter(&uvm_pageqlock);
    623 		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
    624 		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
    625 		mutex_exit(&uvm_pageqlock);
    626 	}
    627 
    628 	return pg;
    629 }
    630 
    631 void
    632 uvm_page_unbusy(struct vm_page **pgs, int npgs)
    633 {
    634 	struct vm_page *pg;
    635 	int i, pageout_done;
    636 
    637 	KASSERT(npgs > 0);
    638 
    639 	pageout_done = 0;
    640 	for (i = 0; i < npgs; i++) {
    641 		pg = pgs[i];
    642 		if (pg == NULL || pg == PGO_DONTCARE) {
    643 			continue;
    644 		}
    645 
    646 #if 0
    647 		KASSERT(uvm_page_owner_locked_p(pg, true));
    648 #else
    649 		/*
    650 		 * uvm_page_owner_locked_p() is not available in rump,
    651 		 * and rump doesn't support amaps anyway.
    652 		 */
    653 		KASSERT(mutex_owned(pg->uobject->vmobjlock));
    654 #endif
    655 		KASSERT(pg->flags & PG_BUSY);
    656 
    657 		if (pg->flags & PG_PAGEOUT) {
    658 			pg->flags &= ~PG_PAGEOUT;
    659 			pg->flags |= PG_RELEASED;
    660 			pageout_done++;
    661 			atomic_inc_uint(&uvmexp.pdfreed);
    662 		}
    663 		if (pg->flags & PG_WANTED) {
    664 			wakeup(pg);
    665 		}
    666 		if (pg->flags & PG_RELEASED) {
    667 			KASSERT(pg->uobject != NULL ||
    668 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
    669 			pg->flags &= ~PG_RELEASED;
    670 			uvm_pagefree(pg);
    671 		} else {
    672 			KASSERT((pg->flags & PG_FAKE) == 0);
    673 			pg->flags &= ~(PG_WANTED|PG_BUSY);
    674 			UVM_PAGE_OWN(pg, NULL);
    675 		}
    676 	}
    677 	if (pageout_done != 0) {
    678 		uvm_pageout_done(pageout_done);
    679 	}
    680 }
    681 
    682 void
    683 uvm_estimatepageable(int *active, int *inactive)
    684 {
    685 
    686 	/* XXX: guessing game */
    687 	*active = 1024;
    688 	*inactive = 1024;
    689 }
    690 
    691 bool
    692 vm_map_starved_p(struct vm_map *map)
    693 {
    694 
    695 	if (map->flags & VM_MAP_WANTVA)
    696 		return true;
    697 
    698 	return false;
    699 }
    700 
    701 int
    702 uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
    703 {
    704 
    705 	panic("%s: unimplemented", __func__);
    706 }
    707 
    708 void
    709 uvm_unloan(void *v, int npages, int flags)
    710 {
    711 
    712 	panic("%s: unimplemented", __func__);
    713 }
    714 
    715 int
    716 uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    717 	struct vm_page **opp)
    718 {
    719 
    720 	return EBUSY;
    721 }
    722 
    723 struct vm_page *
    724 uvm_loanbreak(struct vm_page *pg)
    725 {
    726 
    727 	panic("%s: unimplemented", __func__);
    728 }
    729 
    730 void
    731 ubc_purge(struct uvm_object *uobj)
    732 {
    733 
    734 }
    735 
    736 vaddr_t
    737 uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz, int topdown)
    738 {
    739 
    740 	return 0;
    741 }
    742 
    743 int
    744 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
    745 	vm_prot_t prot, bool set_max)
    746 {
    747 
    748 	return EOPNOTSUPP;
    749 }
    750 
    751 int
    752 uvm_map(struct vm_map *map, vaddr_t *startp, vsize_t size,
    753     struct uvm_object *uobj, voff_t uoffset, vsize_t align,
    754     uvm_flag_t flags)
    755 {
    756 
    757 	*startp = (vaddr_t)rump_hypermalloc(size, align, true, "uvm_map");
    758 	return *startp != 0 ? 0 : ENOMEM;
    759 }
    760 
    761 void
    762 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
    763 {
    764 
    765 	rump_hyperfree((void*)start, end-start);
    766 }
    767 
    768 
    769 /*
    770  * UVM km
    771  */
    772 
    773 vaddr_t
    774 uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
    775 {
    776 	void *rv, *desired = NULL;
    777 	int alignbit, error;
    778 
    779 #ifdef __x86_64__
    780 	/*
    781 	 * On amd64, allocate all module memory from the lowest 2GB.
    782 	 * This is because NetBSD kernel modules are compiled
    783 	 * with -mcmodel=kernel and reserve only 4 bytes for
    784 	 * offsets.  If we load code compiled with -mcmodel=kernel
    785 	 * anywhere except the lowest or highest 2GB, it will not
    786 	 * work.  Since userspace does not have access to the highest
    787 	 * 2GB, use the lowest 2GB.
    788 	 *
    789 	 * Note: this assumes the rump kernel resides in
    790 	 * the lowest 2GB as well.
    791 	 *
    792 	 * Note2: yes, it's a quick hack, but since this is the only
    793 	 * place where we care about the map we're allocating from,
    794 	 * just use a simple "if" instead of coming up with a fancy
    795 	 * generic solution.
    796 	 */
    797 	if (map == module_map) {
    798 		desired = (void *)(0x80000000 - size);
    799 	}
    800 #endif
    801 
    802 	if (__predict_false(map == module_map)) {
    803 		alignbit = 0;
    804 		if (align) {
    805 			alignbit = ffs(align)-1;
    806 		}
    807 		error = rumpuser_anonmmap(desired, size, alignbit,
    808 		    flags & UVM_KMF_EXEC, &rv);
    809 	} else {
    810 		error = rumpuser_malloc(size, align, &rv);
    811 	}
    812 
    813 	if (error) {
    814 		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
    815 			return 0;
    816 		else
    817 			panic("uvm_km_alloc failed");
    818 	}
    819 
    820 	if (flags & UVM_KMF_ZERO)
    821 		memset(rv, 0, size);
    822 
    823 	return (vaddr_t)rv;
    824 }
    825 
    826 void
    827 uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
    828 {
    829 
    830 	if (__predict_false(map == module_map))
    831 		rumpuser_unmap((void *)vaddr, size);
    832 	else
    833 		rumpuser_free((void *)vaddr, size);
    834 }
    835 
    836 int
    837 uvm_km_protect(struct vm_map *map, vaddr_t vaddr, vsize_t size, vm_prot_t prot)
    838 {
    839 	return 0;
    840 }
    841 
    842 struct vm_map *
    843 uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
    844 	vsize_t size, int pageable, bool fixed, struct vm_map *submap)
    845 {
    846 
    847 	return (struct vm_map *)417416;
    848 }
    849 
    850 int
    851 uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    852     vmem_addr_t *addr)
    853 {
    854 	vaddr_t va;
    855 	va = (vaddr_t)rump_hypermalloc(size, PAGE_SIZE,
    856 	    (flags & VM_SLEEP), "kmalloc");
    857 
    858 	if (va) {
    859 		*addr = va;
    860 		return 0;
    861 	} else {
    862 		return ENOMEM;
    863 	}
    864 }
    865 
    866 void
    867 uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
    868 {
    869 
    870 	rump_hyperfree((void *)addr, size);
    871 }
    872 
    873 /*
    874  * VM space locking routines.  We don't really have to do anything,
    875  * since the pages are always "wired" (both local and remote processes).
    876  */
    877 int
    878 uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
    879 {
    880 
    881 	return 0;
    882 }
    883 
    884 void
    885 uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
    886 {
    887 
    888 }
    889 
    890 /*
    891  * For the local case the buffer mappers don't need to do anything.
    892  * For the remote case we need to reserve space and copy data in or
    893  * out, depending on B_READ/B_WRITE.
    894  */
    895 int
    896 vmapbuf(struct buf *bp, vsize_t len)
    897 {
    898 	int error = 0;
    899 
    900 	bp->b_saveaddr = bp->b_data;
    901 
    902 	/* remote case */
    903 	if (!RUMP_LOCALPROC_P(curproc)) {
    904 		bp->b_data = rump_hypermalloc(len, 0, true, "vmapbuf");
    905 		if (BUF_ISWRITE(bp)) {
    906 			error = copyin(bp->b_saveaddr, bp->b_data, len);
    907 			if (error) {
    908 				rump_hyperfree(bp->b_data, len);
    909 				bp->b_data = bp->b_saveaddr;
    910 				bp->b_saveaddr = 0;
    911 			}
    912 		}
    913 	}
    914 
    915 	return error;
    916 }
    917 
    918 void
    919 vunmapbuf(struct buf *bp, vsize_t len)
    920 {
    921 
    922 	/* remote case */
    923 	if (!RUMP_LOCALPROC_P(bp->b_proc)) {
    924 		if (BUF_ISREAD(bp)) {
    925 			bp->b_error = copyout_proc(bp->b_proc,
    926 			    bp->b_data, bp->b_saveaddr, len);
    927 		}
    928 		rump_hyperfree(bp->b_data, len);
    929 	}
    930 
    931 	bp->b_data = bp->b_saveaddr;
    932 	bp->b_saveaddr = 0;
    933 }
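        /*
         * For remote clients the pair above acts as a bounce buffer:
         * vmapbuf() stashes the client address in b_saveaddr and copies
         * write data in, vunmapbuf() copies read data back out to the
         * client and restores b_data.  For local clients only the pointer
         * is saved and restored.
         */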
    934 
    935 void
    936 uvmspace_addref(struct vmspace *vm)
    937 {
    938 
    939 	/*
    940 	 * No dynamically allocated vmspaces exist.
    941 	 */
    942 }
    943 
    944 void
    945 uvmspace_free(struct vmspace *vm)
    946 {
    947 
    948 	/* nothing for now */
    949 }
    950 
    951 /*
    952  * page life cycle stuff.  it really doesn't exist, so just stubs.
    953  */
    954 
    955 void
    956 uvm_pageactivate(struct vm_page *pg)
    957 {
    958 
    959 	/* nada */
    960 }
    961 
    962 void
    963 uvm_pagedeactivate(struct vm_page *pg)
    964 {
    965 
    966 	/* nada */
    967 }
    968 
    969 void
    970 uvm_pagedequeue(struct vm_page *pg)
    971 {
    972 
    973 	/* nada */
    974 }
    975 
    976 void
    977 uvm_pageenqueue(struct vm_page *pg)
    978 {
    979 
    980 	/* nada */
    981 }
    982 
    983 void
    984 uvmpdpol_anfree(struct vm_anon *an)
    985 {
    986 
    987 	/* nada */
    988 }
    989 
    990 /*
    991  * Physical address accessors.
    992  */
    993 
    994 struct vm_page *
    995 uvm_phys_to_vm_page(paddr_t pa)
    996 {
    997 
    998 	return NULL;
    999 }
   1000 
   1001 paddr_t
   1002 uvm_vm_page_to_phys(const struct vm_page *pg)
   1003 {
   1004 
   1005 	return 0;
   1006 }
   1007 
   1008 vaddr_t
   1009 uvm_uarea_alloc(void)
   1010 {
   1011 
   1012 	/* non-zero */
   1013 	return (vaddr_t)11;
   1014 }
   1015 
   1016 void
   1017 uvm_uarea_free(vaddr_t uarea)
   1018 {
   1019 
   1020 	/* nata, so creamy */
   1021 }
   1022 
   1023 /*
   1024  * Routines related to the Page Baroness.
   1025  */
   1026 
   1027 void
   1028 uvm_wait(const char *msg)
   1029 {
   1030 
   1031 	if (__predict_false(rump_threads == 0))
   1032 		panic("pagedaemon missing (RUMP_THREADS = 0)");
   1033 
   1034 	if (curlwp == uvm.pagedaemon_lwp) {
   1035 		/* is it possible for us to later get memory? */
   1036 		if (!uvmexp.paging)
   1037 			panic("pagedaemon out of memory");
   1038 	}
   1039 
   1040 	mutex_enter(&pdaemonmtx);
   1041 	pdaemon_waiters++;
   1042 	cv_signal(&pdaemoncv);
   1043 	cv_wait(&oomwait, &pdaemonmtx);
   1044 	mutex_exit(&pdaemonmtx);
   1045 }
   1046 
   1047 void
   1048 uvm_pageout_start(int npages)
   1049 {
   1050 
   1051 	mutex_enter(&pdaemonmtx);
   1052 	uvmexp.paging += npages;
   1053 	mutex_exit(&pdaemonmtx);
   1054 }
   1055 
   1056 void
   1057 uvm_pageout_done(int npages)
   1058 {
   1059 
   1060 	if (!npages)
   1061 		return;
   1062 
   1063 	mutex_enter(&pdaemonmtx);
   1064 	KASSERT(uvmexp.paging >= npages);
   1065 	uvmexp.paging -= npages;
   1066 
   1067 	if (pdaemon_waiters) {
   1068 		pdaemon_waiters = 0;
   1069 		cv_broadcast(&oomwait);
   1070 	}
   1071 	mutex_exit(&pdaemonmtx);
   1072 }
   1073 
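        /*
         * Try to flush a single page.  Returns true if the page was handed
         * to the pager.  If the object lock cannot be taken and its holder
         * is currently running on some virtual CPU, *lockrunning is set so
         * that the caller can yield the host CPU instead of spinning.
         */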
   1074 static bool
   1075 processpage(struct vm_page *pg, bool *lockrunning)
   1076 {
   1077 	struct uvm_object *uobj;
   1078 
   1079 	uobj = pg->uobject;
   1080 	if (mutex_tryenter(uobj->vmobjlock)) {
   1081 		if ((pg->flags & PG_BUSY) == 0) {
   1082 			mutex_exit(&uvm_pageqlock);
   1083 			uobj->pgops->pgo_put(uobj, pg->offset,
   1084 			    pg->offset + PAGE_SIZE,
   1085 			    PGO_CLEANIT|PGO_FREE);
   1086 			KASSERT(!mutex_owned(uobj->vmobjlock));
   1087 			return true;
   1088 		} else {
   1089 			mutex_exit(uobj->vmobjlock);
   1090 		}
   1091 	} else if (*lockrunning == false && ncpu > 1) {
   1092 		CPU_INFO_ITERATOR cii;
   1093 		struct cpu_info *ci;
   1094 		struct lwp *l;
   1095 
   1096 		l = mutex_owner(uobj->vmobjlock);
   1097 		for (CPU_INFO_FOREACH(cii, ci)) {
   1098 			if (ci->ci_curlwp == l) {
   1099 				*lockrunning = true;
   1100 				break;
   1101 			}
   1102 		}
   1103 	}
   1104 
   1105 	return false;
   1106 }
   1107 
   1108 /*
   1109  * The Diabolical pageDaemon Director (DDD).
   1110  *
   1111  * This routine can always use better heuristics.
   1112  */
   1113 void
   1114 uvm_pageout(void *arg)
   1115 {
   1116 	struct vm_page *pg;
   1117 	struct pool *pp, *pp_first;
   1118 	int cleaned, skip, skipped;
   1119 	bool succ;
   1120 	bool lockrunning;
   1121 
   1122 	mutex_enter(&pdaemonmtx);
   1123 	for (;;) {
   1124 		if (!NEED_PAGEDAEMON()) {
   1125 			kernel_map->flags &= ~VM_MAP_WANTVA;
   1126 		}
   1127 
   1128 		if (pdaemon_waiters) {
   1129 			pdaemon_waiters = 0;
   1130 			cv_broadcast(&oomwait);
   1131 		}
   1132 
   1133 		cv_wait(&pdaemoncv, &pdaemonmtx);
   1134 		uvmexp.pdwoke++;
   1135 
   1136 		/* tell the world that we are hungry */
   1137 		kernel_map->flags |= VM_MAP_WANTVA;
   1138 		mutex_exit(&pdaemonmtx);
   1139 
   1140 		/*
   1141 		 * step one: reclaim the page cache.  this should give
   1142 		 * us the biggest earnings since whole pages are released
   1143 		 * into backing memory.
   1144 		 */
   1145 		pool_cache_reclaim(&pagecache);
   1146 		if (!NEED_PAGEDAEMON()) {
   1147 			mutex_enter(&pdaemonmtx);
   1148 			continue;
   1149 		}
   1150 
   1151 		/*
   1152 		 * Ok, so that didn't help.  Next, try to hunt memory
   1153 		 * by pushing out vnode pages.  The pages might contain
   1154 		 * useful cached data, but we need the memory.
   1155 		 */
   1156 		cleaned = 0;
   1157 		skip = 0;
   1158 		lockrunning = false;
   1159  again:
   1160 		mutex_enter(&uvm_pageqlock);
   1161 		while (cleaned < PAGEDAEMON_OBJCHUNK) {
   1162 			skipped = 0;
   1163 			TAILQ_FOREACH(pg, &vmpage_lruqueue, pageq.queue) {
   1164 
   1165 				/*
   1166 				 * skip over pages we _might_ have tried
   1167 				 * to handle earlier.  they might not be
   1168 				 * exactly the same ones, but I'm not too
   1169 				 * concerned.
   1170 				 */
   1171 				while (skipped++ < skip)
   1172 					continue;
   1173 
   1174 				if (processpage(pg, &lockrunning)) {
   1175 					cleaned++;
   1176 					goto again;
   1177 				}
   1178 
   1179 				skip++;
   1180 			}
   1181 			break;
   1182 		}
   1183 		mutex_exit(&uvm_pageqlock);
   1184 
   1185 		/*
   1186 		 * Ok, someone is running with an object lock held.
   1187 		 * We want to yield the host CPU to make sure the
   1188 		 * thread is not parked on the host.  Since sched_yield()
   1189 		 * doesn't appear to do anything on NetBSD, nanosleep
   1190 		 * for the smallest possible time and hope we're back in
   1191 		 * the game soon.
   1192 		 */
   1193 		if (cleaned == 0 && lockrunning) {
   1194 			rumpuser_clock_sleep(RUMPUSER_CLOCK_RELWALL, 0, 1);
   1195 
   1196 			lockrunning = false;
   1197 			skip = 0;
   1198 
   1199 			/* and here we go again */
   1200 			goto again;
   1201 		}
   1202 
   1203 		/*
   1204 		 * And of course we need to reclaim the page cache
   1205 		 * again to actually release memory.
   1206 		 */
   1207 		pool_cache_reclaim(&pagecache);
   1208 		if (!NEED_PAGEDAEMON()) {
   1209 			mutex_enter(&pdaemonmtx);
   1210 			continue;
   1211 		}
   1212 
   1213 		/*
   1214 		 * And then drain the pools.  Wipe them out ... all of them.
   1215 		 */
   1216 		for (pp_first = NULL;;) {
   1217 			rump_vfs_drainbufs(10 /* XXX: estimate! */);
   1218 
   1219 			succ = pool_drain(&pp);
   1220 			if (succ || pp == pp_first)
   1221 				break;
   1222 
   1223 			if (pp_first == NULL)
   1224 				pp_first = pp;
   1225 		}
   1226 
   1227 		/*
   1228 		 * Need to use PYEC on our bag of tricks.
   1229 		 * Unfortunately, the wife just borrowed it.
   1230 		 */
   1231 
   1232 		mutex_enter(&pdaemonmtx);
   1233 		if (!succ && cleaned == 0 && pdaemon_waiters &&
   1234 		    uvmexp.paging == 0) {
   1235 			rumpuser_dprintf("pagedaemoness: failed to reclaim "
   1236 			    "memory ... sleeping (deadlock?)\n");
   1237 			kpause("pddlk", false, hz, &pdaemonmtx);
   1238 		}
   1239 	}
   1240 
   1241 	panic("you can swap out any time you like, but you can never leave");
   1242 }
   1243 
   1244 void
   1245 uvm_kick_pdaemon()
   1246 {
   1247 
   1248 	/*
   1249 	 * Wake up the diabolical pagedaemon director if we are over
   1250 	 * 90% of the memory limit.  This is a complete and utter
   1251 	 * stetson-harrison decision which you are allowed to finetune.
   1252 	 * Don't bother locking.  If we have some unflushed caches,
   1253 	 * other waker-uppers will deal with the issue.
   1254 	 */
   1255 	if (NEED_PAGEDAEMON()) {
   1256 		cv_signal(&pdaemoncv);
   1257 	}
   1258 }
   1259 
   1260 void *
   1261 rump_hypermalloc(size_t howmuch, int alignment, bool waitok, const char *wmsg)
   1262 {
   1263 	const unsigned long thelimit =
   1264 	    curlwp == uvm.pagedaemon_lwp ? pdlimit : rump_physmemlimit;
   1265 	unsigned long newmem;
   1266 	void *rv;
   1267 	int error;
   1268 
   1269 	uvm_kick_pdaemon(); /* ouch */
   1270 
   1271 	/* first we must be within the limit */
   1272  limitagain:
   1273 	if (thelimit != RUMPMEM_UNLIMITED) {
   1274 		newmem = atomic_add_long_nv(&curphysmem, howmuch);
   1275 		if (newmem > thelimit) {
   1276 			newmem = atomic_add_long_nv(&curphysmem, -howmuch);
   1277 			if (!waitok) {
   1278 				return NULL;
   1279 			}
   1280 			uvm_wait(wmsg);
   1281 			goto limitagain;
   1282 		}
   1283 	}
   1284 
   1285 	/* second, we must get something from the backend */
   1286  again:
   1287 	error = rumpuser_malloc(howmuch, alignment, &rv);
   1288 	if (__predict_false(error && waitok)) {
   1289 		uvm_wait(wmsg);
   1290 		goto again;
   1291 	}
   1292 
   1293 	return rv;
   1294 }
   1295 
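        /*
         * Note that limit accounting is done in bytes: rump_hyperfree()
         * must be passed the same size that was allocated, or curphysmem
         * will drift.
         */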
   1296 void
   1297 rump_hyperfree(void *what, size_t size)
   1298 {
   1299 
   1300 	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
   1301 		atomic_add_long(&curphysmem, -size);
   1302 	}
   1303 	rumpuser_free(what, size);
   1304 }
   1305