/*	$NetBSD: vm.c,v 1.30.4.3 2009/08/19 18:48:30 yamt Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + anon objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */
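
/*
 * For example (illustrative only): zeroing a page's backing store is
 * just memset((void *)pg->uanon, 0, PAGE_SIZE), exactly as
 * uvm_pagezero() below does; no real pmap is involved.
 */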

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.30.4.3 2009/08/19 18:48:30 yamt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include "rump_private.h"

static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	uobj->uo_npages++;

	return pg;
}

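/*
 * Illustrative only (sketch, not part of the original code): a typical
 * caller of rumpvm_makepage() holds the object lock across the call and
 * must eventually unbusy the page, since it is created PG_BUSY:
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	pg = rumpvm_makepage(uobj, off);
 *	mutex_exit(&uobj->vmobjlock);
 *	... fill (void *)pg->uanon ...
 *	uvm_page_unbusy(&pg, 1);
 */
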
/* these are going away very soon */
void rumpvm_enterva(vaddr_t addr, struct vm_page *pg) {}
void rumpvm_flushva(struct uvm_object *uobj) {}

struct vm_page *
uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
	int flags, int strat, int free_list)
{

	return rumpvm_makepage(uobj, off);
}

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	uobj->uo_npages--;
	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

void
uvm_pagezero(struct vm_page *pg)
{

	pg->flags &= ~PG_CLEAN;
	memset((void *)pg->uanon, 0, PAGE_SIZE);
}

/*
 * Anon object stuff
 */

static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	mutex_destroy(&uobj->vmobjlock);
	kmem_free(uobj, sizeof(*uobj));
}

/*
 * Misc routines
 */

static kmutex_t pagermtx;

void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
	rump_vmspace.vm_map.pmap = pmap_kernel();

	mutex_init(&pagermtx, MUTEX_DEFAULT, 0);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, 0);

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{

	panic("%s: unimplemented", __func__);
}

struct pagerinfo {
	vaddr_t pgr_kva;
	int pgr_npages;
	struct vm_page **pgr_pgs;
	bool pgr_read;

	LIST_ENTRY(pagerinfo) pgr_entries;
};
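/* all pagerinfo structures live on pagerlist, protected by pagermtx */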
static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);

/*
 * Pager "map" in routine.  Instead of mapping, we allocate memory
 * and copy page contents there.  Not optimal or even strictly
 * correct (the caller might modify the page contents after mapping
 * them in), but what the heck.  Assumes UVMPAGER_MAPIN_WAITOK.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	/* allocate structures */
	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
	pgri->pgr_npages = npages;
	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;

	/* copy contents to "mapped" memory */
	for (i = 0, curkva = pgri->pgr_kva;
	    i < npages;
	    i++, curkva += PAGE_SIZE) {
		/*
		 * We need to copy the previous contents of the pages to
		 * the window even if we are reading from the
		 * device, since the device might not fill the contents of
		 * the full mapped range and we will end up corrupting
		 * data when we unmap the window.
		 */
		memcpy((void *)curkva, pgs[i]->uanon, PAGE_SIZE);
		pgri->pgr_pgs[i] = pgs[i];
	}

	mutex_enter(&pagermtx);
	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
	mutex_exit(&pagermtx);

	return pgri->pgr_kva;
}

/*
 * Map out the pager window.  Return contents from VA to page storage
 * and free structures.
 *
 * Note: does not currently support partial frees.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva == kva)
			break;
	}
	KASSERT(pgri);
	if (pgri->pgr_npages != npages)
		panic("uvm_pagermapout: partial unmapping not supported");
	LIST_REMOVE(pgri, pgr_entries);
	mutex_exit(&pagermtx);

	if (pgri->pgr_read) {
		for (i = 0, curkva = pgri->pgr_kva;
		    i < pgri->pgr_npages;
		    i++, curkva += PAGE_SIZE) {
			memcpy(pgri->pgr_pgs[i]->uanon,
			    (void *)curkva, PAGE_SIZE);
		}
	}

	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
	kmem_free((void *)pgri->pgr_kva, npages * PAGE_SIZE);
	kmem_free(pgri, sizeof(*pgri));
}

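/*
 * Illustrative usage of the pager window emulation above (sketch only,
 * not from the original code):
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_READ);
 *	... do I/O on the linear buffer at kva ...
 *	uvm_pagermapout(kva, npages);
 *
 * With UVMPAGER_MAPIN_READ set, uvm_pagermapout() copies the window
 * contents back into each page's storage; otherwise the window is
 * simply discarded.
 */
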
/*
 * Convert a VA in the pager window to a page structure.
 * XXX: how expensive is this (global lock, list traversal)?
 */
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct pagerinfo *pgri;
	struct vm_page *pg = NULL;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva <= va
		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
			break;
	}
	if (pgri) {
		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
		pg = pgri->pgr_pgs[i];
	}
	mutex_exit(&pagermtx);

	return pg;
}

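/*
 * Worked example for uvm_pageratop() above: if a 4-page window starts
 * at pgr_kva, then va == pgr_kva + 2*PAGE_SIZE + 100 lies inside the
 * window and resolves to pgr_pgs[2].
 */
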
/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	return false;
}

void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}

/* XXX: the following two are unfinished because lwps are not refcounted yet */
void
uvm_lwp_hold(struct lwp *l)
{

	atomic_inc_uint(&l->l_holdcnt);
}

void
uvm_lwp_rele(struct lwp *l)
{

	atomic_dec_uint(&l->l_holdcnt);
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_object_printit(struct uvm_object *uobj, bool full,
	void (*pr)(const char *, ...))
{

	/* nada for now */
}

int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
{

	/* nada for now */
	return 0;
}

/*
 * Kmem
 */

#ifndef RUMP_USE_REAL_ALLOCATORS
void
kmem_init(void)
{

	/* nothing to do */
}

void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
#endif /* RUMP_USE_REAL_ALLOCATORS */

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;
	int alignbit, error;

	alignbit = 0;
	if (align) {
		alignbit = ffs(align) - 1;
	}
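	/*
	 * For example, align == 8192 (0x2000) gives ffs(8192) == 14,
	 * so alignbit == 13, i.e. log2 of the alignment; this assumes
	 * align is a power of two.
	 */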

	rv = rumpuser_anonmmap(size, alignbit, flags & UVM_KMF_EXEC, &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rumpuser_malloc(PAGE_SIZE, !waitok);
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rumpuser_unmap((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
	void *rv;
	int error;

	rv = rumpuser_anonmmap(PAGE_SIZE, PAGE_SHIFT, 0, &error);
	if (rv == NULL && waitok)
		panic("fixme: poolpage alloc failed");

	return (vaddr_t)rv;
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	rumpuser_unmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Mapping and vm space locking routines.
 * XXX: these don't work for non-local vmspaces
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	KASSERT(vs == &rump_vmspace);
	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	KASSERT(vs == &rump_vmspace);
}

void
vmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_saveaddr = bp->b_data;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

void
uvm_wait(const char *msg)
{

	/* nothing to wait for */
}

/*
 * Page life cycle stuff.  It really doesn't exist, so just stubs.
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedeactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedequeue(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageenqueue(struct vm_page *pg)
{

	/* nada */
}
    718