/*	$NetBSD: vm.c,v 1.46 2008/12/16 14:07:25 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + anon objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>

#include "rump_private.h"

static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);

	return pg;
}
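
/*
 * Illustrative sketch (not part of the original source): a pager
 * creates a page under the object lock and fills its backing store
 * through the pg->uanon hack described at the top of this file.
 * "data" here is a hypothetical source buffer.
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	pg = rumpvm_makepage(uobj, off);
 *	memcpy((void *)pg->uanon, data, PAGE_SIZE);
 *	mutex_exit(&uobj->vmobjlock);
 *
 * The new page comes back PG_BUSY; it is released later via
 * uvm_page_unbusy().
 */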

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
static kmutex_t rvamtx;

void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = kmem_alloc(sizeof(struct rumpva), KM_SLEEP);
	rva->addr = addr;
	rva->pg = pg;
	mutex_enter(&rvamtx);
	LIST_INSERT_HEAD(&rvahead, rva, entries);
	mutex_exit(&rvamtx);
}

void
rumpvm_flushva(struct uvm_object *uobj)
{
	struct rumpva *rva, *rva_next;

	mutex_enter(&rvamtx);
	for (rva = LIST_FIRST(&rvahead); rva; rva = rva_next) {
		rva_next = LIST_NEXT(rva, entries);
		if (rva->pg->uobject == uobj) {
			LIST_REMOVE(rva, entries);
			kmem_free(rva, sizeof(*rva));
		}
	}
	mutex_exit(&rvamtx);
}
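
/*
 * A sketch of how the va bookkeeping above is used (illustrative, not
 * part of the original source): a va->page mapping is registered when
 * a page is mapped in, translated back with uvm_pageratop() while it
 * is valid, and dropped when the owning object's mappings are flushed.
 *
 *	rumpvm_enterva(va, pg);
 *	KASSERT(uvm_pageratop(va) == pg);
 *	rumpvm_flushva(pg->uobject);
 *
 * rumpvm_flushva() frees only the bookkeeping entries; the pages
 * themselves stay on their object's memq.
 */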

/*
 * Anon object stuff
 */

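/*
 * Fetch pages from the anon object.  Called with the object locked;
 * returns with it unlocked and every returned page PG_BUSY.
 */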
static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

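/*
 * Flush pages from the anon object.  Called with the object locked;
 * returns with it unlocked.
 */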
static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

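/* Create an anonymous memory object.  size and flags are ignored here. */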
struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

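/*
 * Detach from an anon object.  There is no reference counting here,
 * so this frees all of the object's pages and the object itself.
 */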
void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	mutex_destroy(&uobj->vmobjlock);
	kmem_free(uobj, sizeof(*uobj));
}
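
/*
 * Putting the anon object interface together, as a sketch
 * (illustrative, not part of the original source):
 *
 *	struct uvm_object *uobj = uao_create(PAGE_SIZE, 0);
 *	struct vm_page *pgs[1];
 *	int npages = 1;
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	uobj->pgops->pgo_get(uobj, 0, pgs, &npages, 0, VM_PROT_READ, 0, 0);
 *
 * The object comes back unlocked with pgs[0] PG_BUSY; relock to unbusy:
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	uvm_page_unbusy(pgs, npages);
 *	mutex_exit(&uobj->vmobjlock);
 *	uao_detach(uobj);
 */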

/*
 * Misc routines
 */

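/* Bootstrap the minimal vm state a rump kernel needs. */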
void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
	rump_vmspace.vm_map.pmap = pmap_kernel();

	mutex_init(&rvamtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);

	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
}

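/*
 * Page queue management is a no-op here: a rump kernel never pages
 * anything out, so activation and wiring need no bookkeeping.
 */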
void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			break;
	mutex_exit(&rvamtx);

	if (rva == NULL)
		panic("%s: va %llu", __func__, (unsigned long long)va);

	return rva->pg;
}

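/*
 * Drop PG_BUSY on the given pages, waking anyone sleeping on them and
 * freeing pages already marked PG_RELEASED.
 */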
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	return false;
}

void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * Wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}

/* XXX: the following two are unfinished because LWPs are not refcounted yet */
void
uvm_lwp_hold(struct lwp *l)
{

	atomic_inc_uint(&l->l_holdcnt);
}

void
uvm_lwp_rele(struct lwp *l)
{

	atomic_dec_uint(&l->l_holdcnt);
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	panic("%s: unimplemented", __func__);
}

/*
 * Kmem
 */

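/*
 * A pass-through kmem(9) implementation on top of the host allocator,
 * used unless the real kernel allocator is compiled in.
 */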
#ifndef RUMP_USE_REAL_KMEM
void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
#endif /* RUMP_USE_REAL_KMEM */

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;

	rv = rumpuser_malloc(size, flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT));
	if (rv && (flags & UVM_KMF_ZERO))
		memset(rv, 0, size);

	return (vaddr_t)rv;
}
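
/*
 * Example use (illustrative, not part of the original source).  The
 * map argument is ignored in this emulation, so kernel_map serves:
 *
 *	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_ZERO);
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, 0);
 */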

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_free((void *)vaddr);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

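	/*
	 * Hand back an arbitrary non-NULL cookie: the uvm_km_* routines
	 * in this file never dereference the map they are given, so
	 * callers only need something to pass around.
	 */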
	return (struct vm_map *)417416;
}

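/* pool(9) page backend: single pages served straight from the host. */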
vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rumpuser_malloc(PAGE_SIZE, !waitok);
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rumpuser_free((void *)addr);
}
    498