/*	$NetBSD: vm.c,v 1.30.4.2 2009/06/20 07:20:35 yamt Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + anon objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.30.4.2 2009/06/20 07:20:35 yamt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include "rump_private.h"

static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);

	return pg;
}
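
/*
 * Illustrative sketch, not compiled: how the pg->uanon convention
 * noted at the top of the file is consumed.  Page contents live in
 * the kmem chunk hanging off pg->uanon, so filling a page is a plain
 * memcpy into that storage.  The helper name is hypothetical.
 */
#if 0
static void
example_fillpage(struct vm_page *pg, const void *src, size_t len)
{

	KASSERT(len <= PAGE_SIZE);
	memcpy((void *)pg->uanon, src, len);	/* uanon == backing storage */
	pg->flags &= ~PG_CLEAN;			/* page is now dirty */
}
#endif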

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
static kmutex_t rvamtx;

void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = kmem_alloc(sizeof(struct rumpva), KM_SLEEP);
	rva->addr = addr;
	rva->pg = pg;
	mutex_enter(&rvamtx);
	LIST_INSERT_HEAD(&rvahead, rva, entries);
	mutex_exit(&rvamtx);
}

void
rumpvm_flushva(struct uvm_object *uobj)
{
	struct rumpva *rva, *rva_next;

	mutex_enter(&rvamtx);
	for (rva = LIST_FIRST(&rvahead); rva; rva = rva_next) {
		rva_next = LIST_NEXT(rva, entries);
		if (rva->pg->uobject == uobj) {
			LIST_REMOVE(rva, entries);
			uvm_page_unbusy(&rva->pg, 1);
			kmem_free(rva, sizeof(*rva));
		}
	}
	mutex_exit(&rvamtx);
}
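
/*
 * Illustrative sketch, not compiled: the intended life cycle of the va
 * bookkeeping above.  A va -> page translation is registered with
 * rumpvm_enterva(), queried with uvm_pageratop(), and dropped for the
 * whole object with rumpvm_flushva().  The function name is made up;
 * pg is assumed to belong to uobj and to still be PG_BUSY.
 */
#if 0
static void
example_vacycle(struct uvm_object *uobj, struct vm_page *pg, vaddr_t va)
{

	rumpvm_enterva(va, pg);			/* record va -> pg */
	KASSERT(uvm_pageratop(va) == pg);	/* reverse translation */
	rumpvm_flushva(uobj);		/* unbusies & forgets uobj's pages */
}
#endif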

/*
 * Anon object stuff
 */

static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	mutex_destroy(&uobj->vmobjlock);
	kmem_free(uobj, sizeof(*uobj));
}
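
/*
 * Illustrative sketch, not compiled: the minimal anon object cycle this
 * emulation supports.  Pages are materialized on demand by ao_get() via
 * the pager ops and freed wholesale on detach.  The function name is
 * hypothetical.
 */
#if 0
static void
example_aobjcycle(void)
{
	struct uvm_object *uobj;
	struct vm_page *pgs[1];
	int npages = 1;

	uobj = uao_create(PAGE_SIZE, 0);
	mutex_enter(&uobj->vmobjlock);
	/* pgo_get() creates the page and drops vmobjlock before returning */
	(void)uobj->pgops->pgo_get(uobj, 0, pgs, &npages, 0,
	    VM_PROT_READ | VM_PROT_WRITE, 0, 0);
	uvm_page_unbusy(pgs, npages);
	uao_detach(uobj);		/* frees all remaining pages */
}
#endif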

/*
 * Misc routines
 */

static kmutex_t cachepgmtx;

void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
	rump_vmspace.vm_map.pmap = pmap_kernel();

	mutex_init(&rvamtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&cachepgmtx, MUTEX_DEFAULT, IPL_NONE);

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{

	panic("%s: unimplemented", __func__);
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			break;
	mutex_exit(&rvamtx);

	if (rva == NULL)
		panic("%s: va %llu", __func__, (unsigned long long)va);

	return rva->pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	return false;
}

void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}

/* XXX: following two are unfinished because LWPs are not refcounted yet */
void
uvm_lwp_hold(struct lwp *l)
{

	atomic_inc_uint(&l->l_holdcnt);
}

void
uvm_lwp_rele(struct lwp *l)
{

	atomic_dec_uint(&l->l_holdcnt);
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_object_printit(struct uvm_object *uobj, bool full,
	void (*pr)(const char *, ...))
{

	/* nada for now */
}

int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
{

	/* nada for now */
	return 0;
}

/*
 * Kmem
 */

#ifndef RUMP_USE_REAL_ALLOCATORS
void
kmem_init(void)
{

	/* nothing to do */
}

void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
#endif /* RUMP_USE_REAL_ALLOCATORS */
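
/*
 * Illustrative sketch, not compiled: the emulated kmem interface above
 * maps directly onto the rumpuser allocator, so the usual alloc/free
 * pairing applies, and kmem_free() must get the original size back.
 * The structure and function names are hypothetical.
 */
#if 0
struct example {
	int field;
};

static void
example_kmemuse(void)
{
	struct example *e;

	e = kmem_zalloc(sizeof(*e), KM_SLEEP);	/* zeroed, may sleep */
	e->field = 1;
	kmem_free(e, sizeof(*e));		/* same size as allocation */
}
#endif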

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;
	int alignbit, error;

	alignbit = 0;
	if (align) {
		/* align is assumed to be a power of two */
		alignbit = ffs(align)-1;
	}

	rv = rumpuser_anonmmap(size, alignbit, flags & UVM_KMF_EXEC, &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}
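
/*
 * Illustrative sketch, not compiled: uvm_km_alloc() above is backed by
 * anonymous host memory via rumpuser_anonmmap(), so a typical wired
 * allocation and release looks as follows.  The function name is made up.
 */
#if 0
static void
example_kmcycle(void)
{
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (va == 0)
		return;		/* UVM_KMF_CANFAIL: allocation may fail */
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
}
#endif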

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	/* XXX: a magic non-NULL cookie, not a real map */
	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rumpuser_malloc(PAGE_SIZE, !waitok);
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rumpuser_unmap((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
	void *rv;
	int error;

	rv = rumpuser_anonmmap(PAGE_SIZE, PAGE_SHIFT, 0, &error);
	if (rv == NULL && waitok)
		panic("fixme: poolpage alloc failed");

	return (vaddr_t)rv;
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	rumpuser_unmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Mapping and vm space locking routines.
 * XXX: these don't work for non-local vmspaces
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	KASSERT(vs == &rump_vmspace);
	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	KASSERT(vs == &rump_vmspace);
}

void
vmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_saveaddr = bp->b_data;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}