/*	$NetBSD: vm.c,v 1.59 2009/08/03 17:10:51 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + anon objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.59 2009/08/03 17:10:51 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include "rump_private.h"

static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	uobj->uo_npages++;

	return pg;
}

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	uobj->uo_npages--;
	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

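/*
 * Bookkeeping for virtual addresses handed out for page mappings.
 * Each rumpva records a va -> vm_page translation so that
 * uvm_pageratop() can map back from an address to its page.  The
 * list is protected by rvamtx.
 */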
struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
static kmutex_t rvamtx;

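/* Record that page pg is mapped at virtual address addr.  May sleep. */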
void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = kmem_alloc(sizeof(struct rumpva), KM_SLEEP);
	rva->addr = addr;
	rva->pg = pg;
	mutex_enter(&rvamtx);
	LIST_INSERT_HEAD(&rvahead, rva, entries);
	mutex_exit(&rvamtx);
}

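/*
 * Forget all va -> page translations for uobj's pages and unbusy
 * the pages in question.
 */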
void
rumpvm_flushva(struct uvm_object *uobj)
{
	struct rumpva *rva, *rva_next;

	mutex_enter(&rvamtx);
	for (rva = LIST_FIRST(&rvahead); rva; rva = rva_next) {
		rva_next = LIST_NEXT(rva, entries);
		if (rva->pg->uobject == uobj) {
			LIST_REMOVE(rva, entries);
			uvm_page_unbusy(&rva->pg, 1);
			kmem_free(rva, sizeof(*rva));
		}
	}
	mutex_exit(&rvamtx);
}

/*
 * Anon object stuff
 */

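/*
 * Pager get-pages for anon objects: look up each requested page and
 * create zero-filled backing storage on demand.  Pages are returned
 * busy.  Called with the object locked; the lock is dropped before
 * returning.
 */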
static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

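/*
 * Pager put-pages for anon objects.  Only the "free everything" case
 * (PGO_FREE|PGO_ALLPAGES) does any work; other requests are no-ops.
 * Called with the object locked; the lock is dropped before returning.
 */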
static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

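/*
 * Create an anon object.  size and flags are ignored; backing pages
 * are created lazily by ao_get().
 */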
struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

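/*
 * Detach from an anon object.  References are not counted, so this
 * frees all of the object's pages and then the object itself.
 */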
void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	mutex_destroy(&uobj->vmobjlock);
	kmem_free(uobj, sizeof(*uobj));
}

/*
 * Misc routines
 */

static kmutex_t cachepgmtx;

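/*
 * Bootstrap the vm emulation: pretend we have plenty of free memory,
 * point the kernel maps at the kernel pmap, and initialize the locks
 * used by this module.
 */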
void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
	rump_vmspace.vm_map.pmap = pmap_kernel();

	mutex_init(&rvamtx, MUTEX_DEFAULT, 0);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, 0);
	mutex_init(&cachepgmtx, MUTEX_DEFAULT, 0);

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{

	panic("%s: unimplemented", __func__);
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

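/*
 * Translate a virtual address back to its vm_page via the rumpva
 * list.  Panics if the address was never entered with rumpvm_enterva().
 */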
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			break;
	mutex_exit(&rvamtx);

	if (rva == NULL)
		panic("%s: va %llu", __func__, (unsigned long long)va);

	return rva->pg;
}

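/*
 * Unbusy the given pages: wake up anyone waiting on them and free
 * the ones which were released while busy.
 */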
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	return false;
}

void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * Wake up either the pagedaemon (if memory is still at or
	 * below the kernel reserve) or the LWPs waiting for free pages.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}

/* XXX: the following two are unfinished because LWPs are not refcounted yet */
void
uvm_lwp_hold(struct lwp *l)
{

	atomic_inc_uint(&l->l_holdcnt);
}

void
uvm_lwp_rele(struct lwp *l)
{

	atomic_dec_uint(&l->l_holdcnt);
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_object_printit(struct uvm_object *uobj, bool full,
	void (*pr)(const char *, ...))
{

	/* nada for now */
}

int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
{

	/* nada for now */
	return 0;
}

/*
 * Kmem
 */

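/*
 * Unless the real kernel allocators are used, kmem_alloc() and
 * friends are thin wrappers around the rumpuser hypercalls, i.e.
 * memory comes directly from the hosting environment's allocator.
 */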
#ifndef RUMP_USE_REAL_ALLOCATORS
void
kmem_init(void)
{

	/* nothing to do */
}

void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
#endif /* RUMP_USE_REAL_ALLOCATORS */

/*
 * UVM km
 */

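/*
 * Allocate kernel memory with anonymous backing from the host.
 * The alignment is passed to the hypercall as log2.  Failure is
 * fatal unless the caller permits it with UVM_KMF_CANFAIL or
 * UVM_KMF_NOWAIT.
 */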
vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;
	int alignbit, error;

	alignbit = 0;
	if (align) {
		alignbit = ffs(align)-1;
	}

	rv = rumpuser_anonmmap(size, alignbit, flags & UVM_KMF_EXEC, &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}

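/*
 * Submaps are not emulated.  An arbitrary non-NULL cookie is
 * returned, presumably because callers only check the result
 * against NULL; the value must never be dereferenced.
 */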
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rumpuser_malloc(PAGE_SIZE, !waitok);
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rumpuser_unmap((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
	void *rv;
	int error;

	rv = rumpuser_anonmmap(PAGE_SIZE, PAGE_SHIFT, 0, &error);
	if (rv == NULL && waitok)
		panic("fixme: poolpage alloc failed");

	return (vaddr_t)rv;
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	rumpuser_unmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Mapping and vm space locking routines.
 * XXX: these don't work for non-local vmspaces
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	KASSERT(vs == &rump_vmspace);
	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	KASSERT(vs == &rump_vmspace);
}

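/*
 * Everything is mapped in a rump kernel's single address space, so
 * vmapbuf() and vunmapbuf() only need to save and restore b_data.
 */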
void
vmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_saveaddr = bp->b_data;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}