/*	$NetBSD: vm.c,v 1.55 2009/04/28 14:00:42 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + anon objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.55 2009/04/28 14:00:42 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>

#include "rump_private.h"

static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);

	return pg;
}
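
/*
 * Illustrative sketch, not part of the original interface: given the
 * pg->uanon convention described at the top of this file, the backing
 * storage of a page is reached with a simple cast.  The helper name
 * rumpvm_pagedata is hypothetical.
 */
static inline void *
rumpvm_pagedata(struct vm_page *pg)
{

	/* pg->uanon doubles as the virtual address of the page's storage */
	return (void *)pg->uanon;
}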

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
static kmutex_t rvamtx;

void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = kmem_alloc(sizeof(struct rumpva), KM_SLEEP);
	rva->addr = addr;
	rva->pg = pg;
	mutex_enter(&rvamtx);
	LIST_INSERT_HEAD(&rvahead, rva, entries);
	mutex_exit(&rvamtx);
}

void
rumpvm_flushva(struct uvm_object *uobj)
{
	struct rumpva *rva, *rva_next;

	mutex_enter(&rvamtx);
	for (rva = LIST_FIRST(&rvahead); rva; rva = rva_next) {
		rva_next = LIST_NEXT(rva, entries);
		if (rva->pg->uobject == uobj) {
			LIST_REMOVE(rva, entries);
			uvm_page_unbusy(&rva->pg, 1);
			kmem_free(rva, sizeof(*rva));
		}
	}
	mutex_exit(&rvamtx);
}
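
/*
 * Usage sketch, illustrative only; the function below is hypothetical
 * and assumes pg is still PG_BUSY, as returned by rumpvm_makepage().
 * A host mapping is recorded when a page is handed out, the reverse
 * translation is available via uvm_pageratop(), and every mapping
 * belonging to an object is torn down in one sweep.
 */
static inline void
rumpvm_va_example(struct uvm_object *uobj, vaddr_t va, struct vm_page *pg)
{

	rumpvm_enterva(va, pg);		/* record the va -> page translation */
	rumpvm_flushva(uobj);		/* unbusy and forget all of uobj's vas */
}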

/*
 * Anon object stuff
 */

static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}
    204 
    205 static int
    206 ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
    207 {
    208 	struct vm_page *pg;
    209 
    210 	/* we only free all pages for now */
    211 	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
    212 		mutex_exit(&uobj->vmobjlock);
    213 		return 0;
    214 	}
    215 
    216 	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
    217 		uvm_pagefree(pg);
    218 	mutex_exit(&uobj->vmobjlock);
    219 
    220 	return 0;
    221 }
    222 
struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	/* XXX: size and flags are currently ignored */
	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	mutex_destroy(&uobj->vmobjlock);
	kmem_free(uobj, sizeof(*uobj));
}
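
/*
 * Lifecycle sketch, illustrative only (rump_ao_example is hypothetical):
 * create an anon object, fault one page in through the pager get method
 * (which returns with the object lock dropped), unbusy it, and detach,
 * which frees all remaining pages.
 */
static inline void
rump_ao_example(void)
{
	struct uvm_object *uobj;
	struct vm_page *pg = NULL;
	int npages = 1;

	uobj = uao_create(PAGE_SIZE, 0);
	mutex_enter(&uobj->vmobjlock);
	uobj->pgops->pgo_get(uobj, 0, &pg, &npages, 0,
	    VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_NORMAL, PGO_SYNCIO);

	mutex_enter(&uobj->vmobjlock);
	uvm_page_unbusy(&pg, 1);	/* clear PG_BUSY set by ao_get */
	mutex_exit(&uobj->vmobjlock);

	uao_detach(uobj);		/* frees the page made above */
}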

/*
 * Misc routines
 */

static kmutex_t cachepgmtx;

void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
	rump_vmspace.vm_map.pmap = pmap_kernel();

	mutex_init(&rvamtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&cachepgmtx, MUTEX_DEFAULT, IPL_NONE);

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{

	panic("%s: unimplemented", __func__);
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			break;
	mutex_exit(&rvamtx);

	if (rva == NULL)
		panic("%s: va %#llx not found", __func__,
		    (unsigned long long)va);

	return rva->pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	return false;
}

void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * Wake up either the pagedaemon or the LWPs waiting for it
	 * to make progress.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}

/* XXX: the following two are unfinished because LWPs are not refcounted yet */
void
uvm_lwp_hold(struct lwp *l)
{

	atomic_inc_uint(&l->l_holdcnt);
}

void
uvm_lwp_rele(struct lwp *l)
{

	atomic_dec_uint(&l->l_holdcnt);
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	panic("%s: unimplemented", __func__);
}

/*
 * Kmem
 */

#ifndef RUMP_USE_REAL_ALLOCATORS
void
kmem_init(void)
{

	/* nothing to do */
}

void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
#endif /* RUMP_USE_REAL_ALLOCATORS */
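
/*
 * Usage sketch, illustrative only (rump_kmem_example is hypothetical):
 * each kmem allocation is paired with a free of the same size.
 */
static inline void
rump_kmem_example(void)
{
	char *buf;

	buf = kmem_zalloc(128, KM_SLEEP);	/* zero-filled, sleeps for memory */
	kmem_free(buf, 128);			/* the size must match the alloc */
}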

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;
	int alignbit, error;

	/* convert the requested power-of-two alignment to log2 form */
	alignbit = 0;
	if (align) {
		alignbit = ffs(align)-1;
	}

	rv = rumpuser_anonmmap(size, alignbit, flags & UVM_KMF_EXEC, &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}
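
/*
 * Usage sketch, illustrative only (rump_km_example is hypothetical):
 * allocate page-aligned, zero-filled kernel memory; UVM_KMF_CANFAIL
 * turns allocation failure into a 0 return instead of a panic.
 */
static inline void
rump_km_example(void)
{
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
	    UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (va != 0)
		uvm_km_free(kernel_map, va, PAGE_SIZE, 0);
}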

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	/* XXX: dummy non-NULL cookie, not a real map */
	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rumpuser_malloc(PAGE_SIZE, !waitok);
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rumpuser_unmap((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
	void *rv;
	int error;

	rv = rumpuser_anonmmap(PAGE_SIZE, PAGE_SHIFT, 0, &error);
	if (rv == NULL && waitok)
		panic("fixme: poolpage alloc failed");

	return (vaddr_t)rv;
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	rumpuser_unmap((void *)vaddr, PAGE_SIZE);
}