/*	$NetBSD: vm.c,v 1.41.2.1 2009/01/19 13:20:25 skrll Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + anon objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */
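
/*
 * Illustrative sketch, not part of the original file: how a page's
 * backing storage is reached through the pg->uanon overload described
 * above.  The helper name is hypothetical; the block is kept under
 * #if 0 so it does not affect compilation.
 */
#if 0
/* hypothetical helper writing into a page's backing store */
static void
example_page_write(struct vm_page *pg, const void *src, size_t len)
{

	KASSERT(len <= PAGE_SIZE);
	/* pg->uanon holds the storage va, not a struct vm_anon pointer */
	memcpy((void *)pg->uanon, src, len);
	pg->flags &= ~PG_CLEAN;
}
#endif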

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.41.2.1 2009/01/19 13:20:25 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>

#include "rump_private.h"

static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);

	return pg;
}

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}
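
/*
 * Illustrative sketch, not in the original source: the intended
 * calling pattern for the two routines above.  The object lock must
 * be held across both calls; the helper name is hypothetical.
 */
#if 0
static void
example_page_cycle(struct uvm_object *uobj)
{
	struct vm_page *pg;

	mutex_enter(&uobj->vmobjlock);
	pg = rumpvm_makepage(uobj, 0);
	/* ... fill the page through pg->uanon ... */
	uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);
}
#endif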

struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
static kmutex_t rvamtx;

void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = kmem_alloc(sizeof(struct rumpva), KM_SLEEP);
	rva->addr = addr;
	rva->pg = pg;
	mutex_enter(&rvamtx);
	LIST_INSERT_HEAD(&rvahead, rva, entries);
	mutex_exit(&rvamtx);
}

void
rumpvm_flushva(struct uvm_object *uobj)
{
	struct rumpva *rva, *rva_next;

	mutex_enter(&rvamtx);
	for (rva = LIST_FIRST(&rvahead); rva; rva = rva_next) {
		rva_next = LIST_NEXT(rva, entries);
		if (rva->pg->uobject == uobj) {
			LIST_REMOVE(rva, entries);
			rva->pg->flags &= ~PG_BUSY;
			kmem_free(rva, sizeof(*rva));
		}
	}
	mutex_exit(&rvamtx);
}
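
/*
 * Illustrative sketch, not in the original source: a page is
 * registered under the address it was handed out at and can later be
 * recovered with uvm_pageratop() (defined further below).  The helper
 * name and the address value are made up.
 */
#if 0
static void
example_va_cycle(struct vm_page *pg)
{
	vaddr_t va = (vaddr_t)0x1000;	/* hypothetical pager window va */

	rumpvm_enterva(va, pg);
	KASSERT(uvm_pageratop(va) == pg);
	rumpvm_flushva(pg->uobject);	/* drops the entry, clears PG_BUSY */
}
#endif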

/*
 * Anon object stuff
 */

static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	kmem_free(uobj, sizeof(*uobj));
}
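
/*
 * Illustrative sketch, not in the original source: the life cycle of
 * an anon object through the pager interface above.  Error handling
 * is omitted, the page count is fixed at one, and the helper name is
 * hypothetical.
 */
#if 0
static void
example_aobj_cycle(void)
{
	struct uvm_object *uobj;
	struct vm_page *pgs[1];
	int npages = 1;

	uobj = uao_create(PAGE_SIZE, 0);
	mutex_enter(&uobj->vmobjlock);
	/* pgo_get returns pages busy and releases the object lock */
	(void)uobj->pgops->pgo_get(uobj, 0, pgs, &npages, 0,
	    VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	mutex_enter(&uobj->vmobjlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uobj->vmobjlock);
	uao_detach(uobj);	/* frees remaining pages and the object */
}
#endif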

/*
 * Misc routines
 */

void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
	rump_vmspace.vm_map.pmap = pmap_kernel();

	mutex_init(&rvamtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);

	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{

	panic("%s: unimplemented", __func__);
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			break;
	mutex_exit(&rvamtx);

	if (rva == NULL)
		panic("%s: va %llu", __func__, (unsigned long long)va);

	return rva->pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	return false;
}

void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * Wake up either the pagedaemon or the LWPs waiting for it:
	 * if memory is still scarce, kick the daemon again, otherwise
	 * notify the waiters that pages have been freed.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}

/* XXX: the following two are unfinished because LWPs are not refcounted yet */
void
uvm_lwp_hold(struct lwp *l)
{

	atomic_inc_uint(&l->l_holdcnt);
}

void
uvm_lwp_rele(struct lwp *l)
{

	atomic_dec_uint(&l->l_holdcnt);
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	panic("%s: unimplemented", __func__);
}

/*
 * Kmem
 */

#ifndef RUMP_USE_REAL_KMEM
void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
#endif /* RUMP_USE_REAL_KMEM */
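
/*
 * Illustrative sketch, not in the original source: the kmem wrappers
 * above behave like their kernel counterparts, so an allocation must
 * be freed with its exact size.  The helper name is hypothetical.
 */
#if 0
static void
example_kmem_cycle(void)
{
	struct rumpva *rva;

	rva = kmem_zalloc(sizeof(*rva), KM_SLEEP);	/* KM_SLEEP cannot fail */
	/* ... use rva ... */
	kmem_free(rva, sizeof(*rva));	/* size must match the allocation */
}
#endif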

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;

	rv = rumpuser_malloc(size, flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT));
	if (rv && (flags & UVM_KMF_ZERO))
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_free((void *)vaddr);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	/* XXX: arbitrary non-NULL token; callers must not dereference it */
	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{

	return (vaddr_t)rumpuser_malloc(PAGE_SIZE, !waitok);
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rumpuser_free((void *)addr);
}
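
/*
 * Illustrative sketch, not in the original source: a zeroed wired
 * allocation through the km interface above.  In this emulation the
 * map argument is ignored and the memory comes from rumpuser_malloc().
 * The helper name is hypothetical.
 */
#if 0
static void
example_km_cycle(void)
{
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	/* ... use the memory at va ... */
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
}
#endif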