/*	$NetBSD: vm.c,v 1.28 2008/01/27 00:16:22 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + UBC
 *  + anon objects & pager
 *  + vnode objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */
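
/*
 * For illustration, a page's contents are therefore reached as
 *
 *	uint8_t *data = (uint8_t *)pg->uanon + (off & PAGE_MASK);
 *
 * which is the pattern used by rump_ubc_magic_uiomove() and
 * uvm_vnp_zerorange() below.
 */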

#include <sys/param.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include <machine/pmap.h>

#include "rump_private.h"
#include "rumpuser.h"

/* dumdidumdum */
#define len2npages(off, len)						\
	((((off) & PAGE_MASK) + (len) + PAGE_MASK) >> PAGE_SHIFT)
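
/*
 * Worked example with 4kB pages: a transfer of 0x2000 bytes starting
 * at offset 0x123 touches three pages, and indeed
 * len2npages(0x123, 0x2000) == (0x123 + 0x2000 + 0xfff) >> 12 == 3.
 */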

static int vn_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int vn_put(struct uvm_object *, voff_t, voff_t, int);
static int ao_get(struct uvm_object *, voff_t, struct vm_page **,
	int *, int, vm_prot_t, int, int);
static int ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_get = vn_get,
	.pgo_put = vn_put,
};
const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq);

	return pg;
}
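
/*
 * A sketch of the intended calling pattern (cf. ao_get() below):
 * with the object locked, look the page up and create it on demand:
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	if ((pg = uvm_pagelookup(uobj, off)) == NULL)
 *		pg = rumpvm_makepage(uobj, off);
 *
 * The page comes back PG_BUSY and is later released with
 * uvm_page_unbusy().
 */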

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	TAILQ_REMOVE(&uobj->memq, pg, listq);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
static kmutex_t rvamtx;

void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = kmem_alloc(sizeof(struct rumpva), KM_SLEEP);
	rva->addr = addr;
	rva->pg = pg;
	mutex_enter(&rvamtx);
	LIST_INSERT_HEAD(&rvahead, rva, entries);
	mutex_exit(&rvamtx);
}

void
rumpvm_flushva(void)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	while ((rva = LIST_FIRST(&rvahead)) != NULL) {
		LIST_REMOVE(rva, entries);
		kmem_free(rva, sizeof(*rva));
	}
	mutex_exit(&rvamtx);
}
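
/*
 * The two routines above maintain a va -> vm_page translation list
 * which uvm_pageratop() below consults; rumpvm_flushva() simply
 * forgets every recorded translation in one go.  The callers that
 * establish and tear down the mappings live outside this file.
 */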

/*
 * vnode pager
 */

static int
vn_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_GETPAGES(vp, off, pgs, npages, centeridx, access_type,
	    advice, flags);
}

static int
vn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_PUTPAGES(vp, offlo, offhi, flags);
}

/*
 * Anon object stuff
 */

static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	/* ao_put() expects the object locked and unlocks it itself */
	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	kmem_free(uobj, sizeof(*uobj));
}
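
/*
 * A minimal (hypothetical) usage sketch of the pair above:
 *
 *	struct uvm_object *uobj = uao_create(65536, 0);
 *	... access pages via uobj->pgops->pgo_get() ...
 *	uao_detach(uobj);
 *
 * uao_detach() frees all remaining pages, so no separate flush
 * is needed.
 */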

/*
 * UBC
 */

struct ubc_window {
	struct uvm_object *uwin_obj;
	voff_t uwin_off;
	uint8_t *uwin_mem;
	size_t uwin_mapsize;

	LIST_ENTRY(ubc_window) uwin_entries;
};

static LIST_HEAD(, ubc_window) uwinlst = LIST_HEAD_INITIALIZER(uwinlst);
static kmutex_t uwinmtx;

int
rump_ubc_magic_uiomove(void *va, size_t n, struct uio *uio, int *rvp,
	struct ubc_window *uwinp)
{
	struct vm_page **pgs;
	int npages = len2npages(uio->uio_offset, n);
	size_t allocsize;
	int i, rv;

	if (uwinp == NULL) {
		mutex_enter(&uwinmtx);
		LIST_FOREACH(uwinp, &uwinlst, uwin_entries)
			if ((uint8_t *)va >= uwinp->uwin_mem
			    && (uint8_t *)va
			      < (uwinp->uwin_mem + uwinp->uwin_mapsize))
				break;
		mutex_exit(&uwinmtx);
		if (uwinp == NULL) {
			KASSERT(rvp != NULL);
			return 0;
		}
	}

	allocsize = npages * sizeof(*pgs);
	pgs = kmem_zalloc(allocsize, KM_SLEEP);
	mutex_enter(&uwinp->uwin_obj->vmobjlock);
	rv = uwinp->uwin_obj->pgops->pgo_get(uwinp->uwin_obj,
	    uwinp->uwin_off + ((uint8_t *)va - uwinp->uwin_mem),
	    pgs, &npages, 0, 0, 0, 0);
	if (rv)
		goto out;

	for (i = 0; i < npages; i++) {
		size_t xfersize;
		off_t pageoff;

		pageoff = uio->uio_offset & PAGE_MASK;
		xfersize = MIN(MIN(n, PAGE_SIZE), PAGE_SIZE-pageoff);
		uiomove((uint8_t *)pgs[i]->uanon + pageoff, xfersize, uio);
		if (uio->uio_rw == UIO_WRITE)
			pgs[i]->flags &= ~PG_CLEAN;
		n -= xfersize;
	}
	uvm_page_unbusy(pgs, npages);

 out:
	kmem_free(pgs, allocsize);
	if (rvp)
		*rvp = rv;
	return 1;
}
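
/*
 * How the "magic" fits together: ubc_alloc() below hands out plain
 * anonymous memory rather than a pager-backed kernel mapping, so the
 * fault-driven i/o of the native UBC cannot happen.  Instead, uiomove()
 * is expected to divert to rump_ubc_magic_uiomove() for addresses that
 * fall within a window on uwinlst: the routine pages in the backing
 * object with pgo_get() and copies directly to or from the page
 * storage.  It returns 1 if it handled the request and 0 if the
 * address was not a window, in which case the caller falls back to
 * a normal copy.
 */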

static struct ubc_window *
uwin_alloc(struct uvm_object *uobj, voff_t off, vsize_t len)
{
	struct ubc_window *uwinp; /* pronounced: you wimp! */

	uwinp = kmem_alloc(sizeof(struct ubc_window), KM_SLEEP);
	uwinp->uwin_obj = uobj;
	uwinp->uwin_off = off;
	uwinp->uwin_mapsize = len;
	uwinp->uwin_mem = kmem_alloc(len, KM_SLEEP);

	return uwinp;
}

static void
uwin_free(struct ubc_window *uwinp)
{

	kmem_free(uwinp->uwin_mem, uwinp->uwin_mapsize);
	kmem_free(uwinp, sizeof(struct ubc_window));
}

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
	int flags)
{
	struct ubc_window *uwinp;

	uwinp = uwin_alloc(uobj, offset, *lenp);
	mutex_enter(&uwinmtx);
	LIST_INSERT_HEAD(&uwinlst, uwinp, uwin_entries);
	mutex_exit(&uwinmtx);

	DPRINTF(("UBC_ALLOC offset 0x%llx, uwin %p, mem %p\n",
	    (unsigned long long)offset, uwinp, uwinp->uwin_mem));

	return uwinp->uwin_mem;
}

void
ubc_release(void *va, int flags)
{
	struct ubc_window *uwinp;

	mutex_enter(&uwinmtx);
	LIST_FOREACH(uwinp, &uwinlst, uwin_entries)
		if ((uint8_t *)va >= uwinp->uwin_mem
		    && (uint8_t *)va < (uwinp->uwin_mem + uwinp->uwin_mapsize))
			break;
	if (uwinp == NULL) {
		mutex_exit(&uwinmtx);
		panic("%s: releasing invalid window at %p", __func__, va);
	}
	/* unlink while still holding uwinmtx to avoid a lookup/remove race */
	LIST_REMOVE(uwinp, uwin_entries);
	mutex_exit(&uwinmtx);

	uwin_free(uwinp);
}
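
/*
 * Hypothetical window life cycle, as file system code would drive it:
 *
 *	win = ubc_alloc(uobj, off, &len, advice, flags);
 *	error = uiomove(win, len, uio);	(diverted to the magic above)
 *	ubc_release(win, flags);
 */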

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	struct ubc_window *uwinp;
	vsize_t len;

	while (todo > 0) {
		len = todo;

		uwinp = uwin_alloc(uobj, uio->uio_offset, len);
		rump_ubc_magic_uiomove(uwinp->uwin_mem, len, uio, NULL, uwinp);
		uwin_free(uwinp);

		todo -= len;
	}
	return 0;
}

/*
 * Misc routines
 */

void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */

	mutex_init(&rvamtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uwinmtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			break;
	mutex_exit(&rvamtx);

	if (rva == NULL)
		panic("%s: va %llu", __func__, (unsigned long long)va);

	return rva->pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

void
uvm_aio_biodone1(struct buf *bp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}

void
uvm_aio_aiodone(struct buf *bp)
{

	if (((bp->b_flags | bp->b_cflags) & (B_READ | BC_NOCACHE)) == 0 &&
	    bioopsp)
		bioopsp->io_pageiodone(bp);
}

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{

	vp->v_size = vp->v_writesize = newsize;
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	vp->v_writesize = newsize;
}

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page **pgs;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int rv, npages, i;

	pgs = kmem_zalloc(maxpages * sizeof(struct vm_page *), KM_SLEEP);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		mutex_enter(&uobj->vmobjlock);
		rv = uobj->pgops->pgo_get(uobj, off, pgs, &npages, 0, 0, 0, 0);
		KASSERT(npages > 0);

		for (i = 0; i < npages; i++) {
			uint8_t *start;
			size_t chunkoff, chunklen;

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pgs[i]->uanon + chunkoff;

			memset(start, 0, chunklen);
			pgs[i]->flags &= ~PG_CLEAN;	/* page is now dirty */

			off += chunklen;
			len -= chunklen;
		}
		uvm_page_unbusy(pgs, npages);
	}
	kmem_free(pgs, maxpages * sizeof(struct vm_page *));
}

struct uvm_ractx *
uvm_ra_allocctx(void)
{

	return NULL;
}

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	return;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (void *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

/*
 * Kmem
 */

void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
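
/*
 * As with the real kmem(9) interface, callers must pass the original
 * allocation size to kmem_free():
 *
 *	p = kmem_alloc(sizeof(*p), KM_SLEEP);
 *	...
 *	kmem_free(p, sizeof(*p));
 *
 * The size is ignored here since rumpuser_free() does not need it,
 * but callers should not depend on that.
 */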

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;

	rv = rumpuser_malloc(size, flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT));
	if (rv && (flags & UVM_KMF_ZERO))
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_free((void *)vaddr);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;	/* arbitrary non-NULL cookie */
}

void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * Wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}