/*	$NetBSD: vm.c,v 1.15 2007/09/01 21:40:58 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines. Contents:
 * + UBC
 * + anon objects & pager
 * + vnode objects & pager
 * + misc support routines
 * + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page. phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

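/*
 * Concretely, each page's backing memory is reached by casting uanon
 * back to a byte pointer, e.g. (uint8_t *)pg->uanon + pageoff, as the
 * UBC and zero-range code below does.
 */
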
#include <sys/param.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include <machine/pmap.h>

#include "rump_private.h"
#include "rumpuser.h"

/* dumdidumdum */
#define len2npages(off, len)						\
	(((((len) + PAGE_MASK) & ~(PAGE_MASK)) >> PAGE_SHIFT)		\
	+ ((((off) & PAGE_MASK) + ((len) & PAGE_MASK)) > PAGE_SIZE))

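/*
 * For example, with 4 KiB pages len2npages(0, 0x2000) evaluates to 2,
 * and len2npages(0xf00, 0x200) also evaluates to 2, since those 0x200
 * bytes straddle a page boundary.
 */
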
static int ubc_winvalid;
static struct uvm_object *ubc_uobj;
static off_t ubc_offset;
static int ubc_flags;

struct uvm_pagerops uvm_vnodeops;
struct uvm_pagerops aobj_pager;
struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;

/*
 * vm pages
 */

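/*
 * Allocate a page structure plus PAGE_SIZE of zeroed backing storage
 * from the host, stash the storage pointer in pg->uanon and queue the
 * page on the object.
 */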
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = rumpuser_malloc(sizeof(struct vm_page), 0);
	memset(pg, 0, sizeof(struct vm_page));
	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)rumpuser_malloc(PAGE_SIZE, 0);
	memset((void *)pg->uanon, 0, PAGE_SIZE);
	pg->flags = PG_CLEAN;

	return pg;
}

void
rumpvm_freepage(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	TAILQ_REMOVE(&uobj->memq, pg, listq);
	rumpuser_free((void *)pg->uanon);
	rumpuser_free(pg);
}

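/*
 * Bookkeeping for mappings established with rumpvm_enterva(): a simple
 * list from virtual address to vm_page, consulted by uvm_pageratop().
 */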
struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);

void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = rumpuser_malloc(sizeof(struct rumpva), 0);
	rva->addr = addr;
	rva->pg = pg;
	LIST_INSERT_HEAD(&rvahead, rva, entries);
}

void
rumpvm_flushva(void)
{
	struct rumpva *rva;

	while ((rva = LIST_FIRST(&rvahead)) != NULL) {
		LIST_REMOVE(rva, entries);
		rumpuser_free(rva);
	}
}

/*
 * vnode pager
 */

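/*
 * The vnode pager does no work of its own; it simply forwards page
 * requests to the vnode's VOP_GETPAGES() and VOP_PUTPAGES() operations.
 */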
static int
vn_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
    int *npages, int centeridx, vm_prot_t access_type,
    int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_GETPAGES(vp, off, pgs, npages, centeridx, access_type,
	    advice, flags);
}

static int
vn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_PUTPAGES(vp, offlo, offhi, flags);
}

/*
 * Anon object stuff
 */

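/*
 * The anon pager keeps every page of an object resident on the
 * object's memq: ao_get() creates missing pages on demand, and ao_put()
 * currently only knows how to free all pages at once.
 */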
static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
    int *npages, int centeridx, vm_prot_t access_type,
    int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0)
		return 0;

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		rumpvm_freepage(pg);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = rumpuser_malloc(sizeof(struct uvm_object), 0);
	memset(uobj, 0, sizeof(struct uvm_object));
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	rumpuser_free(uobj);
}

/*
 * UBC
 */

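/*
 * The emulation supports a single UBC window: ubc_alloc() records the
 * target object and offset, and rump_ubc_magic_uiomove() later fetches
 * the pages through the object's pager and copies the data with
 * uiomove().
 */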
int
rump_ubc_magic_uiomove(size_t n, struct uio *uio)
{
	int npages = len2npages(uio->uio_offset, n);
	struct vm_page *pgs[npages];
	int i, rv;

	if (ubc_winvalid == 0)
		panic("%s: ubc window not allocated", __func__);

	memset(pgs, 0, sizeof(pgs));
	rv = ubc_uobj->pgops->pgo_get(ubc_uobj, ubc_offset,
	    pgs, &npages, 0, 0, 0, 0);
	if (rv)
		return rv;

	for (i = 0; i < npages; i++) {
		size_t xfersize;
		off_t pageoff;

		pageoff = uio->uio_offset & PAGE_MASK;
		xfersize = MIN(MIN(n, PAGE_SIZE), PAGE_SIZE-pageoff);
		uiomove((uint8_t *)pgs[i]->uanon + pageoff, xfersize, uio);
		if (uio->uio_rw == UIO_WRITE)
			pgs[i]->flags &= ~PG_CLEAN;
		ubc_offset += xfersize;
		n -= xfersize;
	}

	return 0;
}

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vsize_t reallen;

	/* XXX: only one window, but that's ok for now */
	if (ubc_winvalid == 1)
		panic("%s: ubc window already allocated", __func__);

	printf("UBC_ALLOC offset 0x%x\n", (int)offset);
	ubc_uobj = uobj;
	ubc_offset = offset;
	reallen = round_page(*lenp);
	ubc_flags = flags;

	ubc_winvalid = 1;

	return RUMP_UBC_MAGIC_WINDOW;
}

void
ubc_release(void *va, int flags)
{

	ubc_winvalid = 0;
}

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
    int advice, int flags)
{
	void *win;
	vsize_t len;

	while (todo > 0) {
		len = todo;

		win = ubc_alloc(uobj, uio->uio_offset, &len, 0, flags);
		rump_ubc_magic_uiomove(len, uio);
		ubc_release(win, 0);

		todo -= len;
	}
	return 0;
}

/*
 * Misc routines
 */

void
rumpvm_init(void)
{

	uvm_vnodeops.pgo_get = vn_get;
	uvm_vnodeops.pgo_put = vn_put;
	aobj_pager.pgo_get = ao_get;
	aobj_pager.pgo_put = ao_put;

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq)
		if (pg->offset == off)
			return pg;

	return NULL;
}

struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			return rva->pg;

	panic("%s: va %llu", __func__, (unsigned long long)va);
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	*active = 0;
	*inactive = 0;
	panic("%s: unimplemented", __func__);
}

void
uvm_aio_biodone1(struct buf *bp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}

void
uvm_aio_aiodone(struct buf *bp)
{

	if ((bp->b_flags & (B_READ | B_NOCACHE)) == 0 && bioops.io_pageiodone)
		bioops.io_pageiodone(bp);
}

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{

	vp->v_size = vp->v_writesize = newsize;
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	vp->v_writesize = newsize;
}

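/*
 * Zero a byte range of a vnode by fetching the affected pages through
 * the pager and clearing the relevant parts of their backing storage.
 */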
void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	struct vm_page *pgs[maxpages];
	struct uvm_object *uobj = &vp->v_uobj;
	int rv, npages, i;

	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		rv = uobj->pgops->pgo_get(uobj, off, pgs, &npages, 0, 0, 0, 0);
		assert(npages > 0);

		for (i = 0; i < npages; i++) {
			uint8_t *start;
			size_t chunkoff, chunklen;

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pgs[i]->uanon + chunkoff;

			memset(start, 0, chunklen);
			pgs[i]->flags &= ~PG_CLEAN;

			off += chunklen;
			len -= chunklen;
		}
	}

	return;
}

struct uvm_ractx *
uvm_ra_allocctx(void)
{

	return NULL;
}

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	return;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (void *)uobj;

	return (vp->v_flag & VONWORKLST) == 0;
}

/*
 * Kmem
 */

void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;

	rv = rumpuser_malloc(size, flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT));
	if (rv && flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_free((void *)vaddr);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
    vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}