/*	$NetBSD: vm.c,v 1.13.2.3 2007/10/09 13:45:05 ad Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 *  + UBC
 *  + anon objects & pager
 *  + vnode objects & pager
 *  + misc support routines
 *  + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/param.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include <machine/pmap.h>

#include "rump_private.h"
#include "rumpuser.h"

/* dumdidumdum */
#define len2npages(off, len)						\
	((round_page((off) + (len)) - trunc_page(off)) >> PAGE_SHIFT)
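/*
 * e.g. with 4k pages, off = 0xc00 and len = 0x800 touch pages 0 and 1:
 * (round_page(0x1400) - trunc_page(0xc00)) >> PAGE_SHIFT == 2
 */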

static int ubc_winvalid;
static struct uvm_object *ubc_uobj;
static off_t ubc_offset;
static int ubc_flags;

struct uvm_pagerops uvm_vnodeops;
struct uvm_pagerops aobj_pager;
struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;

/*
 * vm pages
 */

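/*
 * Allocate a page and its backing storage from the host heap.  The
 * page starts out zero-filled, marked clean, and queued on the object.
 */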
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = rumpuser_malloc(sizeof(struct vm_page), 0);
	memset(pg, 0, sizeof(struct vm_page));
	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)rumpuser_malloc(PAGE_SIZE, 0);
	memset((void *)pg->uanon, 0, PAGE_SIZE);
	pg->flags = PG_CLEAN;

	return pg;
}

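/*
 * Release a page: unlink it from its object and free both the backing
 * storage (stashed in pg->uanon, see the XXX above) and the page itself.
 */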
void
rumpvm_freepage(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	TAILQ_REMOVE(&uobj->memq, pg, listq);
	rumpuser_free((void *)pg->uanon);
	rumpuser_free(pg);
}

struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);

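/*
 * Bookkeeping for which vm page a pager virtual address maps to.
 * The list is unsorted: uvm_pageratop() below does a linear lookup
 * and rumpvm_flushva() throws away all entries.
 */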
void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = rumpuser_malloc(sizeof(struct rumpva), 0);
	rva->addr = addr;
	rva->pg = pg;
	LIST_INSERT_HEAD(&rvahead, rva, entries);
}

void
rumpvm_flushva(void)
{
	struct rumpva *rva;

	while ((rva = LIST_FIRST(&rvahead)) != NULL) {
		LIST_REMOVE(rva, entries);
		rumpuser_free(rva);
	}
}

/*
 * vnode pager
 */

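/*
 * The vnode pager simply bounces the request to the vnode operations,
 * i.e. the file system's getpages/putpages routines do the real work.
 */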
static int
vn_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_GETPAGES(vp, off, pgs, npages, centeridx, access_type,
	    advice, flags);
}

static int
vn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_PUTPAGES(vp, offlo, offhi, flags);
}

/*
 * Anon object stuff
 */

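/*
 * The anon pager conjures up zero-filled pages on demand: ao_get()
 * returns a page already in the object if one exists at the offset
 * and creates a fresh one otherwise.  ao_put() implements only the
 * "free everything" case, which is all uao_detach() needs.
 */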
static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg == NULL)
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
		pgs[i] = pg;
	}

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0)
		return 0;

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		rumpvm_freepage(pg);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = rumpuser_malloc(sizeof(struct uvm_object), 0);
	memset(uobj, 0, sizeof(struct uvm_object));
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	rumpuser_free(uobj);
}

/*
 * UBC
 */

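/*
 * UBC is emulated with a single magic window: ubc_alloc() merely
 * records which object and offset the window covers and returns a
 * token address; rump_ubc_magic_uiomove() then pages the data in
 * through pgo_get and copies it page by page with uiomove().
 */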
int
rump_ubc_magic_uiomove(size_t n, struct uio *uio)
{
	struct vm_page **pgs;
	int npages = len2npages(uio->uio_offset, n);
	int i, rv;

	if (ubc_winvalid == 0)
		panic("%s: ubc window not allocated", __func__);

	pgs = rumpuser_malloc(npages * sizeof(*pgs), 0);
	memset(pgs, 0, npages * sizeof(*pgs));
	rv = ubc_uobj->pgops->pgo_get(ubc_uobj, ubc_offset,
	    pgs, &npages, 0, 0, 0, 0);
	if (rv)
		goto out;

	for (i = 0; i < npages; i++) {
		size_t xfersize;
		off_t pageoff;

		pageoff = uio->uio_offset & PAGE_MASK;
		xfersize = MIN(MIN(n, PAGE_SIZE), PAGE_SIZE - pageoff);
		uiomove((uint8_t *)pgs[i]->uanon + pageoff, xfersize, uio);
		if (uio->uio_rw == UIO_WRITE)
			pgs[i]->flags &= ~PG_CLEAN;
		ubc_offset += xfersize;
		n -= xfersize;
	}

 out:
	rumpuser_free(pgs);
	return rv;
}

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
	int flags)
{

	/* XXX: only one window, but that's ok for now */
	if (ubc_winvalid == 1)
		panic("%s: ubc window already allocated", __func__);

	printf("UBC_ALLOC offset 0x%llx\n", (unsigned long long)offset);
	ubc_uobj = uobj;
	ubc_offset = offset;
	ubc_flags = flags;

	ubc_winvalid = 1;

	return RUMP_UBC_MAGIC_WINDOW;
}

void
ubc_release(void *va, int flags)
{

	ubc_winvalid = 0;
}

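/*
 * Move "todo" bytes between the uio and the object through the magic
 * window.  ubc_alloc() never shortens the requested length, so the
 * loop below finishes in one iteration.
 */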
int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	void *win;
	vsize_t len;

	while (todo > 0) {
		len = todo;

		win = ubc_alloc(uobj, uio->uio_offset, &len, 0, flags);
		rump_ubc_magic_uiomove(len, uio);
		ubc_release(win, 0);

		todo -= len;
	}
	return 0;
}

/*
 * Misc routines
 */

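/*
 * Most of the page management hooks below are no-ops: there is no
 * pmap or page daemon here, so there is nothing to activate, wire or
 * unbusy.  Routines we cannot fake simply panic.
 */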
void
rumpvm_init(void)
{

	uvm_vnodeops.pgo_get = vn_get;
	uvm_vnodeops.pgo_put = vn_put;
	aobj_pager.pgo_get = ao_get;
	aobj_pager.pgo_put = ao_put;

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq)
		if (pg->offset == off)
			return pg;

	return NULL;
}

struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			return rva->pg;

	panic("%s: va %llu", __func__, (unsigned long long)va);
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	*active = 0;
	*inactive = 0;
	panic("%s: unimplemented", __func__);
}

void
uvm_aio_biodone1(struct buf *bp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}

void
uvm_aio_aiodone(struct buf *bp)
{

	if ((bp->b_flags & (B_READ | B_NOCACHE)) == 0 && bioopsp)
		bioopsp->io_pageiodone(bp);
}

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{

	vp->v_size = vp->v_writesize = newsize;
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	vp->v_writesize = newsize;
}

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page **pgs;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int rv, npages, i;

	pgs = rumpuser_malloc(maxpages * sizeof(*pgs), 0);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		rv = uobj->pgops->pgo_get(uobj, off, pgs, &npages, 0, 0, 0, 0);
		assert(npages > 0);

		for (i = 0; i < npages; i++) {
			uint8_t *start;
			size_t chunkoff, chunklen;

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pgs[i]->uanon + chunkoff;

			memset(start, 0, chunklen);
			pgs[i]->flags &= ~PG_CLEAN;

			off += chunklen;
			len -= chunklen;
		}
	}
	rumpuser_free(pgs);
}

struct uvm_ractx *
uvm_ra_allocctx(void)
{

	return NULL;
}

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	return;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (void *)uobj;

	return (vp->v_flag & VONWORKLST) == 0;
}

/*
 * Kmem
 */

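/*
 * kmem is backed directly by the host allocator; KM_NOSLEEP turns
 * into the rumpuser "allocation may fail" flag.
 */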
void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}

/*
 * UVM km
 */

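/*
 * Likewise, the uvm_km interfaces map straight to host memory.  There
 * are no kernel maps here, so uvm_km_suballoc() returns a dummy
 * non-NULL cookie instead of a real map.
 */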
vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;

	rv = rumpuser_malloc(size, flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT));
	if (rv && flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_free((void *)vaddr);
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;	/* XXX: dummy non-NULL cookie */
}