vm.c revision 1.12.2.3 1 1.12.2.3 skrll /* $NetBSD: vm.c,v 1.12.2.3 2007/09/03 10:23:56 skrll Exp $ */
2 1.12.2.2 skrll
3 1.12.2.2 skrll /*
4 1.12.2.2 skrll * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 1.12.2.2 skrll *
6 1.12.2.2 skrll * Development of this software was supported by Google Summer of Code.
7 1.12.2.2 skrll *
8 1.12.2.2 skrll * Redistribution and use in source and binary forms, with or without
9 1.12.2.2 skrll * modification, are permitted provided that the following conditions
10 1.12.2.2 skrll * are met:
11 1.12.2.2 skrll * 1. Redistributions of source code must retain the above copyright
12 1.12.2.2 skrll * notice, this list of conditions and the following disclaimer.
13 1.12.2.2 skrll * 2. Redistributions in binary form must reproduce the above copyright
14 1.12.2.2 skrll * notice, this list of conditions and the following disclaimer in the
15 1.12.2.2 skrll * documentation and/or other materials provided with the distribution.
16 1.12.2.2 skrll *
17 1.12.2.2 skrll * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 1.12.2.2 skrll * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 1.12.2.2 skrll * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 1.12.2.2 skrll * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 1.12.2.2 skrll * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 1.12.2.2 skrll * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 1.12.2.2 skrll * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 1.12.2.2 skrll * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 1.12.2.2 skrll * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 1.12.2.2 skrll * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 1.12.2.2 skrll * SUCH DAMAGE.
28 1.12.2.2 skrll */
29 1.12.2.2 skrll
30 1.12.2.2 skrll /*
31 1.12.2.2 skrll * Virtual memory emulation routines. Contents:
32 1.12.2.2 skrll * + UBC
33 1.12.2.2 skrll * + anon objects & pager
34 1.12.2.2 skrll * + vnode objects & pager
35 1.12.2.2 skrll * + misc support routines
36 1.12.2.2 skrll * + kmem
37 1.12.2.2 skrll */
38 1.12.2.2 skrll
39 1.12.2.2 skrll /*
40 1.12.2.2 skrll * XXX: we abuse pg->uanon for the virtual address of the storage
41 1.12.2.2 skrll * for each page. phys_addr would fit the job description better,
42 1.12.2.2 skrll * except that it will create unnecessary lossage on some platforms
43 1.12.2.2 skrll * due to not being a pointer type.
44 1.12.2.2 skrll */
45 1.12.2.2 skrll
46 1.12.2.2 skrll #include <sys/param.h>
47 1.12.2.2 skrll #include <sys/null.h>
48 1.12.2.2 skrll #include <sys/vnode.h>
49 1.12.2.2 skrll #include <sys/buf.h>
50 1.12.2.2 skrll #include <sys/kmem.h>
51 1.12.2.2 skrll
52 1.12.2.2 skrll #include <uvm/uvm.h>
53 1.12.2.2 skrll #include <uvm/uvm_prot.h>
54 1.12.2.2 skrll #include <uvm/uvm_readahead.h>
55 1.12.2.2 skrll
56 1.12.2.2 skrll #include <machine/pmap.h>
57 1.12.2.2 skrll
58 1.12.2.3 skrll #include "rump_private.h"
59 1.12.2.2 skrll #include "rumpuser.h"
60 1.12.2.2 skrll
61 1.12.2.2 skrll /* dumdidumdum */
/*
 * Number of pages needed to back a transfer of "len" bytes starting at
 * file offset "off": the pages fully/partially covered by len itself,
 * plus one more when the in-page start offset pushes the transfer
 * across an extra page boundary.
 *
 * Fix: every macro argument is now parenthesized -- the original used
 * bare "off" and "len" in the second term, which mis-associates for
 * arguments like "a + b".
 */
#define len2npages(off, len) \
    ((((((len)) + PAGE_MASK) & ~(PAGE_MASK)) >> PAGE_SHIFT) \
    + ((((off) & PAGE_MASK) + ((len) & PAGE_MASK)) > PAGE_SIZE))
65 1.12.2.2 skrll
/*
 * Single-window UBC emulation state; see ubc_alloc()/ubc_release().
 * Only one window may be mapped at a time (ubc_alloc panics otherwise).
 */
static int ubc_winvalid;		/* 1 while the window is mapped */
static struct uvm_object *ubc_uobj;	/* object backing the window */
static off_t ubc_offset;		/* current transfer offset in uobj */
static int ubc_flags;			/* flags passed to ubc_alloc() */

/* pager op vectors and global uvm state; filled in by rumpvm_init() */
struct uvm_pagerops uvm_vnodeops;
struct uvm_pagerops aobj_pager;
struct uvmexp uvmexp;
struct uvm uvm;

/* dummy process vm state handed to code that expects a vmspace/map */
struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
78 1.12.2.2 skrll
79 1.12.2.2 skrll /*
80 1.12.2.2 skrll * vm pages
81 1.12.2.2 skrll */
82 1.12.2.2 skrll
83 1.12.2.2 skrll struct vm_page *
84 1.12.2.2 skrll rumpvm_makepage(struct uvm_object *uobj, voff_t off)
85 1.12.2.2 skrll {
86 1.12.2.2 skrll struct vm_page *pg;
87 1.12.2.2 skrll
88 1.12.2.2 skrll pg = rumpuser_malloc(sizeof(struct vm_page), 0);
89 1.12.2.2 skrll memset(pg, 0, sizeof(struct vm_page));
90 1.12.2.2 skrll TAILQ_INSERT_TAIL(&uobj->memq, pg, listq);
91 1.12.2.2 skrll pg->offset = off;
92 1.12.2.2 skrll pg->uobject = uobj;
93 1.12.2.2 skrll
94 1.12.2.2 skrll pg->uanon = (void *)rumpuser_malloc(PAGE_SIZE, 0);
95 1.12.2.2 skrll memset((void *)pg->uanon, 0, PAGE_SIZE);
96 1.12.2.2 skrll pg->flags = PG_CLEAN;
97 1.12.2.2 skrll
98 1.12.2.2 skrll return pg;
99 1.12.2.2 skrll }
100 1.12.2.2 skrll
101 1.12.2.2 skrll void
102 1.12.2.2 skrll rumpvm_freepage(struct vm_page *pg)
103 1.12.2.2 skrll {
104 1.12.2.2 skrll struct uvm_object *uobj = pg->uobject;
105 1.12.2.2 skrll
106 1.12.2.2 skrll TAILQ_REMOVE(&uobj->memq, pg, listq);
107 1.12.2.2 skrll rumpuser_free((void *)pg->uanon);
108 1.12.2.2 skrll rumpuser_free(pg);
109 1.12.2.2 skrll }
110 1.12.2.2 skrll
/*
 * Bookkeeping for va->page mappings: each entry records one virtual
 * address and the page mapped there.  Entries are added by
 * rumpvm_enterva(), consulted by uvm_pageratop(), and torn down
 * wholesale by rumpvm_flushva().
 */
struct rumpva {
	vaddr_t addr;		/* mapped virtual address */
	struct vm_page *pg;	/* page backing that address */

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
118 1.12.2.3 skrll
119 1.12.2.3 skrll void
120 1.12.2.3 skrll rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
121 1.12.2.3 skrll {
122 1.12.2.3 skrll struct rumpva *rva;
123 1.12.2.3 skrll
124 1.12.2.3 skrll rva = rumpuser_malloc(sizeof(struct rumpva), 0);
125 1.12.2.3 skrll rva->addr = addr;
126 1.12.2.3 skrll rva->pg = pg;
127 1.12.2.3 skrll LIST_INSERT_HEAD(&rvahead, rva, entries);
128 1.12.2.3 skrll }
129 1.12.2.3 skrll
130 1.12.2.3 skrll void
131 1.12.2.3 skrll rumpvm_flushva()
132 1.12.2.3 skrll {
133 1.12.2.3 skrll struct rumpva *rva;
134 1.12.2.3 skrll
135 1.12.2.3 skrll while ((rva = LIST_FIRST(&rvahead)) != NULL) {
136 1.12.2.3 skrll LIST_REMOVE(rva, entries);
137 1.12.2.3 skrll rumpuser_free(rva);
138 1.12.2.3 skrll }
139 1.12.2.3 skrll }
140 1.12.2.3 skrll
141 1.12.2.2 skrll /*
142 1.12.2.2 skrll * vnode pager
143 1.12.2.2 skrll */
144 1.12.2.2 skrll
145 1.12.2.2 skrll static int
146 1.12.2.2 skrll vn_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
147 1.12.2.2 skrll int *npages, int centeridx, vm_prot_t access_type,
148 1.12.2.2 skrll int advice, int flags)
149 1.12.2.2 skrll {
150 1.12.2.2 skrll struct vnode *vp = (struct vnode *)uobj;
151 1.12.2.2 skrll
152 1.12.2.2 skrll return VOP_GETPAGES(vp, off, pgs, npages, centeridx, access_type,
153 1.12.2.2 skrll advice, flags);
154 1.12.2.2 skrll }
155 1.12.2.2 skrll
156 1.12.2.2 skrll static int
157 1.12.2.2 skrll vn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
158 1.12.2.2 skrll {
159 1.12.2.2 skrll struct vnode *vp = (struct vnode *)uobj;
160 1.12.2.2 skrll
161 1.12.2.2 skrll return VOP_PUTPAGES(vp, offlo, offhi, flags);
162 1.12.2.2 skrll }
163 1.12.2.2 skrll
164 1.12.2.2 skrll /*
165 1.12.2.2 skrll * Anon object stuff
166 1.12.2.2 skrll */
167 1.12.2.2 skrll
168 1.12.2.2 skrll static int
169 1.12.2.2 skrll ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
170 1.12.2.2 skrll int *npages, int centeridx, vm_prot_t access_type,
171 1.12.2.2 skrll int advice, int flags)
172 1.12.2.2 skrll {
173 1.12.2.2 skrll struct vm_page *pg;
174 1.12.2.2 skrll int i;
175 1.12.2.2 skrll
176 1.12.2.2 skrll if (centeridx)
177 1.12.2.2 skrll panic("%s: centeridx != 0 not supported", __func__);
178 1.12.2.2 skrll
179 1.12.2.2 skrll /* loop over pages */
180 1.12.2.2 skrll off = trunc_page(off);
181 1.12.2.2 skrll for (i = 0; i < *npages; i++) {
182 1.12.2.2 skrll pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
183 1.12.2.2 skrll if (pg) {
184 1.12.2.2 skrll pgs[i] = pg;
185 1.12.2.2 skrll } else {
186 1.12.2.2 skrll pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
187 1.12.2.2 skrll pgs[i] = pg;
188 1.12.2.2 skrll }
189 1.12.2.2 skrll }
190 1.12.2.2 skrll
191 1.12.2.2 skrll return 0;
192 1.12.2.2 skrll
193 1.12.2.2 skrll }
194 1.12.2.2 skrll
195 1.12.2.2 skrll static int
196 1.12.2.2 skrll ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
197 1.12.2.2 skrll {
198 1.12.2.2 skrll struct vm_page *pg;
199 1.12.2.2 skrll
200 1.12.2.2 skrll /* we only free all pages for now */
201 1.12.2.2 skrll if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0)
202 1.12.2.2 skrll return 0;
203 1.12.2.2 skrll
204 1.12.2.2 skrll while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
205 1.12.2.2 skrll rumpvm_freepage(pg);
206 1.12.2.2 skrll
207 1.12.2.2 skrll return 0;
208 1.12.2.2 skrll }
209 1.12.2.2 skrll
210 1.12.2.2 skrll struct uvm_object *
211 1.12.2.2 skrll uao_create(vsize_t size, int flags)
212 1.12.2.2 skrll {
213 1.12.2.2 skrll struct uvm_object *uobj;
214 1.12.2.2 skrll
215 1.12.2.2 skrll uobj = rumpuser_malloc(sizeof(struct uvm_object), 0);
216 1.12.2.2 skrll memset(uobj, 0, sizeof(struct uvm_object));
217 1.12.2.2 skrll uobj->pgops = &aobj_pager;
218 1.12.2.2 skrll TAILQ_INIT(&uobj->memq);
219 1.12.2.2 skrll
220 1.12.2.2 skrll return uobj;
221 1.12.2.2 skrll }
222 1.12.2.2 skrll
223 1.12.2.2 skrll void
224 1.12.2.2 skrll uao_detach(struct uvm_object *uobj)
225 1.12.2.2 skrll {
226 1.12.2.2 skrll
227 1.12.2.2 skrll ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
228 1.12.2.2 skrll rumpuser_free(uobj);
229 1.12.2.2 skrll }
230 1.12.2.2 skrll
231 1.12.2.2 skrll /*
232 1.12.2.2 skrll * UBC
233 1.12.2.2 skrll */
234 1.12.2.2 skrll
/*
 * Move n bytes between the currently mapped UBC window and uio.
 * The window must have been set up with ubc_alloc() first.  Pages are
 * fetched through the backing object's pager and the copy runs
 * directly against each page's host storage (pg->uanon).
 *
 * Returns 0 on success or the pager's error.
 */
int
rump_ubc_magic_uiomove(size_t n, struct uio *uio)
{
	int npages = len2npages(uio->uio_offset, n);
	struct vm_page *pgs[npages];	/* VLA sized by transfer length */
	int i, rv;

	if (ubc_winvalid == 0)
		panic("%s: ubc window not allocated", __func__);

	memset(pgs, 0, sizeof(pgs));
	rv = ubc_uobj->pgops->pgo_get(ubc_uobj, ubc_offset,
	    pgs, &npages, 0, 0, 0, 0);
	if (rv)
		return rv;

	for (i = 0; i < npages; i++) {
		size_t xfersize;
		off_t pageoff;

		/*
		 * Clamp each chunk to the remaining bytes and to the
		 * end of the current page (partial first/last pages).
		 * pageoff tracks uio_offset, which uiomove advances.
		 */
		pageoff = uio->uio_offset & PAGE_MASK;
		xfersize = MIN(MIN(n, PAGE_SIZE), PAGE_SIZE-pageoff);
		uiomove((uint8_t *)pgs[i]->uanon + pageoff, xfersize, uio);
		if (uio->uio_rw == UIO_WRITE)
			pgs[i]->flags &= ~PG_CLEAN;	/* page is now dirty */
		ubc_offset += xfersize;
		n -= xfersize;
	}

	return 0;
}
266 1.12.2.2 skrll
267 1.12.2.2 skrll void *
268 1.12.2.2 skrll ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
269 1.12.2.2 skrll int flags)
270 1.12.2.2 skrll {
271 1.12.2.2 skrll vsize_t reallen;
272 1.12.2.2 skrll
273 1.12.2.2 skrll /* XXX: only one window, but that's ok for now */
274 1.12.2.2 skrll if (ubc_winvalid == 1)
275 1.12.2.2 skrll panic("%s: ubc window already allocated", __func__);
276 1.12.2.2 skrll
277 1.12.2.2 skrll printf("UBC_ALLOC offset 0x%x\n", (int)offset);
278 1.12.2.2 skrll ubc_uobj = uobj;
279 1.12.2.2 skrll ubc_offset = offset;
280 1.12.2.2 skrll reallen = round_page(*lenp);
281 1.12.2.2 skrll ubc_flags = flags;
282 1.12.2.2 skrll
283 1.12.2.2 skrll ubc_winvalid = 1;
284 1.12.2.2 skrll
285 1.12.2.2 skrll return RUMP_UBC_MAGIC_WINDOW;
286 1.12.2.2 skrll }
287 1.12.2.2 skrll
/*
 * Unmap the single UBC window.  va is the cookie from ubc_alloc() and
 * flags are ignored; all that happens is the window becomes available
 * for the next ubc_alloc().
 */
void
ubc_release(void *va, int flags)
{

	ubc_winvalid = 0;
}
294 1.12.2.2 skrll
295 1.12.2.2 skrll int
296 1.12.2.2 skrll ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
297 1.12.2.2 skrll int advice, int flags)
298 1.12.2.2 skrll {
299 1.12.2.2 skrll void *win;
300 1.12.2.2 skrll vsize_t len;
301 1.12.2.2 skrll
302 1.12.2.2 skrll while (todo > 0) {
303 1.12.2.2 skrll len = todo;
304 1.12.2.2 skrll
305 1.12.2.2 skrll win = ubc_alloc(uobj, uio->uio_offset, &len, 0, flags);
306 1.12.2.2 skrll rump_ubc_magic_uiomove(len, uio);
307 1.12.2.2 skrll ubc_release(win, 0);
308 1.12.2.2 skrll
309 1.12.2.2 skrll todo -= len;
310 1.12.2.2 skrll }
311 1.12.2.2 skrll return 0;
312 1.12.2.2 skrll }
313 1.12.2.2 skrll
314 1.12.2.2 skrll
315 1.12.2.2 skrll /*
316 1.12.2.2 skrll * Misc routines
317 1.12.2.2 skrll */
318 1.12.2.2 skrll
319 1.12.2.2 skrll void
320 1.12.2.2 skrll rumpvm_init()
321 1.12.2.2 skrll {
322 1.12.2.2 skrll
323 1.12.2.2 skrll uvm_vnodeops.pgo_get = vn_get;
324 1.12.2.2 skrll uvm_vnodeops.pgo_put = vn_put;
325 1.12.2.2 skrll aobj_pager.pgo_get = ao_get;
326 1.12.2.2 skrll aobj_pager.pgo_put = ao_put;
327 1.12.2.2 skrll
328 1.12.2.2 skrll uvmexp.free = 1024*1024; /* XXX */
329 1.12.2.2 skrll uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
330 1.12.2.2 skrll }
331 1.12.2.2 skrll
/*
 * Page activation is meaningless here: there is no page daemon and no
 * paging, so this is intentionally a no-op.
 */
void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}
338 1.12.2.2 skrll
/*
 * No-op: pages are never marked busy in this emulation, so there is
 * nothing to unbusy or wake up.
 */
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{

	/* nada */
}
345 1.12.2.2 skrll
/*
 * No-op: pages are plain host memory and can never be paged out, so
 * wiring has no effect.
 */
void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}
352 1.12.2.2 skrll
/* No-op counterpart to uvm_pagewire(); see comment there. */
void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}
359 1.12.2.2 skrll
/*
 * Unimplemented: nothing in the current rump code paths maps pager
 * pages into kernel va space.  Panics if ever reached.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}
366 1.12.2.2 skrll
367 1.12.2.2 skrll struct vm_page *
368 1.12.2.2 skrll uvm_pagelookup(struct uvm_object *uobj, voff_t off)
369 1.12.2.2 skrll {
370 1.12.2.2 skrll struct vm_page *pg;
371 1.12.2.2 skrll
372 1.12.2.2 skrll TAILQ_FOREACH(pg, &uobj->memq, listq)
373 1.12.2.2 skrll if (pg->offset == off)
374 1.12.2.2 skrll return pg;
375 1.12.2.2 skrll
376 1.12.2.2 skrll return NULL;
377 1.12.2.2 skrll }
378 1.12.2.2 skrll
379 1.12.2.3 skrll struct vm_page *
380 1.12.2.3 skrll uvm_pageratop(vaddr_t va)
381 1.12.2.3 skrll {
382 1.12.2.3 skrll struct rumpva *rva;
383 1.12.2.3 skrll
384 1.12.2.3 skrll LIST_FOREACH(rva, &rvahead, entries)
385 1.12.2.3 skrll if (rva->addr == va)
386 1.12.2.3 skrll return rva->pg;
387 1.12.2.3 skrll
388 1.12.2.3 skrll panic("%s: va %llu", __func__, (unsigned long long)va);
389 1.12.2.3 skrll }
390 1.12.2.3 skrll
/*
 * Unimplemented.  The outputs are zeroed before the panic, presumably
 * so a future non-panicking version has sane defaults -- TODO confirm
 * whether any caller is expected to survive this.
 */
void
uvm_estimatepageable(int *active, int *inactive)
{

	*active = 0;
	*inactive = 0;
	panic("%s: unimplemented", __func__);
}
399 1.12.2.2 skrll
/* Unimplemented: multi-buf async i/o completion is never used here. */
void
uvm_aio_biodone1(struct buf *bp)
{

	panic("%s: unimplemented", __func__);
}
406 1.12.2.2 skrll
/* Async i/o completion: everything is synchronous here, so finish
 * the "aio" immediately. */
void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}
413 1.12.2.3 skrll
/*
 * Finish async page i/o on bp.  For completed writes (neither B_READ
 * nor B_NOCACHE set), notify the registered bio ops hook, if any.
 * NOTE(review): bioopsp is an external hook table (softdep-style);
 * no page cleanup is performed here -- confirm callers handle that.
 */
void
uvm_aio_aiodone(struct buf *bp)
{

	if ((bp->b_flags & (B_READ | B_NOCACHE)) == 0 && bioopsp)
		bioopsp->io_pageiodone(bp);
}
421 1.12.2.2 skrll
422 1.12.2.2 skrll void
423 1.12.2.2 skrll uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
424 1.12.2.2 skrll {
425 1.12.2.2 skrll
426 1.12.2.2 skrll vp->v_size = vp->v_writesize = newsize;
427 1.12.2.2 skrll }
428 1.12.2.2 skrll
/* Set only the in-flight ("write") size; v_size is left untouched. */
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	vp->v_writesize = newsize;
}
435 1.12.2.2 skrll
436 1.12.2.2 skrll void
437 1.12.2.2 skrll uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
438 1.12.2.2 skrll {
439 1.12.2.2 skrll int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
440 1.12.2.2 skrll struct vm_page *pgs[maxpages];
441 1.12.2.2 skrll struct uvm_object *uobj = &vp->v_uobj;
442 1.12.2.2 skrll int rv, npages, i;
443 1.12.2.2 skrll
444 1.12.2.2 skrll while (len) {
445 1.12.2.2 skrll npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
446 1.12.2.2 skrll memset(pgs, 0, npages * sizeof(struct vm_page *));
447 1.12.2.2 skrll rv = uobj->pgops->pgo_get(uobj, off, pgs, &npages, 0, 0, 0, 0);
448 1.12.2.2 skrll assert(npages > 0);
449 1.12.2.2 skrll
450 1.12.2.2 skrll for (i = 0; i < npages; i++) {
451 1.12.2.2 skrll uint8_t *start;
452 1.12.2.2 skrll size_t chunkoff, chunklen;
453 1.12.2.2 skrll
454 1.12.2.2 skrll chunkoff = off & PAGE_MASK;
455 1.12.2.2 skrll chunklen = MIN(PAGE_SIZE - chunkoff, len);
456 1.12.2.2 skrll start = (uint8_t *)pgs[i]->uanon + chunkoff;
457 1.12.2.2 skrll
458 1.12.2.2 skrll memset(start, 0, chunklen);
459 1.12.2.2 skrll pgs[i]->flags &= PG_CLEAN;
460 1.12.2.2 skrll
461 1.12.2.2 skrll off += chunklen;
462 1.12.2.2 skrll len -= chunklen;
463 1.12.2.2 skrll }
464 1.12.2.2 skrll }
465 1.12.2.2 skrll
466 1.12.2.2 skrll return;
467 1.12.2.2 skrll }
468 1.12.2.2 skrll
/*
 * Read-ahead is not emulated: hand out a NULL context, which
 * uvm_ra_freectx() correspondingly ignores.
 *
 * Fix: declared (void) instead of the unprototyped empty list.
 */
struct uvm_ractx *
uvm_ra_allocctx(void)
{

	return NULL;
}
475 1.12.2.2 skrll
/* Nothing to free: uvm_ra_allocctx() never allocates a context. */
void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	/* nothing */
}
482 1.12.2.2 skrll
483 1.12.2.2 skrll bool
484 1.12.2.2 skrll uvn_clean_p(struct uvm_object *uobj)
485 1.12.2.2 skrll {
486 1.12.2.2 skrll struct vnode *vp = (void *)uobj;
487 1.12.2.2 skrll
488 1.12.2.2 skrll return (vp->v_flag & VONWORKLST) == 0;
489 1.12.2.2 skrll }
490 1.12.2.2 skrll
491 1.12.2.2 skrll /*
492 1.12.2.2 skrll * Kmem
493 1.12.2.2 skrll */
494 1.12.2.2 skrll
495 1.12.2.2 skrll void *
496 1.12.2.2 skrll kmem_alloc(size_t size, km_flag_t kmflag)
497 1.12.2.2 skrll {
498 1.12.2.2 skrll
499 1.12.2.2 skrll return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
500 1.12.2.2 skrll }
501 1.12.2.2 skrll
502 1.12.2.2 skrll void *
503 1.12.2.2 skrll kmem_zalloc(size_t size, km_flag_t kmflag)
504 1.12.2.2 skrll {
505 1.12.2.2 skrll void *rv;
506 1.12.2.2 skrll
507 1.12.2.2 skrll rv = kmem_alloc(size, kmflag);
508 1.12.2.2 skrll if (rv)
509 1.12.2.2 skrll memset(rv, 0, size);
510 1.12.2.2 skrll
511 1.12.2.2 skrll return rv;
512 1.12.2.2 skrll }
513 1.12.2.2 skrll
/* Release memory from kmem_alloc(); size is unused by the host side. */
void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
520 1.12.2.2 skrll
521 1.12.2.2 skrll /*
522 1.12.2.2 skrll * UVM km
523 1.12.2.2 skrll */
524 1.12.2.2 skrll
525 1.12.2.2 skrll vaddr_t
526 1.12.2.2 skrll uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
527 1.12.2.2 skrll {
528 1.12.2.2 skrll void *rv;
529 1.12.2.2 skrll
530 1.12.2.2 skrll rv = rumpuser_malloc(size, flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT));
531 1.12.2.2 skrll if (rv && flags & UVM_KMF_ZERO)
532 1.12.2.2 skrll memset(rv, 0, size);
533 1.12.2.2 skrll
534 1.12.2.2 skrll return (vaddr_t)rv;
535 1.12.2.2 skrll }
536 1.12.2.2 skrll
/* Counterpart to uvm_km_alloc(); map, size and flags are ignored. */
void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_free((void *)vaddr);
}
543 1.12.2.2 skrll
/*
 * Fake submap creation.  Returns an arbitrary non-NULL cookie
 * (417416 has no meaning) -- presumably callers only pass the map
 * around without dereferencing it; TODO confirm no caller ever
 * inspects the returned vm_map.
 */
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}
551