/*	$NetBSD: vm.c,v 1.13.2.2 2007/08/20 22:07:31 ad Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines. Contents:
 * + UBC
 * + anon objects & pager
 * + vnode objects & pager
 * + misc support routines
 * + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page. phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/param.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include <machine/pmap.h>

#include "rump.h"
#include "rumpuser.h"

/*
 * Number of pages needed to back the range [off, off+len).  The old
 * formulation undercounted when off was unaligned but len was a
 * multiple of the page size, e.g. off=0x800, len=0x1000 spans two pages.
 */
#define len2npages(off, len)						\
	((round_page((off) + (len)) - trunc_page(off)) >> PAGE_SHIFT)
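
/*
 * Illustrative examples (assuming 4kB pages; values for exposition only):
 *
 *	len2npages(0x0000, 0x1000) == 1
 *	len2npages(0x0800, 0x1000) == 2		(straddles a page boundary)
 *	len2npages(0x0c00, 0x2400) == 3
 */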

static int ubc_winvalid;
static struct uvm_object *ubc_uobj;
static off_t ubc_offset;
static int ubc_flags;

struct uvm_pagerops uvm_vnodeops;
struct uvm_pagerops aobj_pager;
struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;

/*
 * vm pages
 */

struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = rumpuser_malloc(sizeof(struct vm_page), 0);
	memset(pg, 0, sizeof(struct vm_page));
	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq);
	pg->offset = off;
	pg->uobject = uobj;

	/* backing storage hangs off of uanon; see the XXX comment up top */
	pg->uanon = (void *)rumpuser_malloc(PAGE_SIZE, 0);
	memset((void *)pg->uanon, 0, PAGE_SIZE);
	pg->flags = PG_CLEAN;

	return pg;
}

void
rumpvm_freepage(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	TAILQ_REMOVE(&uobj->memq, pg, listq);
	rumpuser_free((void *)pg->uanon);
	rumpuser_free(pg);
}

/*
 * vnode pager
 */

static int
vn_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_GETPAGES(vp, off, pgs, npages, centeridx, access_type,
	    advice, flags);
}

static int
vn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;

	return VOP_PUTPAGES(vp, offlo, offhi, flags);
}

/*
 * Anon object stuff
 */

static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages, creating missing ones on demand */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg == NULL)
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
		pgs[i] = pg;
	}

	return 0;
}

static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only free all pages for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0)
		return 0;

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		rumpvm_freepage(pg);

	return 0;
}

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = rumpuser_malloc(sizeof(struct uvm_object), 0);
	memset(uobj, 0, sizeof(struct uvm_object));
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);

	return uobj;
}

void
uao_detach(struct uvm_object *uobj)
{

	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	rumpuser_free(uobj);
}
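
/*
 * Illustrative use (a sketch, not from this file):
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *	...
 *	uao_detach(uobj);	frees all pages and the object itself
 */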

/*
 * UBC
 */
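
/*
 * Instead of mapping file pages into a window, ubc_alloc() here hands
 * out a magic cookie (RUMP_UBC_MAGIC_WINDOW).  Accesses to the window
 * are expected to go through rump_ubc_magic_uiomove(), which copies
 * to/from the backing storage of the pages of the object registered
 * by ubc_alloc().  Only one window is valid at a time.
 */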

int
rump_ubc_magic_uiomove(size_t n, struct uio *uio)
{
	int npages = len2npages(uio->uio_offset, n);
	struct vm_page *pgs[npages];
	int i, rv;

	if (ubc_winvalid == 0)
		panic("%s: ubc window not allocated", __func__);

	memset(pgs, 0, sizeof(pgs));
	rv = ubc_uobj->pgops->pgo_get(ubc_uobj, ubc_offset,
	    pgs, &npages, 0, 0, 0, 0);
	if (rv)
		return rv;

	for (i = 0; i < npages; i++) {
		size_t xfersize;
		off_t pageoff;

		pageoff = uio->uio_offset & PAGE_MASK;
		xfersize = MIN(MIN(n, PAGE_SIZE), PAGE_SIZE-pageoff);
		uiomove((uint8_t *)pgs[i]->uanon + pageoff, xfersize, uio);
		if (uio->uio_rw == UIO_WRITE)
			pgs[i]->flags &= ~PG_CLEAN;
		ubc_offset += xfersize;
		n -= xfersize;
	}

	return 0;
}

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
	int flags)
{

	/* XXX: only one window, but that's ok for now */
	if (ubc_winvalid == 1)
		panic("%s: ubc window already allocated", __func__);

	printf("UBC_ALLOC offset 0x%x\n", (int)offset);
	ubc_uobj = uobj;
	ubc_offset = offset;
	ubc_flags = flags;

	/* the magic window spans the full requested length, so *lenp
	 * is returned unmodified */
	ubc_winvalid = 1;

	return RUMP_UBC_MAGIC_WINDOW;
}

void
ubc_release(void *va, int flags)
{

	ubc_winvalid = 0;
}

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	void *win;
	vsize_t len;
	int error;

	while (todo > 0) {
		len = todo;

		win = ubc_alloc(uobj, uio->uio_offset, &len, advice, flags);
		error = rump_ubc_magic_uiomove(len, uio);
		ubc_release(win, 0);
		if (error)
			return error;

		todo -= len;
	}
	return 0;
}
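
/*
 * Illustrative caller (a sketch, not from this file): a file system
 * write path could push a uio into a vnode's pages with e.g.
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytes,
 *	    UVM_ADV_SEQUENTIAL, UBC_WRITE);
 */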

/*
 * Misc routines
 */

void
rumpvm_init(void)
{

	uvm_vnodeops.pgo_get = vn_get;
	uvm_vnodeops.pgo_put = vn_put;
	aobj_pager.pgo_get = ao_get;
	aobj_pager.pgo_put = ao_put;

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq)
		if (pg->offset == off)
			return pg;

	return NULL;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	*active = 0;
	*inactive = 0;
	panic("%s: unimplemented", __func__);
}

void
uvm_aio_biodone1(struct buf *bp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_aio_biodone(struct buf *bp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{

	vp->v_size = vp->v_writesize = newsize;
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	vp->v_writesize = newsize;
}

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	struct vm_page *pgs[maxpages];
	struct uvm_object *uobj = &vp->v_uobj;
	int rv, npages, i;

	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		rv = uobj->pgops->pgo_get(uobj, off, pgs, &npages, 0, 0, 0, 0);
		assert(rv == 0);
		assert(npages > 0);

		for (i = 0; i < npages; i++) {
			uint8_t *start;
			size_t chunkoff, chunklen;

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pgs[i]->uanon + chunkoff;

			memset(start, 0, chunklen);
			/* we modified the page, so it is no longer clean */
			pgs[i]->flags &= ~PG_CLEAN;

			off += chunklen;
			len -= chunklen;
		}
	}
}

struct uvm_ractx *
uvm_ra_allocctx(void)
{

	return NULL;
}

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	return;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (void *)uobj;

	return (vp->v_flag & VONWORKLST) == 0;
}

/*
 * Kmem
 */

void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
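
/*
 * Illustrative use (a sketch, not from this file):
 *
 *	struct foo *f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	...
 *	kmem_free(f, sizeof(*f));
 */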

/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;

	rv = rumpuser_malloc(size, flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT));
	if (rv && flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_free((void *)vaddr);
}
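
/*
 * Illustrative use (a sketch; the map argument is ignored by this
 * emulation):
 *
 *	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO);
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */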

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	/* XXX: a fake non-NULL cookie; callers must not dereference it */
	return (struct vm_map *)417416;
}