/*	$NetBSD: vm.c,v 1.59 2009/08/03 17:10:51 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.  Contents:
 * + anon objects & pager
 * + misc support routines
 * + kmem
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */
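/*
 * In other words, a page's backing storage is reached by a cast,
 * e.g. (illustration only):
 *
 *	memset((void *)pg->uanon, 0, PAGE_SIZE);
 */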

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.59 2009/08/03 17:10:51 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <machine/pmap.h>

#include <rump/rumpuser.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>

#include "rump_private.h"

static int	ao_get(struct uvm_object *, voff_t, struct vm_page **,
			int *, int, vm_prot_t, int, int);
static int	ao_put(struct uvm_object *, voff_t, voff_t, int);

const struct uvm_pagerops aobj_pager = {
	.pgo_get = ao_get,
	.pgo_put = ao_put,
};

kmutex_t uvm_pageqlock;

struct uvmexp uvmexp;
struct uvm uvm;

struct vmspace rump_vmspace;
struct vm_map rump_vmmap;
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = &kmem_map_store.vmk_map;
const struct rb_tree_ops uvm_page_tree_ops;

static struct vm_map_kernel kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store.vmk_map;

/*
 * vm pages
 */

/* called with the object locked */
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	pg = kmem_zalloc(sizeof(struct vm_page), KM_SLEEP);
	pg->offset = off;
	pg->uobject = uobj;

	pg->uanon = (void *)kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	uobj->uo_npages++;

	return pg;
}

/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	if (pg->flags & PG_WANTED)
		wakeup(pg);

	uobj->uo_npages--;
	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	kmem_free((void *)pg->uanon, PAGE_SIZE);
	kmem_free(pg, sizeof(*pg));
}

struct rumpva {
	vaddr_t addr;
	struct vm_page *pg;

	LIST_ENTRY(rumpva) entries;
};
static LIST_HEAD(, rumpva) rvahead = LIST_HEAD_INITIALIZER(rvahead);
static kmutex_t rvamtx;

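/*
 * Record a virtual address -> page translation on the global list.
 * The list is later consulted by uvm_pageratop().
 */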
void
rumpvm_enterva(vaddr_t addr, struct vm_page *pg)
{
	struct rumpva *rva;

	rva = kmem_alloc(sizeof(struct rumpva), KM_SLEEP);
	rva->addr = addr;
	rva->pg = pg;
	mutex_enter(&rvamtx);
	LIST_INSERT_HEAD(&rvahead, rva, entries);
	mutex_exit(&rvamtx);
}

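/*
 * Forget all translations recorded for uobj's pages, unbusying the
 * pages in the process.
 */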
void
rumpvm_flushva(struct uvm_object *uobj)
{
	struct rumpva *rva, *rva_next;

	mutex_enter(&rvamtx);
	for (rva = LIST_FIRST(&rvahead); rva; rva = rva_next) {
		rva_next = LIST_NEXT(rva, entries);
		if (rva->pg->uobject == uobj) {
			LIST_REMOVE(rva, entries);
			uvm_page_unbusy(&rva->pg, 1);
			kmem_free(rva, sizeof(*rva));
		}
	}
	mutex_exit(&rvamtx);
}

/*
 * Anon object stuff
 */

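/*
 * Pager get: look up each requested page, waiting if it is busy, and
 * create fresh zero-filled pages on demand.  All pages are returned
 * to the caller busy.  Called with the object locked; the lock is
 * dropped before return.
 */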
static int
ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
	int *npages, int centeridx, vm_prot_t access_type,
	int advice, int flags)
{
	struct vm_page *pg;
	int i;

	if (centeridx)
		panic("%s: centeridx != 0 not supported", __func__);

	/* loop over pages */
	off = trunc_page(off);
	for (i = 0; i < *npages; i++) {
 retrylookup:
		pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
		if (pg) {
			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "aogetpg", 0);
				goto retrylookup;
			}
			pg->flags |= PG_BUSY;
			pgs[i] = pg;
		} else {
			pg = rumpvm_makepage(uobj, off + (i << PAGE_SHIFT));
			pgs[i] = pg;
		}
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

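/*
 * Pager put: only freeing every page at once (PGO_FREE|PGO_ALLPAGES)
 * is implemented; anything else just drops the object lock.  Called
 * with the object locked; the lock is dropped before return.
 */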
static int
ao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	/* we only support freeing all pages at once for now */
	if ((flags & PGO_FREE) == 0 || (flags & PGO_ALLPAGES) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL)
		uvm_pagefree(pg);
	mutex_exit(&uobj->vmobjlock);

	return 0;
}

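/*
 * Create an anonymous UVM object.  The size and flags arguments are
 * accepted for interface compatibility but not used here.
 */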
struct uvm_object *
uao_create(vsize_t size, int flags)
{
	struct uvm_object *uobj;

	uobj = kmem_zalloc(sizeof(struct uvm_object), KM_SLEEP);
	uobj->pgops = &aobj_pager;
	TAILQ_INIT(&uobj->memq);
	mutex_init(&uobj->vmobjlock, MUTEX_DEFAULT, IPL_NONE);

	return uobj;
}

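/*
 * Destroy an anonymous UVM object, freeing all of its pages.  Note
 * that ao_put() releases the object lock, so the mutex can be
 * destroyed afterwards.
 */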
void
uao_detach(struct uvm_object *uobj)
{

	mutex_enter(&uobj->vmobjlock);
	ao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
	mutex_destroy(&uobj->vmobjlock);
	kmem_free(uobj, sizeof(*uobj));
}

/*
 * Misc routines
 */

static kmutex_t cachepgmtx;

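/*
 * Initialize the vm emulation: fake memory statistics, the locks,
 * and the kernel and kmem maps.
 */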
void
rumpvm_init(void)
{

	uvmexp.free = 1024*1024; /* XXX */
	uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */
	rump_vmspace.vm_map.pmap = pmap_kernel();

	mutex_init(&rvamtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_pageqlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&cachepgmtx, MUTEX_DEFAULT, IPL_NONE);

	kernel_map->pmap = pmap_kernel();
	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
	kmem_map->pmap = pmap_kernel();
	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
}

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, voff_t off, vsize_t locklim)
{

	panic("%s: unimplemented", __func__);
}

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

/* Called with the vm object locked */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		if (pg->offset == off) {
			return pg;
		}
	}

	return NULL;
}

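/*
 * Translate a virtual address back to its vm page by a linear scan
 * of the translations recorded with rumpvm_enterva().
 */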
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct rumpva *rva;

	mutex_enter(&rvamtx);
	LIST_FOREACH(rva, &rvahead, entries)
		if (rva->addr == va)
			break;
	mutex_exit(&rvamtx);

	if (rva == NULL)
		panic("%s: va %llu", __func__, (unsigned long long)va);

	return rva->pg;
}

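/*
 * Unbusy the given pages: wake up anyone waiting on them and free
 * any page which was released while busy.
 */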
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL)
			continue;

		KASSERT(pg->flags & PG_BUSY);
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		if (pg->flags & PG_RELEASED)
			uvm_pagefree(pg);
		else
			pg->flags &= ~(PG_WANTED|PG_BUSY);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}

struct vm_map_kernel *
vm_map_to_kernel(struct vm_map *map)
{

	return (struct vm_map_kernel *)map;
}

bool
vm_map_starved_p(struct vm_map *map)
{

	return false;
}

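/*
 * Track the number of in-flight pageouts on behalf of the pagedaemon.
 */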
void
uvm_pageout_start(int npages)
{

	uvmexp.paging += npages;
}

void
uvm_pageout_done(int npages)
{

	uvmexp.paging -= npages;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}
}

/* XXX: the following two are unfinished because LWPs are not refcounted yet */
void
uvm_lwp_hold(struct lwp *l)
{

	atomic_inc_uint(&l->l_holdcnt);
}

void
uvm_lwp_rele(struct lwp *l)
{

	atomic_dec_uint(&l->l_holdcnt);
}

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
	struct vm_page **opp)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_object_printit(struct uvm_object *uobj, bool full,
	void (*pr)(const char *, ...))
{

	/* nada for now */
}

int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
{

	/* nada for now */
	return 0;
}

/*
 * Kmem
 */

#ifndef RUMP_USE_REAL_ALLOCATORS
void
kmem_init(void)
{

	/* nothing to do */
}

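/*
 * The kmem_* interfaces are simple pass-throughs to the host
 * allocator via the rumpuser hypercall layer.
 */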
void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}

void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}

void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
#endif /* RUMP_USE_REAL_ALLOCATORS */

/*
 * UVM km
 */

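/*
 * Allocate kernel virtual address space by anonymously mmapping host
 * memory.  The alignment is converted into the log2 value expected
 * by rumpuser_anonmmap().
 */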
vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv;
	int alignbit, error;

	alignbit = 0;
	if (align) {
		alignbit = ffs(align)-1;
	}

	rv = rumpuser_anonmmap(size, alignbit, flags & UVM_KMF_EXEC, &error);
	if (rv == NULL) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	rumpuser_unmap((void *)vaddr, size);
}

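/*
 * Submaps are not tracked in rump, so hand back a dummy, non-NULL
 * cookie which the callers treat as an opaque map pointer.
 */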
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
{

	return (struct vm_map *)417416;
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{
	void *rv;
	int error;

	/*
	 * Allocate with anonmmap so that the page is properly aligned
	 * and can later be released with rumpuser_unmap() in the free
	 * routine below.
	 */
	rv = rumpuser_anonmmap(PAGE_SIZE, PAGE_SHIFT, 0, &error);
	if (rv == NULL && waitok)
		panic("fixme: poolpage alloc failed");

	return (vaddr_t)rv;
}

void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{

	rumpuser_unmap((void *)addr, PAGE_SIZE);
}

vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
	void *rv;
	int error;

	rv = rumpuser_anonmmap(PAGE_SIZE, PAGE_SHIFT, 0, &error);
	if (rv == NULL && waitok)
		panic("fixme: poolpage alloc failed");

	return (vaddr_t)rv;
}

void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
{

	rumpuser_unmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Mapping and vm space locking routines.
 * XXX: these don't work for non-local vmspaces
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	KASSERT(vs == &rump_vmspace);
	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

	KASSERT(vs == &rump_vmspace);
}

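/*
 * No remapping is needed in rump's single address space, so these
 * only save and restore the buffer pointers.
 */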
void
vmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_saveaddr = bp->b_data;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}