/*	$NetBSD: vm.c,v 1.197 2023/09/24 09:33:26 martin Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation and the Research Foundation of
 * The Helsinki University of Technology.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual memory emulation routines.
 */

/*
 * XXX: we abuse pg->uanon for the virtual address of the storage
 * for each page.  phys_addr would fit the job description better,
 * except that it will create unnecessary lossage on some platforms
 * due to not being a pointer type.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.197 2023/09/24 09:33:26 martin Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/radixtree.h>
#include <sys/module.h>

#include <machine/pmap.h>

#if defined(__i386__) || defined(__x86_64__)
/*
 * This file abuses the pmap abstraction to create its own statically
 * allocated struct pmap object, even though it can't do anything
 * useful with such a thing from userland.  On x86 the struct pmap
 * definition is private, so we have to go to extra effort to abuse it
 * there.  This should be fixed -- all of the struct pmap definitions
 * should be private, and then rump can furnish its own fake struct
 * pmap without clashing with anything.
 */
#include <machine/pmap_private.h>
#endif
#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_device.h>

#include <rump-sys/kern.h>
#include <rump-sys/vfs.h>

#include <rump/rumpuser.h>

kmutex_t vmpage_lruqueue_lock; /* non-free page lock */
kmutex_t uvm_swap_data_lock;

struct uvmexp uvmexp;
struct uvm uvm;

#ifdef __uvmexp_pagesize
const int * const uvmexp_pagesize = &uvmexp.pagesize;
const int * const uvmexp_pagemask = &uvmexp.pagemask;
const int * const uvmexp_pageshift = &uvmexp.pageshift;
#endif

static struct vm_map kernel_map_store;
struct vm_map *kernel_map = &kernel_map_store;

static struct vm_map module_map_store;

static struct pmap pmap_kernel;
struct pmap rump_pmap_local;
struct pmap *const kernel_pmap_ptr = &pmap_kernel;

vmem_t *kmem_arena;
vmem_t *kmem_va_arena;

static unsigned int pdaemon_waiters;
static kmutex_t pdaemonmtx;
static kcondvar_t pdaemoncv, oomwait;

/* all local non-proc0 processes share this vmspace */
struct vmspace *rump_vmspace_local;

unsigned long rump_physmemlimit = RUMPMEM_UNLIMITED;
static unsigned long pdlimit = RUMPMEM_UNLIMITED; /* page daemon memlimit */
static unsigned long curphysmem;
static unsigned long dddlim;		/* 90% of memory limit used */
#define NEED_PAGEDAEMON() \
    (rump_physmemlimit != RUMPMEM_UNLIMITED && curphysmem > dddlim)
#define PDRESERVE (2*MAXPHYS)

/*
 * Try to free two pages worth of pages from objects.
 * If this successfully frees a full page cache page, we'll
 * free the released page plus PAGE_SIZE/sizeof(vm_page).
 */
#define PAGEDAEMON_OBJCHUNK (2*PAGE_SIZE / sizeof(struct vm_page))

/*
 * Keep a list of least recently used pages.  Since the only way a
 * rump kernel can "access" a page is via lookup, we put the page
 * at the back of queue every time a lookup for it is done.  If the
 * page is in front of this global queue and we're short of memory,
 * it's a candidate for pageout.
 */
static struct pglist vmpage_lruqueue;
static unsigned vmpage_onqueue;

/*
 * vm pages
 */
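/*
 * Constructor/destructor for the page$ pool cache below: each
 * struct vm_page is backed by one page of hypercall memory hung off
 * pg->uanon, as described in the comment at the top of this file.
 */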
static int
pgctor(void *arg, void *obj, int flags)
{
	struct vm_page *pg = obj;

	memset(pg, 0, sizeof(*pg));
	pg->uanon = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
	    (flags & PR_WAITOK) == PR_WAITOK, "pgalloc");
	return pg->uanon == NULL;
}

static void
pgdtor(void *arg, void *obj)
{
	struct vm_page *pg = obj;

	rump_hyperfree(pg->uanon, PAGE_SIZE);
}

static struct pool_cache pagecache;

/* stub for UVM_OBJ_IS_VNODE */
struct uvm_pagerops rump_uvm_vnodeops;
__weak_alias(uvm_vnodeops,rump_uvm_vnodeops);

/*
 * Called with the object locked.  We don't support anons.
 */
struct vm_page *
uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
	int flags, int strat, int free_list)
{
	struct vm_page *pg;

	KASSERT(uobj && rw_write_held(uobj->vmobjlock));
	KASSERT(anon == NULL);

	pg = pool_cache_get(&pagecache, PR_NOWAIT);
	if (__predict_false(pg == NULL)) {
		return NULL;
	}
	mutex_init(&pg->interlock, MUTEX_DEFAULT, IPL_NONE);

	pg->offset = off;
	pg->uobject = uobj;

	if (radix_tree_insert_node(&uobj->uo_pages, off >> PAGE_SHIFT,
	    pg) != 0) {
		pool_cache_put(&pagecache, pg);
		return NULL;
	}

	if (UVM_OBJ_IS_VNODE(uobj)) {
		if (uobj->uo_npages == 0) {
			struct vnode *vp = (struct vnode *)uobj;
			mutex_enter(vp->v_interlock);
			vp->v_iflag |= VI_PAGES;
			mutex_exit(vp->v_interlock);
		}
		pg->flags |= PG_FILE;
	}
	uobj->uo_npages++;

	pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE;
	if (flags & UVM_PGA_ZERO) {
		uvm_pagezero(pg);
	}

	/*
	 * Don't put anons on the LRU page queue.  We can't flush them
	 * (there's no concept of swap in a rump kernel), so no reason
	 * to bother with them.
	 */
	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_inc_uint(&vmpage_onqueue);
		mutex_enter(&vmpage_lruqueue_lock);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&vmpage_lruqueue_lock);
	} else {
		pg->flags |= PG_AOBJ;
	}

	return pg;
}
/*
 * Release a page.
 *
 * Called with the vm object locked.
 */
void
uvm_pagefree(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	struct vm_page *pg2 __unused;

	KASSERT(rw_write_held(uobj->vmobjlock));

	mutex_enter(&pg->interlock);
	uvm_pagewakeup(pg);
	mutex_exit(&pg->interlock);

	uobj->uo_npages--;
	pg2 = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
	KASSERT(pg == pg2);

	if (!UVM_OBJ_IS_AOBJ(uobj)) {
		mutex_enter(&vmpage_lruqueue_lock);
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&vmpage_lruqueue_lock);
		atomic_dec_uint(&vmpage_onqueue);
	}

	if (UVM_OBJ_IS_VNODE(uobj) && uobj->uo_npages == 0) {
		struct vnode *vp = (struct vnode *)uobj;
		mutex_enter(vp->v_interlock);
		vp->v_iflag &= ~VI_PAGES;
		mutex_exit(vp->v_interlock);
	}

	mutex_destroy(&pg->interlock);
	pool_cache_put(&pagecache, pg);
}

void
uvm_pagezero(struct vm_page *pg)
{

	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	memset((void *)pg->uanon, 0, PAGE_SIZE);
}

/*
 * uvm_page_owner_locked_p: return true if object associated with page is
 * locked.  this is a weak check for runtime assertions only.
 */

bool
uvm_page_owner_locked_p(struct vm_page *pg, bool exclusive)
{

	if (exclusive)
		return rw_write_held(pg->uobject->vmobjlock);
	else
		return rw_lock_held(pg->uobject->vmobjlock);
}

/*
 * Misc routines
 */

static kmutex_t pagermtx;

void
uvm_init(void)
{
	char buf[64];

	if (rumpuser_getparam("RUMP_MEMLIMIT", buf, sizeof(buf)) == 0) {
		unsigned long tmp;
		char *ep;
		int mult;

		tmp = strtoul(buf, &ep, 10);
		if (strlen(ep) > 1)
			panic("uvm_init: invalid RUMP_MEMLIMIT: %s", buf);

		/* mini-dehumanize-number */
		mult = 1;
		switch (*ep) {
		case 'k':
			mult = 1024;
			break;
		case 'm':
			mult = 1024*1024;
			break;
		case 'g':
			mult = 1024*1024*1024;
			break;
		case 0:
			break;
		default:
			panic("uvm_init: invalid RUMP_MEMLIMIT: %s", buf);
		}
		rump_physmemlimit = tmp * mult;

		if (rump_physmemlimit / mult != tmp)
			panic("uvm_init: RUMP_MEMLIMIT overflow: %s", buf);

		/* reserve some memory for the pager */
		if (rump_physmemlimit <= PDRESERVE)
			panic("uvm_init: system reserves %d bytes of mem, "
			    "only %lu bytes given",
			    PDRESERVE, rump_physmemlimit);
		pdlimit = rump_physmemlimit;
		rump_physmemlimit -= PDRESERVE;

		if (pdlimit < 1024*1024)
			printf("uvm_init: WARNING: <1MB RAM limit, "
			    "hope you know what you're doing\n");

#define HUMANIZE_BYTES 9
		CTASSERT(sizeof(buf) >= HUMANIZE_BYTES);
		format_bytes(buf, HUMANIZE_BYTES, rump_physmemlimit);
#undef HUMANIZE_BYTES
		dddlim = 9 * (rump_physmemlimit / 10);
	} else {
		strlcpy(buf, "unlimited (host limit)", sizeof(buf));
	}
	aprint_verbose("total memory = %s\n", buf);
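	/*
	 * Example (illustrative, not part of the original source): with
	 * the usual POSIX hypercall implementation the limit is taken
	 * from the RUMP_MEMLIMIT environment variable of the hosting
	 * process, so e.g. RUMP_MEMLIMIT=16m caps hypercall memory at
	 * roughly 16MB minus the PDRESERVE bytes kept back for the
	 * pagedaemon.  Only the k/m/g suffixes parsed above are accepted.
	 */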
	TAILQ_INIT(&vmpage_lruqueue);

	if (rump_physmemlimit == RUMPMEM_UNLIMITED) {
		uvmexp.npages = physmem;
	} else {
		uvmexp.npages = pdlimit >> PAGE_SHIFT;
		uvmexp.reserve_pagedaemon = PDRESERVE >> PAGE_SHIFT;
		uvmexp.freetarg = (rump_physmemlimit-dddlim) >> PAGE_SHIFT;
	}
	/*
	 * uvmexp.free is not used internally or updated.  The reason is
	 * that the memory hypercall allocator is allowed to allocate
	 * non-page sized chunks.  We use a byte count in curphysmem
	 * instead.
	 */
	uvmexp.free = uvmexp.npages;

#ifndef __uvmexp_pagesize
	uvmexp.pagesize = PAGE_SIZE;
	uvmexp.pagemask = PAGE_MASK;
	uvmexp.pageshift = PAGE_SHIFT;
#else
	uvmexp.pagesize = rumpuser_getpagesize();
	uvmexp.pagemask = uvmexp.pagesize-1;
	uvmexp.pageshift = ffs(uvmexp.pagesize)-1;
#endif

	mutex_init(&pagermtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&vmpage_lruqueue_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pdaemonmtx, MUTEX_DEFAULT, IPL_NONE);

	cv_init(&pdaemoncv, "pdaemon");
	cv_init(&oomwait, "oomwait");

	module_map = &module_map_store;

	kernel_map->pmap = pmap_kernel();

	pool_subsystem_init();

	kmem_arena = vmem_create("kmem", 0, 1024*1024, PAGE_SIZE,
	    NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	vmem_subsystem_init(kmem_arena);

	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, kmem_arena,
	    8 * PAGE_SIZE, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	pool_cache_bootstrap(&pagecache, sizeof(struct vm_page), 0, 0, 0,
	    "page$", NULL, IPL_NONE, pgctor, pgdtor, NULL);

	radix_tree_init();

	/* create vmspace used by local clients */
	rump_vmspace_local = kmem_zalloc(sizeof(*rump_vmspace_local), KM_SLEEP);
	uvmspace_init(rump_vmspace_local, &rump_pmap_local, 0, 0, false);
}

void
uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax,
    bool topdown)
{

	vm->vm_map.pmap = pmap;
	vm->vm_refcnt = 1;
}

int
uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
    bool new_pageable, int lockflags)
{
	return 0;
}

void
uvm_pagewire(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pageunwire(struct vm_page *pg)
{

	/* nada */
}

int
uvm_availmem(bool cached)
{

	return uvmexp.free;
}

void
uvm_pagelock(struct vm_page *pg)
{

	mutex_enter(&pg->interlock);
}

void
uvm_pagelock2(struct vm_page *pg1, struct vm_page *pg2)
{

	if (pg1 < pg2) {
		mutex_enter(&pg1->interlock);
		mutex_enter(&pg2->interlock);
	} else {
		mutex_enter(&pg2->interlock);
		mutex_enter(&pg1->interlock);
	}
}

void
uvm_pageunlock(struct vm_page *pg)
{

	mutex_exit(&pg->interlock);
}

void
uvm_pageunlock2(struct vm_page *pg1, struct vm_page *pg2)
{

	mutex_exit(&pg1->interlock);
	mutex_exit(&pg2->interlock);
}
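/*
 * Process resource limits.  Only the stack gets the usual default and
 * maximum; everything else is left unlimited, since there is no real
 * virtual memory to enforce the limits against.
 */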
/* where's your schmonz now? */
#define PUNLIMIT(a)	\
    p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
void
uvm_init_limits(struct proc *p)
{

#ifndef DFLSSIZ
#define DFLSSIZ (16*1024*1024)
#endif
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	PUNLIMIT(RLIMIT_DATA);
	PUNLIMIT(RLIMIT_RSS);
	PUNLIMIT(RLIMIT_AS);
	/* nice, cascade */
}
#undef PUNLIMIT

/*
 * This satisfies the "disgusting mmap hack" used by proplib.
 */
int
uvm_mmap_anon(struct proc *p, void **addrp, size_t size)
{
	int error;

	/* no reason in particular, but cf. uvm_default_mapaddr() */
	if (*addrp != NULL)
		panic("uvm_mmap() variant unsupported");

	if (RUMP_LOCALPROC_P(curproc)) {
		error = rumpuser_anonmmap(NULL, size, 0, 0, addrp);
	} else {
		error = rump_sysproxy_anonmmap(RUMP_SPVM2CTL(p->p_vmspace),
		    size, addrp);
	}
	return error;
}

/*
 * Stubs for things referenced from vfs_vnode.c but not used.
 */
const dev_t zerodev;

struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	return NULL;
}

struct pagerinfo {
	vaddr_t pgr_kva;
	int pgr_npages;
	struct vm_page **pgr_pgs;
	bool pgr_read;

	LIST_ENTRY(pagerinfo) pgr_entries;
};
static LIST_HEAD(, pagerinfo) pagerlist = LIST_HEAD_INITIALIZER(pagerlist);

/*
 * Pager "map" in routine.  Instead of mapping, we allocate memory
 * and copy page contents there.  The reason for copying instead of
 * mapping is simple: we do not assume we are running on virtual
 * memory.  Even if we could emulate virtual memory in some envs
 * such as userspace, copying is much faster than trying to awkwardly
 * cope with remapping (see "Design and Implementation" pp.95-98).
 * The downside of the approach is that the pager requires MAXPHYS
 * free memory to perform paging, but short of virtual memory or
 * making the pager do I/O in page-sized chunks we cannot do much
 * about that.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	/* allocate structures */
	pgri = kmem_alloc(sizeof(*pgri), KM_SLEEP);
	pgri->pgr_kva = (vaddr_t)kmem_alloc(npages * PAGE_SIZE, KM_SLEEP);
	pgri->pgr_npages = npages;
	pgri->pgr_pgs = kmem_alloc(sizeof(struct vm_page *) * npages, KM_SLEEP);
	pgri->pgr_read = (flags & UVMPAGER_MAPIN_READ) != 0;

	/* copy contents to "mapped" memory */
	for (i = 0, curkva = pgri->pgr_kva;
	    i < npages;
	    i++, curkva += PAGE_SIZE) {
		/*
		 * We need to copy the previous contents of the pages to
		 * the window even if we are reading from the
		 * device, since the device might not fill the contents of
		 * the full mapped range and we will end up corrupting
		 * data when we unmap the window.
		 */
		memcpy((void*)curkva, pgs[i]->uanon, PAGE_SIZE);
		pgri->pgr_pgs[i] = pgs[i];
	}

	mutex_enter(&pagermtx);
	LIST_INSERT_HEAD(&pagerlist, pgri, pgr_entries);
	mutex_exit(&pagermtx);

	return pgri->pgr_kva;
}

/*
 * map out the pager window.  return contents from VA to page storage
 * and free structures.
 *
 * Note: does not currently support partial frees
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	struct pagerinfo *pgri;
	vaddr_t curkva;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva == kva)
			break;
	}
	KASSERT(pgri);
	if (pgri->pgr_npages != npages)
		panic("uvm_pagermapout: partial unmapping not supported");
	LIST_REMOVE(pgri, pgr_entries);
	mutex_exit(&pagermtx);

	if (pgri->pgr_read) {
		for (i = 0, curkva = pgri->pgr_kva;
		    i < pgri->pgr_npages;
		    i++, curkva += PAGE_SIZE) {
			memcpy(pgri->pgr_pgs[i]->uanon,(void*)curkva,PAGE_SIZE);
		}
	}

	kmem_free(pgri->pgr_pgs, npages * sizeof(struct vm_page *));
	kmem_free((void*)pgri->pgr_kva, npages * PAGE_SIZE);
	kmem_free(pgri, sizeof(*pgri));
}

/*
 * convert va in pager window to page structure.
 * XXX: how expensive is this (global lock, list traversal)?
 */
struct vm_page *
uvm_pageratop(vaddr_t va)
{
	struct pagerinfo *pgri;
	struct vm_page *pg = NULL;
	int i;

	mutex_enter(&pagermtx);
	LIST_FOREACH(pgri, &pagerlist, pgr_entries) {
		if (pgri->pgr_kva <= va
		    && va < pgri->pgr_kva + pgri->pgr_npages*PAGE_SIZE)
			break;
	}
	if (pgri) {
		i = (va - pgri->pgr_kva) >> PAGE_SHIFT;
		pg = pgri->pgr_pgs[i];
	}
	mutex_exit(&pagermtx);

	return pg;
}

/*
 * Called with the vm object locked.
 *
 * Put vnode object pages at the end of the access queue to indicate
 * they have been recently accessed and should not be immediate
 * candidates for pageout.  Do not do this for lookups done by
 * the pagedaemon to mimic pmap_kentered mappings which don't track
 * access information.
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;
	bool ispagedaemon = curlwp == uvm.pagedaemon_lwp;

	pg = radix_tree_lookup_node(&uobj->uo_pages, off >> PAGE_SHIFT);
	if (pg && !UVM_OBJ_IS_AOBJ(pg->uobject) && !ispagedaemon) {
		mutex_enter(&vmpage_lruqueue_lock);
		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
		TAILQ_INSERT_TAIL(&vmpage_lruqueue, pg, pageq.queue);
		mutex_exit(&vmpage_lruqueue_lock);
	}

	return pg;
}

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i, pageout_done;

	KASSERT(npgs > 0);

	pageout_done = 0;
	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

#if 0
		KASSERT(uvm_page_owner_locked_p(pg, true));
#else
		/*
		 * uvm_page_owner_locked_p() is not available in rump,
		 * and rump doesn't support amaps anyway.
		 */
		KASSERT(rw_write_held(pg->uobject->vmobjlock));
#endif
		KASSERT(pg->flags & PG_BUSY);

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pg->flags |= PG_RELEASED;
			pageout_done++;
			atomic_inc_uint(&uvmexp.pdfreed);
		}
		if (pg->flags & PG_RELEASED) {
			KASSERT(pg->uobject != NULL ||
			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
		} else {
			KASSERT((pg->flags & PG_FAKE) == 0);
			pg->flags &= ~PG_BUSY;
			uvm_pagelock(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	if (pageout_done != 0) {
		uvm_pageout_done(pageout_done);
	}
}

void
uvm_pagewait(struct vm_page *pg, krwlock_t *lock, const char *wmesg)
{

	KASSERT(rw_lock_held(lock));
	KASSERT((pg->flags & PG_BUSY) != 0);

	mutex_enter(&pg->interlock);
	pg->pqflags |= PQ_WANTED;
	rw_exit(lock);
	UVM_UNLOCK_AND_WAIT(pg, &pg->interlock, false, wmesg, 0);
}

void
uvm_pagewakeup(struct vm_page *pg)
{

	KASSERT(mutex_owned(&pg->interlock));

	if ((pg->pqflags & PQ_WANTED) != 0) {
		pg->pqflags &= ~PQ_WANTED;
		wakeup(pg);
	}
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	/* XXX: guessing game */
	*active = 1024;
	*inactive = 1024;
}
int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{

	panic("%s: unimplemented", __func__);
}

void
uvm_unloan(void *v, int npages, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    struct vm_page **opp)
{

	return EBUSY;
}

struct vm_page *
uvm_loanbreak(struct vm_page *pg)
{

	panic("%s: unimplemented", __func__);
}

void
ubc_purge(struct uvm_object *uobj)
{

}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz, int topdown)
{

	return 0;
}

int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
    vm_prot_t prot, bool set_max)
{

	return EOPNOTSUPP;
}

int
uvm_map(struct vm_map *map, vaddr_t *startp, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align,
    uvm_flag_t flags)
{

	*startp = (vaddr_t)rump_hypermalloc(size, align, true, "uvm_map");
	return *startp != 0 ? 0 : ENOMEM;
}

void
uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
{

	rump_hyperfree((void*)start, end-start);
}


/*
 * UVM km
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	void *rv, *desired = NULL;
	int alignbit, error;

#ifdef __x86_64__
	/*
	 * On amd64, allocate all module memory from the lowest 2GB.
	 * This is because NetBSD kernel modules are compiled
	 * with -mcmodel=kernel and reserve only 4 bytes for
	 * offsets.  If we load code compiled with -mcmodel=kernel
	 * anywhere except the lowest or highest 2GB, it will not
	 * work.  Since userspace does not have access to the highest
	 * 2GB, use the lowest 2GB.
	 *
	 * Note: this assumes the rump kernel resides in
	 * the lowest 2GB as well.
	 *
	 * Note2: yes, it's a quick hack, but since this is the only
	 * place where we care about the map we're allocating from,
	 * just use a simple "if" instead of coming up with a fancy
	 * generic solution.
	 */
	if (map == module_map) {
		desired = (void *)(0x80000000 - size);
	}
#endif

	if (__predict_false(map == module_map)) {
		alignbit = 0;
		if (align) {
			alignbit = ffs(align)-1;
		}
		error = rumpuser_anonmmap(desired, size, alignbit,
		    flags & UVM_KMF_EXEC, &rv);
	} else {
		error = rumpuser_malloc(size, align, &rv);
	}

	if (error) {
		if (flags & (UVM_KMF_CANFAIL | UVM_KMF_NOWAIT))
			return 0;
		else
			panic("uvm_km_alloc failed");
	}

	if (flags & UVM_KMF_ZERO)
		memset(rv, 0, size);

	return (vaddr_t)rv;
}

void
uvm_km_free(struct vm_map *map, vaddr_t vaddr, vsize_t size, uvm_flag_t flags)
{

	if (__predict_false(map == module_map))
		rumpuser_unmap((void *)vaddr, size);
	else
		rumpuser_free((void *)vaddr, size);
}

int
uvm_km_protect(struct vm_map *map, vaddr_t vaddr, vsize_t size, vm_prot_t prot)
{
	return 0;
}

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
    vsize_t size, int pageable, bool fixed, struct vm_map *submap)
{

	return (struct vm_map *)417416;
}

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	vaddr_t va;
	va = (vaddr_t)rump_hypermalloc(size, PAGE_SIZE,
	    (flags & VM_SLEEP), "kmalloc");

	if (va) {
		*addr = va;
		return 0;
	} else {
		return ENOMEM;
	}
}

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	rump_hyperfree((void *)addr, size);
}

/*
 * VM space locking routines.  We don't really have to do anything,
 * since the pages are always "wired" (both local and remote processes).
 */
int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access)
{

	return 0;
}

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{

}

/*
 * For the local case the buffer mappers don't need to do anything.
 * For the remote case we need to reserve space and copy data in or
 * out, depending on B_READ/B_WRITE.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	int error = 0;

	bp->b_saveaddr = bp->b_data;

	/* remote case */
	if (!RUMP_LOCALPROC_P(curproc)) {
		bp->b_data = rump_hypermalloc(len, 0, true, "vmapbuf");
		if (BUF_ISWRITE(bp)) {
			error = copyin(bp->b_saveaddr, bp->b_data, len);
			if (error) {
				rump_hyperfree(bp->b_data, len);
				bp->b_data = bp->b_saveaddr;
				bp->b_saveaddr = 0;
			}
		}
	}

	return error;
}

void
vunmapbuf(struct buf *bp, vsize_t len)
{

	/* remote case */
	if (!RUMP_LOCALPROC_P(bp->b_proc)) {
		if (BUF_ISREAD(bp)) {
			bp->b_error = copyout_proc(bp->b_proc,
			    bp->b_data, bp->b_saveaddr, len);
		}
		rump_hyperfree(bp->b_data, len);
	}

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

void
uvmspace_addref(struct vmspace *vm)
{

	/*
	 * No dynamically allocated vmspaces exist.
	 */
}

void
uvmspace_free(struct vmspace *vm)
{

	/* nothing for now */
}

/*
 * page life cycle stuff.  it really doesn't exist, so just stubs.
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedeactivate(struct vm_page *pg)
{

	/* nada */
}

void
uvm_pagedequeue(struct vm_page *pg)
{

	/* nada*/
}

void
uvm_pageenqueue(struct vm_page *pg)
{

	/* nada */
}

void
uvmpdpol_anfree(struct vm_anon *an)
{

	/* nada */
}

/*
 * Physical address accessors.
 */

struct vm_page *
uvm_phys_to_vm_page(paddr_t pa)
{

	return NULL;
}

paddr_t
uvm_vm_page_to_phys(const struct vm_page *pg)
{

	return 0;
}

vaddr_t
uvm_uarea_alloc(void)
{

	/* non-zero */
	return (vaddr_t)11;
}

void
uvm_uarea_free(vaddr_t uarea)
{

	/* nata, so creamy */
}

/*
 * Routines related to the Page Baroness.
 */

void
uvm_wait(const char *msg)
{

	if (__predict_false(rump_threads == 0))
		panic("pagedaemon missing (RUMP_THREADS = 0)");

	if (curlwp == uvm.pagedaemon_lwp) {
		/* is it possible for us to later get memory? */
		if (!uvmexp.paging)
			panic("pagedaemon out of memory");
	}

	mutex_enter(&pdaemonmtx);
	pdaemon_waiters++;
	cv_signal(&pdaemoncv);
	cv_wait(&oomwait, &pdaemonmtx);
	mutex_exit(&pdaemonmtx);
}

void
uvm_pageout_start(int npages)
{

	mutex_enter(&pdaemonmtx);
	uvmexp.paging += npages;
	mutex_exit(&pdaemonmtx);
}

void
uvm_pageout_done(int npages)
{

	if (!npages)
		return;

	mutex_enter(&pdaemonmtx);
	KASSERT(uvmexp.paging >= npages);
	uvmexp.paging -= npages;

	if (pdaemon_waiters) {
		pdaemon_waiters = 0;
		cv_broadcast(&oomwait);
	}
	mutex_exit(&pdaemonmtx);
}
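/*
 * Try to push a single page out through its owner's pager.  On success
 * the LRU queue lock has been dropped and the object's pgo_put routine
 * was called with PGO_CLEANIT|PGO_FREE; on failure the caller still
 * holds the LRU queue lock.
 */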
static bool
processpage(struct vm_page *pg)
{
	struct uvm_object *uobj;

	uobj = pg->uobject;
	if (rw_tryenter(uobj->vmobjlock, RW_WRITER)) {
		if ((pg->flags & PG_BUSY) == 0) {
			mutex_exit(&vmpage_lruqueue_lock);
			uobj->pgops->pgo_put(uobj, pg->offset,
			    pg->offset + PAGE_SIZE,
			    PGO_CLEANIT|PGO_FREE);
			KASSERT(!rw_write_held(uobj->vmobjlock));
			return true;
		} else {
			rw_exit(uobj->vmobjlock);
		}
	}

	return false;
}

/*
 * The Diabolical pageDaemon Director (DDD).
 *
 * This routine can always use better heuristics.
 */
void
uvm_pageout(void *arg)
{
	struct vm_page *pg;
	struct pool *pp, *pp_first;
	int cleaned, skip, skipped;
	bool succ;

	mutex_enter(&pdaemonmtx);
	for (;;) {
		if (pdaemon_waiters) {
			pdaemon_waiters = 0;
			cv_broadcast(&oomwait);
		}
		if (!NEED_PAGEDAEMON()) {
			kernel_map->flags &= ~VM_MAP_WANTVA;
			cv_wait(&pdaemoncv, &pdaemonmtx);
		}
		uvmexp.pdwoke++;

		/* tell the world that we are hungry */
		kernel_map->flags |= VM_MAP_WANTVA;
		mutex_exit(&pdaemonmtx);

		/*
		 * step one: reclaim the page cache.  this should give
		 * us the biggest earnings since whole pages are released
		 * into backing memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * Ok, so that didn't help.  Next, try to hunt memory
		 * by pushing out vnode pages.  The pages might contain
		 * useful cached data, but we need the memory.
		 */
		cleaned = 0;
		skip = 0;
 again:
		mutex_enter(&vmpage_lruqueue_lock);
		while (cleaned < PAGEDAEMON_OBJCHUNK) {
			skipped = 0;
			TAILQ_FOREACH(pg, &vmpage_lruqueue, pageq.queue) {

				/*
				 * skip over pages we _might_ have tried
				 * to handle earlier.  they might not be
				 * exactly the same ones, but I'm not too
				 * concerned.
				 */
				while (skipped++ < skip)
					continue;

				if (processpage(pg)) {
					cleaned++;
					goto again;
				}

				skip++;
			}
			break;
		}
		mutex_exit(&vmpage_lruqueue_lock);

		/*
		 * And of course we need to reclaim the page cache
		 * again to actually release memory.
		 */
		pool_cache_reclaim(&pagecache);
		if (!NEED_PAGEDAEMON()) {
			mutex_enter(&pdaemonmtx);
			continue;
		}

		/*
		 * And then drain the pools.  Wipe them out ... all of them.
		 */
		for (pp_first = NULL;;) {
			rump_vfs_drainbufs(10 /* XXX: estimate! */);

			succ = pool_drain(&pp);
			if (succ || pp == pp_first)
				break;

			if (pp_first == NULL)
				pp_first = pp;
		}

		/*
		 * Need to use PYEC on our bag of tricks.
		 * Unfortunately, the wife just borrowed it.
		 */

		mutex_enter(&pdaemonmtx);
		if (!succ && cleaned == 0 && pdaemon_waiters &&
		    uvmexp.paging == 0) {
			kpause("pddlk", false, hz, &pdaemonmtx);
		}
	}

	panic("you can swap out any time you like, but you can never leave");
}

void
uvm_kick_pdaemon()
{

	/*
	 * Wake up the diabolical pagedaemon director if we are over
	 * 90% of the memory limit.  This is a complete and utter
	 * stetson-harrison decision which you are allowed to finetune.
	 * Don't bother locking.  If we have some unflushed caches,
	 * other waker-uppers will deal with the issue.
	 */
	if (NEED_PAGEDAEMON()) {
		cv_signal(&pdaemoncv);
	}
}
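/*
 * Hypercall memory allocation front-end.  As implemented below, an
 * allocation is first charged against the memory limit (the pagedaemon
 * is allowed to dig into the PDRESERVE slack) and only then handed to
 * rumpuser_malloc().  If either step fails and the caller can wait,
 * we sleep in uvm_wait() until the pagedaemon has made progress.
 */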
void *
rump_hypermalloc(size_t howmuch, int alignment, bool waitok, const char *wmsg)
{
	const unsigned long thelimit =
	    curlwp == uvm.pagedaemon_lwp ? pdlimit : rump_physmemlimit;
	unsigned long newmem;
	void *rv;
	int error;

	uvm_kick_pdaemon(); /* ouch */

	/* first we must be within the limit */
 limitagain:
	if (thelimit != RUMPMEM_UNLIMITED) {
		newmem = atomic_add_long_nv(&curphysmem, howmuch);
		if (newmem > thelimit) {
			newmem = atomic_add_long_nv(&curphysmem, -howmuch);
			if (!waitok) {
				return NULL;
			}
			uvm_wait(wmsg);
			goto limitagain;
		}
	}

	/* second, we must get something from the backend */
 again:
	error = rumpuser_malloc(howmuch, alignment, &rv);
	if (__predict_false(error && waitok)) {
		uvm_wait(wmsg);
		goto again;
	}

	return rv;
}

void
rump_hyperfree(void *what, size_t size)
{

	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		atomic_add_long(&curphysmem, -size);
	}
	rumpuser_free(what, size);
}

/*
 * UBC
 */

#define PAGERFLAGS (PGO_SYNCIO | PGO_NOBLOCKALLOC | PGO_NOTIMESTAMP)

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page **pgs;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int npages, i;

	if (maxpages == 0)
		return;

	pgs = kmem_alloc(maxpages * sizeof(pgs), KM_SLEEP);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		(void)uobj->pgops->pgo_get(uobj, trunc_page(off),
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE,
		    0, PAGERFLAGS | PGO_PASTEOF);
		KASSERT(npages > 0);

		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			uint8_t *start;
			size_t chunkoff, chunklen;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pg->uanon + chunkoff;

			memset(start, 0, chunklen);
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);

			off += chunklen;
			len -= chunklen;
		}
		uvm_page_unbusy(pgs, npages);
	}
	rw_exit(uobj->vmobjlock);
	kmem_free(pgs, maxpages * sizeof(pgs));
}

#define len2npages(off, len)						\
    ((round_page(off+len) - trunc_page(off)) >> PAGE_SHIFT)
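/*
 * Copy data between a uio and the pages backing uobj.  Instead of real
 * UBC windows, the rump version fetches the pages with pgo_get() and
 * copies directly to/from the page storage (pg->uanon), marking pages
 * dirty when writing.
 */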
int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
    int advice, int flags)
{
	struct vm_page **pgs;
	int npages = len2npages(uio->uio_offset, todo);
	size_t pgalloc;
	int i, rv, pagerflags;
	vm_prot_t prot;

	pgalloc = npages * sizeof(pgs);
	pgs = kmem_alloc(pgalloc, KM_SLEEP);

	pagerflags = PAGERFLAGS;
	if (flags & UBC_WRITE)
		pagerflags |= PGO_PASTEOF;
	if (flags & UBC_FAULTBUSY)
		pagerflags |= PGO_OVERWRITE;

	prot = VM_PROT_READ;
	if (flags & UBC_WRITE)
		prot |= VM_PROT_WRITE;

	rw_enter(uobj->vmobjlock, RW_WRITER);
	do {
		npages = len2npages(uio->uio_offset, todo);
		memset(pgs, 0, pgalloc);
		rv = uobj->pgops->pgo_get(uobj, trunc_page(uio->uio_offset),
		    pgs, &npages, 0, prot, 0, pagerflags);
		if (rv)
			goto out;

		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			size_t xfersize;
			off_t pageoff;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
			pageoff = uio->uio_offset & PAGE_MASK;
			xfersize = MIN(MIN(todo, PAGE_SIZE), PAGE_SIZE-pageoff);
			KASSERT(xfersize > 0);
			rv = uiomove((uint8_t *)pg->uanon + pageoff,
			    xfersize, uio);
			if (rv) {
				uvm_page_unbusy(pgs, npages);
				rw_exit(uobj->vmobjlock);
				goto out;
			}
			if (uio->uio_rw == UIO_WRITE) {
				pg->flags &= ~PG_FAKE;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
			}
			todo -= xfersize;
		}
		uvm_page_unbusy(pgs, npages);
	} while (todo);
	rw_exit(uobj->vmobjlock);

 out:
	kmem_free(pgs, pgalloc);
	return rv;
}