/*	$NetBSD: uvm_pager.c,v 1.131 2024/03/15 07:09:37 andvar Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.131 2024/03/15 07:09:37 andvar Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE (16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock __cacheline_aligned;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static int emerg_ncolors;
static bool emerginuse;

/*
 * uvm_pager_realloc_emerg: (re)allocate the emergency pager KVA region
 * so that it covers the current number of page colors.
 */

void
uvm_pager_realloc_emerg(void)
{
	vaddr_t new_emergva, old_emergva;
	int old_emerg_ncolors;

	if (__predict_true(emergva != 0 && emerg_ncolors >= uvmexp.ncolors))
		return;

	KASSERT(!emerginuse);

	new_emergva = uvm_km_alloc(kernel_map,
	    round_page(MAXPHYS) + ptoa(uvmexp.ncolors), ptoa(uvmexp.ncolors),
	    UVM_KMF_VAONLY);

	KASSERT(new_emergva != 0);

	old_emergva = emergva;
	old_emerg_ncolors = emerg_ncolors;

	/*
	 * don't support re-color in late boot anyway.
	 */
	if (0) /* XXX */
		mutex_enter(&pager_map_wanted_lock);

	emergva = new_emergva;
	emerg_ncolors = uvmexp.ncolors;
	wakeup(&old_emergva);

	if (0) /* XXX */
		mutex_exit(&pager_map_wanted_lock);

	if (old_emergva)
		uvm_km_free(kernel_map, old_emergva,
		    round_page(MAXPHYS) + ptoa(old_emerg_ncolors),
		    UVM_KMF_VAONLY);
}

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;

	uvm_pager_realloc_emerg();

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

#ifdef PMAP_DIRECT
/*
 * uvm_pagermapdirect: map a single page via the pmap's direct segment
 *
 * this is an abuse of pmap_direct_process(), since the kva is being grabbed
 * and no processing is taking place, but for now..
 */

static int
uvm_pagermapdirect(void *kva, size_t sz, void *cookie)
{

	KASSERT(sz == PAGE_SIZE);
	*(vaddr_t *)cookie = (vaddr_t)kva;
	return 0;
}
#endif

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = (curlwp == uvm.pagedaemon_lwp);
	const u_int first_color = VM_PGCOLOR(*pps);
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist,"(pps=%#jx, npages=%jd, first_color=%ju)",
	    (uintptr_t)pps, npages, first_color, 0);

#ifdef PMAP_DIRECT
	/*
	 * for a single page the direct mapped segment can be used.
	 */

	if (npages == 1) {
		int error __diagused;
		KASSERT((pps[0]->flags & PG_BUSY) != 0);
		error = pmap_direct_process(VM_PAGE_TO_PHYS(pps[0]), 0,
		    PAGE_SIZE, uvm_pagermapdirect, &kva);
		KASSERT(error == 0);
		UVMHIST_LOG(maphist, "<- done, direct (KVA=%#jx)", kva,0,0,0);
		return kva;
	}
#endif

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = ptoa(npages);
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET,
	    first_color, UVM_FLAG_COLORMATCH | UVM_FLAG_NOMERGE
	    | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva + ptoa(first_color);
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva; npages != 0; npages--, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		// KASSERT(!((VM_PAGE_TO_PHYS(pp) ^ cva) & uvmexp.colormask));
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=%#jx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the mapping (waking
 * up anyone wanting space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = ptoa(npages);
	struct vm_map_entry *entries;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, " (kva=%#jx, npages=%jd)", kva, npages,0,0);

#ifdef PMAP_DIRECT
	/*
	 * solitary pages are mapped directly.
	 */

	if (npages == 1) {
		UVMHIST_LOG(maphist,"<- done, direct", 0,0,0,0);
		return;
	}
#endif

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	pmap_update(pmap_kernel());

	if ((kva & ~ptoa(uvmexp.colormask)) == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		KASSERT(emerginuse);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_aio_aiodone_pages: do the per-page iodone processing for a
 * completed async i/o.
 */

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	krwlock_t *slock;
	int pageout_done;	/* number of PG_PAGEOUT pages processed */
	int swslot __unused;	/* used for VMSWAP */
	int i;
	bool swap;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->flags & PG_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = uobj->vmobjlock;
		rw_enter(slock, RW_WRITER);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
#if defined(VMSWAP)
		bool anon_disposed = false; /* XXX gcc */
#endif /* defined(VMSWAP) */

		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %#jx", (uintptr_t)pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = pg->uobject->vmobjlock;
			} else {
				slock = pg->uanon->an_lock;
			}
			rw_enter(slock, RW_WRITER);
			anon_disposed = (pg->flags & PG_RELEASED) != 0;
			KASSERT(!anon_disposed || pg->uobject != NULL ||
			    pg->uanon->an_ref == 0);
		}
#endif /* defined(VMSWAP) */

		if (write && uobj != NULL) {
			KASSERT(uvm_obj_page_writeback_p(pg));
			uvm_obj_page_clear_writeback(pg);
		}

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot __unused;	/* used for VMSWAP */
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				uvm_pagelock(pg);
				uvm_pageactivate(pg);
				uvm_pageunlock(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot __diagused;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->flags |= PG_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
			uvm_pagelock(pg);
			uvm_pageenqueue(pg);
			uvm_pageunlock(pg);
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && anon_disposed) {
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				rw_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (pageout_done != 0) {
		uvm_pageout_done(pageout_done);
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		rw_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		if (error != ENOMEM) {
			atomic_add_int(&uvmexp.swpgonly, npages);
		}
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		atomic_dec_uint(&uvmexp.pdpending);
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	const int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)];
	int i, error;
	bool write;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "bp %#jx", (uintptr_t)bp, 0,0,0);

	KASSERT(bp->b_bufsize <= MAXPHYS);
	KASSERT(npages <= __arraycount(pgs));

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i,
		    (uintptr_t)pgs[i], 0, 0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv __diagused;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}