/*	$NetBSD: uvm_device.c,v 1.80 2022/07/07 13:27:02 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.80 2022/07/07 13:27:02 riastradh Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>
#include <uvm/uvm_pmap.h>

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
static kmutex_t udv_lock __cacheline_aligned;

/*
 * functions
 */

static void	udv_init(void);
static void	udv_reference(struct uvm_object *);
static void	udv_detach(struct uvm_object *);
static int	udv_fault(struct uvm_faultinfo *, vaddr_t,
			  struct vm_page **, int, int, vm_prot_t,
			  int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_deviceops = {
	.pgo_init = udv_init,
	.pgo_reference = udv_reference,
	.pgo_detach = udv_detach,
	.pgo_fault = udv_fault,
};

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

static void
udv_init(void)
{
	LIST_INIT(&udv_list);
	mutex_init(&udv_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.  allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */

struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot,
    voff_t off,			/* used only for access check */
    vsize_t size		/* used only for access check */)
{
	struct uvm_device *udv, *lcv;
	const struct cdevsw *cdev;
	dev_mmap_t *mapfn;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(device=%#jx)", device,0,0,0);

	KASSERT(size > 0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	cdev = cdevsw_lookup(device);
	if (cdev == NULL) {
		return NULL;
	}
	mapfn = cdev->d_mmap;
	if (mapfn == NULL || mapfn == nommap) {
		return NULL;
	}

	/*
	 * Negative offsets on the object are not allowed, unless the
	 * device has affirmatively set D_NEGOFFSAFE.
	 */
	if ((cdev->d_flag & D_NEGOFFSAFE) == 0 && off != UVM_UNKNOWN_OFFSET) {
		if (off < 0)
			return NULL;
#if SIZE_MAX > UINT32_MAX	/* XXX -Wtype-limits */
		if (size > __type_max(voff_t))
			return NULL;
#endif
		if (off > __type_max(voff_t) - size)
			return NULL;
	}

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 * XXX clobbers off and size, but nothing else here needs them.
	 */
	do {
		KASSERTMSG((off % PAGE_SIZE) == 0, "off=%jd", (intmax_t)off);
		KASSERTMSG(size >= PAGE_SIZE, "size=%"PRIuVSIZE, size);
		if (cdev_mmap(device, off, accessprot) == -1)
			return NULL;
		KASSERT(off <= __type_max(voff_t) - PAGE_SIZE ||
		    (cdev->d_flag & D_NEGOFFSAFE) != 0);
		if (__predict_false(off > __type_max(voff_t) - PAGE_SIZE)) {
			/*
			 * off += PAGE_SIZE, with two's-complement
			 * wraparound, or
			 *
			 * off += PAGE_SIZE - 2*(VOFF_MAX + 1).
			 */
			CTASSERT(MIN_PAGE_SIZE >= 2);
			off -= __type_max(voff_t);
			off += PAGE_SIZE - 2;
			off -= __type_max(voff_t);
		} else {
			off += PAGE_SIZE;
		}
		size -= PAGE_SIZE;
	} while (size != 0);

	/*
	 * keep looping until we get it
	 */

	for (;;) {

		/*
		 * first, attempt to find it on the main list
		 */

		mutex_enter(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list.  put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, false,
				    "udv_attach",0);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			mutex_exit(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */

			rw_enter(lcv->u_obj.vmobjlock, RW_WRITER);
			lcv->u_obj.uo_refs++;
			rw_exit(lcv->u_obj.vmobjlock);

			mutex_enter(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			mutex_exit(&udv_lock);
			return &lcv->u_obj;
		}

		/*
		 * Did not find it on main list.  Need to allocate a new one.
		 */

		mutex_exit(&udv_lock);

		/* Note: both calls may allocate memory and sleep. */
		udv = kmem_alloc(sizeof(*udv), KM_SLEEP);
		uvm_obj_init(&udv->u_obj, &uvm_deviceops, true, 1);

		mutex_enter(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */

		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */

		if (lcv) {
			mutex_exit(&udv_lock);
			uvm_obj_destroy(&udv->u_obj, true);
			kmem_free(udv, sizeof(*udv));
			continue;
		}

		/*
		 * we have it!   init the data structures, add to list
		 * and return.
		 */

		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		mutex_exit(&udv_lock);
		return &udv->u_obj;
	}
	/*NOTREACHED*/
}

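/*
 * Example (illustrative sketch only, not part of this file): a minimal
 * character-device d_mmap routine of the kind udv_attach() probes above
 * via cdev_mmap() for every page of the requested range, and which
 * udv_fault() below calls again per page at fault time.  It returns a
 * machine-dependent page cookie for pmap_phys_address() (commonly atop()
 * of a physical address, or the result of bus_space_mmap()), or -1 if the
 * offset cannot be mapped.  The mydev_* names and softc fields here are
 * hypothetical.
 *
 *	paddr_t
 *	mydev_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct mydev_softc *sc =
 *		    device_lookup_private(&mydev_cd, minor(dev));
 *
 *		if (sc == NULL || off < 0 || off >= sc->sc_memsize)
 *			return -1;
 *		return atop(sc->sc_membase + off);
 *	}
 */
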
/*
 * udv_reference
 *
 * add a reference to a VM object.   Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void
udv_reference(struct uvm_object *uobj)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=%#jx, ref = %jd)",
	    (uintptr_t)uobj, uobj->uo_refs,0,0);
	rw_exit(uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
udv_detach(struct uvm_object *uobj)
{
	struct uvm_device *udv = (struct uvm_device *)uobj;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */
again:
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		rw_exit(uobj->vmobjlock);
		UVMHIST_LOG(maphist," <- done, uobj=%#jx, ref=%jd",
		    (uintptr_t)uobj,uobj->uo_refs,0,0);
		return;
	}

	/*
	 * is it being held?   if so, wait until others are done.
	 */

	mutex_enter(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		rw_exit(uobj->vmobjlock);
		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, false, "udv_detach",0);
		goto again;
	}

	/*
	 * got it!   nuke it now.
	 */

	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	mutex_exit(&udv_lock);
	rw_exit(uobj->vmobjlock);

	uvm_obj_destroy(uobj, true);
	kmem_free(udv, sizeof(*udv));
	UVMHIST_LOG(maphist," <- done, freed uobj=%#jx", (uintptr_t)uobj,
	    0, 0, 0);
}

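/*
 * Example (illustrative, not part of this file): the usual lifecycle of a
 * device mapping as seen from userland.  mmap(2) on a character device
 * special file ends up creating or referencing the device's uvm_device
 * object through udv_attach(); each page fault on the mapping is resolved
 * by udv_fault() below; munmap(2) or process exit drops the reference
 * through udv_detach().  "/dev/mydev" is a hypothetical device node.
 *
 *	int fd = open("/dev/mydev", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);			(object set up: udv_attach)
 *	((volatile uint32_t *)p)[0] = 1;	(page fault: udv_fault)
 *	munmap(p, len);				(reference dropped: udv_detach)
 */
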
/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine
 *	since we don't return vm_pages we need full control over the
 *	pmap_enter map in
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
    int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	dev_t device;
	vm_prot_t mapprot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"  flags=%#jx", flags,0,0,0);

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=%#jx)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EIO;
	}

	/*
	 * get device map function.
	 */

	device = udv->u_device;
	if (cdevsw_lookup(device) == NULL) {
		/* XXX This should not happen */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EIO;
	}

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter.  note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap).  since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */

	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;

	/*
	 * loop over the page range entering in as needed
	 */

	retval = 0;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = cdev_mmap(device, curr_offset, access_type);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;
		UVMHIST_LOG(maphist,
		    "  MAPPING: device: pm=%#jx, va=%#jx, pa=%#jx, at=%jd",
		    (uintptr_t)ufi->orig_map->pmap, curr_va, paddr, mapprot);
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			/*
			 * pmap_enter() didn't have the resource to
			 * enter this mapping.  Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj);
			return ENOMEM;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
	return retval;
}