/*	$NetBSD: uvm_bio.c,v 1.42 2005/11/29 22:52:03 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.42 2005/11/29 22:52:03 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_fault_t, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
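
/*
 * Each ubc_map owns one window of ubc_winsize bytes of kernel virtual
 * space, so UBC_UMAP_ADDR() can recover a window's address from the
 * ubc_map's index in the umap array alone.  For example, with an
 * (illustrative) ubc_winshift of 13, window 3 sits at
 * ubc_object.kva + 3 * 8192.  UBC_QUEUE() spreads windows over the
 * inactive queues by mapped file offset so that, with PMAP_PREFER,
 * a reused window tends to land at a cache-compatible alignment.
 */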


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
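
/*
 * A ubc_map with refcount > 0 is in active use between ubc_alloc() and
 * ubc_release().  When the refcount drops to zero the window is parked
 * on an inactive queue for reuse, but it stays on its hash chain, so a
 * later ubc_alloc() for the same object/offset can revive the cached
 * mapping without touching the pmap again.
 */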

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};
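
/*
 * Only the fault hook is needed: the windows live in kernel_map, the
 * backing ubc_object is never referenced or detached, and all real
 * paging work is delegated to the mapped object's own pager through
 * its pgo_get routine.
 */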

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}
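
/*
 * After ubc_init() the whole window array is one kernel_map entry backed
 * by ubc_object, so any access to an address handed out by ubc_alloc()
 * that misses in the pmap is resolved by ubc_fault() below.
 */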

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_fault_t fault_type, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		boolean_t rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_pagefree(pg);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				uvm_unlock_pageq();
				pg = uvm_loanbreak(pg);
				uvm_lock_pageq();
				if (pg == NULL)
					continue; /* will re-fault */
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = (access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0;
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, access_type & mask);
		uvm_pageactivate(pg);
		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}
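
/*
 * Note that the loop above enters every page that pgo_get returned, not
 * just the page that faulted, so a sequential copy through the window
 * typically takes one fault per pgo_get call rather than one per page.
 */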

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}
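
/*
 * With UBC_FAULTBUSY, ubc_alloc() wires the window up front: the pages
 * come back busy from pgo_get and are entered into the kernel pmap
 * directly, so the caller's copy cannot fault or sleep.  The matching
 * ubc_release() (the UMAP_PAGES_LOCKED path below) zeroes any partial
 * tail page, unbusies the pages and tears the mappings down again.
 */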

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
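
/*
 * Example (a sketch, not part of this file): a typical caller moves data
 * through a window with uiomove() and releases it right away, looping
 * until the transfer is done.  This mirrors the vnode read/write paths;
 * the "uobj", "uio", "advice", "rw", "todo" and "error" names below are
 * assumptions for illustration only.
 */
#if 0 /* example only */
	while (todo > 0 && error == 0) {
		vsize_t bytelen = todo;
		void *win;

		/* ubc_alloc() trims bytelen to what fits in one window. */
		win = ubc_alloc(uobj, uio->uio_offset, &bytelen, advice,
		    rw == UIO_WRITE ? UBC_WRITE : UBC_READ);
		error = uiomove(win, bytelen, uio);
		ubc_release(win, 0);
		todo -= bytelen;
	}
#endif /* example only */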


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */