/*	$NetBSD: uvm_bio.c,v 1.49 2006/09/30 15:37:22 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.49 2006/09/30 15:37:22 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
		    int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
	    ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
	    (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
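
/*
 * A worked example of the window arithmetic above (a sketch assuming
 * PAGE_SHIFT == 12 and the default ubc_winshift of 13, i.e. 8kB windows):
 * a kernel fault at byte 0x5000 into the ubc kva range maps to umap
 * index 0x5000 >> 13 == 2 and slot offset 0x5000 & (0x2000 - 1) == 0x1000
 * within that window.  UBC_UMAP_ADDR() inverts the index calculation,
 * recovering a window's kva from its ubc_map slot.
 */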

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
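
/*
 * A ubc_map moves between three states: free (uobj == NULL, on an
 * inactive queue), cached (uobj != NULL but refcount == 0, on the hash
 * and an inactive queue, with its kernel mapping possibly still intact),
 * and active (refcount > 0, off the inactive queues).  ubc_alloc() moves
 * a map toward active, ubc_release() back toward cached or free.
 */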

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif
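
/*
 * Note on UBC_NQUEUES: when the pmap provides PMAP_PREFER (i.e. a
 * virtually-indexed cache with address-alignment preferences), the
 * inactive list is split into one queue per cache "color" and
 * UBC_QUEUE() picks the queue by window-aligned file offset, so a
 * recycled window's virtual address stays cache-compatible with the
 * offsets it will map.  Without PMAP_PREFER a single queue suffices.
 */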

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */
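/*
 * This runs via ubc_pager.pgo_fault when the kernel touches an address
 * in a window handed out by ubc_alloc() whose page is not yet entered
 * in the kernel pmap: it fetches the pages from the mapped object's
 * pager and enters them into the window.
 */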

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		boolean_t rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue;	/* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */
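/*
 * A typical caller maps a window, copies through it with uiomove() and
 * releases the window again, looping because *lenp is clipped to what
 * fits in a single window.  A sketch (assuming a vnode's uvm_object and
 * a struct uio; locking conventions are the caller's concern):
 *
 *	while (uio->uio_resid > 0) {
 *		vsize_t bytes = uio->uio_resid;
 *		void *win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
 *		    &bytes, UVM_ADV_SEQUENTIAL, UBC_WRITE);
 *		error = uiomove(win, bytes, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 */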

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */
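/*
 * UBC_UNMAP is a caller-supplied hint for cache-alias-averse platforms:
 * passing it tears the window's kernel mapping down immediately instead
 * of caching it for reuse.  Callers typically decide with something like
 * (see UBC_WANT_UNMAP(), which is FALSE where aliases are harmless):
 *
 *	ubc_release(win, UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0);
 */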

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */