/*	$NetBSD: uvm_bio.c,v 1.50 2006/09/30 15:38:06 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.50 2006/09/30 15:38:06 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

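/*
 * Illustrative note (editor's sketch, not part of the original source):
 * assuming the common defaults of 4KB pages and ubc_winshift == 13
 * (so ubc_winsize == 8192), the umap at index i covers the kernel VA
 * range
 *
 *	UBC_UMAP_ADDR(&ubc_object.umap[i]) .. + ubc_winsize - 1
 *
 * and ubc_alloc() splits a file offset into
 *
 *	umap_offset = offset & ~(ubc_winsize - 1);	window-aligned offset
 *	slot_offset = offset &  (ubc_winsize - 1);	offset within window
 *
 * The concrete shift value above is only an example; the real value
 * comes from UBC_WINSHIFT (possibly overridden by the ubc_winshift
 * tunable) and is sanity-checked in ubc_init().
 */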

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		boolean_t rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue; /* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
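
/*
 * Typical caller pattern (editor's illustrative sketch, not part of the
 * original file): a file system read/write path maps a window over the
 * vnode's uvm_object, copies through it, and releases the window.  The
 * uiomove() call, the advice value, and the error handling below are
 * assumptions about the caller, not something this file provides:
 *
 *	vsize_t bytes;
 *	void *win;
 *
 *	while (uio->uio_resid > 0 && error == 0) {
 *		bytes = MIN(ubc_winsize, uio->uio_resid);
 *		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytes,
 *		    UVM_ADV_SEQUENTIAL, UBC_WRITE);
 *		error = uiomove(win, bytes, uio);
 *		ubc_release(win, 0);
 *	}
 *
 * ubc_alloc() may shorten "bytes" to what remains in the current window,
 * so callers loop until the whole transfer is done.
 */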

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}
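
/*
 * Editor's note (illustrative, not from the original source): UBC_FAULTBUSY
 * is intended for callers that will overwrite the whole requested range.
 * The pages are fetched with PGO_OVERWRITE, entered into the kernel pmap up
 * front, and held busy (UMAP_PAGES_LOCKED) until ubc_release(), which also
 * zeroes the tail of the last page beyond the write.  A hypothetical caller
 * might look like:
 *
 *	win = ubc_alloc(uobj, off, &len, advice, UBC_WRITE | UBC_FAULTBUSY);
 *	memcpy(win, src, len);
 *	ubc_release(win, 0);
 */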

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */