uvm_bio.c revision 1.34
/*	$NetBSD: uvm_bio.c,v 1.34 2005/01/15 15:10:49 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.34 2005/01/15 15:10:49 chs Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **, int,
	    int, vm_fault_t, vm_prot_t, int);
struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset)						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
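
/*
 * Worked example (illustrative; assumes ubc_winshift == 13, i.e. 8KB
 * windows): file offset 0x5234 falls in the window at umap offset
 * 0x4000, at byte 0x1234 within the window.  UBC_UMAP_ADDR() is the
 * inverse of the lookup done in ubc_fault(): window i is mapped at
 * ubc_object.kva + (i << ubc_winshift).
 */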

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
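
/*
 * Lifecycle (illustrative summary of the code below): a ubc_map with
 * refcount > 0 is checked out by ubc_alloc() and may not be recycled;
 * when ubc_release() drops the refcount to zero the window is parked on
 * an inactive queue but stays in the hash table, so a later ubc_alloc()
 * of the same (uobj, offset) pair can reuse the cached mapping.
 */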

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif
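
/*
 * Why multiple inactive queues (commentary): on PMAP_PREFER platforms,
 * windows are grouped by the cache "color" of their kernel virtual
 * address, and UBC_QUEUE() selects the queue matching the color of the
 * file offset, so a recycled window tends to have the alignment the
 * pmap prefers for that offset and virtual-cache aliases are avoided.
 */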

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}
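
/*
 * Resulting layout (illustrative): ubc_object.kva now points at a
 * contiguous kernel mapping of ubc_nwins << ubc_winshift bytes backed
 * by ubc_object.uobj; the first touch of window i faults into
 * ubc_fault(), which uses ubc_object.umap[i] to find the real object
 * and offset to page in.
 */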

/*
 * ubc_fault: fault routine for ubc mapping
 */

int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_pagefree(pg);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				uvm_unlock_pageq();
				pg = uvm_loanbreak(pg);
				uvm_lock_pageq();
				if (pg == NULL)
					continue; /* will re-fault */
			}
		}
		KASSERT(access_type == VM_PROT_READ ||
		    (pg->flags & PG_RDONLY) == 0);
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    (pg->flags & PG_RDONLY) ? prot & ~VM_PROT_WRITE : prot,
		    access_type);
		uvm_pageactivate(pg);
		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */
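
/*
 * Typical use (an illustrative sketch, not code from this file; "vp",
 * "uio" and "bytelen" stand for caller state in a vnode read/write
 * path, and callers normally loop because ubc_alloc() may shorten
 * bytelen to what fits in one window):
 *
 *	bytelen = uio->uio_resid;
 *	win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen, UBC_WRITE);
 *	error = uiomove(win, bytelen, uio);
 *	ubc_release(win, 0);
 */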

void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags = PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, 0, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}
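
/*
 * UBC_FAULTBUSY note (commentary): with UBC_FAULTBUSY the pages are
 * fetched and entered into the kernel pmap up front, so the caller's
 * access never faults; the pages remain busy until ubc_release(), which
 * is why UMAP_PAGES_LOCKED is set here and consumed there.
 */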

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(va, flags)
	void *va;
	int flags;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */