/*	$NetBSD: uvm_bio.c,v 1.48 2006/09/03 21:33:33 christos Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
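
/*
 * Overview: UBC maps ranges of uvm_objects into a fixed pool of
 * kernel-virtual "windows" (struct ubc_map), so that file data can be
 * copied with ordinary loads and stores instead of mapping pages one at
 * a time.  ubc_alloc() hands out a window covering an (object, offset)
 * range, ubc_fault() fills the window on demand from the object's pager,
 * and ubc_release() drops the reference; idle windows sit on inactive
 * queues and are recycled as needed.
 */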

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.48 2006/09/03 21:33:33 christos Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
/*
 * local data structures
 */
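
/*
 * UBC_HASH maps an (object, offset) pair to a chain in ubc_object.hash,
 * UBC_QUEUE picks the inactive queue used for a given file offset, and
 * UBC_UMAP_ADDR converts a ubc_map pointer back to the kernel virtual
 * address of its window (each window is ubc_winsize bytes).
 */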

#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
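
/*
 * a ubc_map sits on ubc_object.hash while it caches a mapping of some
 * object, and on one of the inactive queues whenever its refcount is
 * zero.  writeoff/writelen record the range covered by a pending
 * UBC_WRITE so that ubc_fault() can distinguish write faults from
 * read faults.
 */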

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

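/*
 * only a fault routine is needed here: ubc_object has no pager-managed
 * pages of its own, so ubc_fault() asks the underlying object's pgo_get
 * for the real pages and enters them into the faulting pmap.
 */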
struct uvm_pagerops ubc_pager =
{
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

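/*
 * ubc_nwins windows of ubc_winsize (1 << ubc_winshift) bytes each are
 * mapped contiguously at ubc_object.kva.  with PMAP_PREFER, the windows
 * are spread over ubc_nqueues inactive queues so that a window recycled
 * for a given offset tends to have a virtual address with the alignment
 * the pmap prefers for that offset.
 */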
int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */
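
/*
 * the faulting address identifies which window was touched.  the fault
 * is resolved by asking the window's underlying object for the pages
 * (pgo_get) and entering them into the faulting map.  access_type is
 * recomputed from the window's pending-write state, since the hardware
 * fault type alone is not reliable (see below).
 */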

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

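	/*
	 * for a read fault, map in the rest of the window; for a write
	 * fault, stop at the end of the pending write (rounded up to a
	 * page boundary) and allow pages past EOF, since the write may
	 * be extending the file.
	 */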
	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		boolean_t rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue; /* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = (access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0;
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */
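
/*
 * a rough sketch of the usual caller pattern (illustrative only, not
 * taken from this file):
 *
 *	while (uio->uio_resid > 0) {
 *		bytelen = uio->uio_resid;
 *		win = ubc_alloc(uobj, uio->uio_offset, &bytelen, advice,
 *		    UBC_READ);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * ubc_alloc() may trim *lenp to what fits in a single window, so
 * callers are expected to loop.
 */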

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
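		/*
		 * no window currently caches this (uobj, offset): take the
		 * first free window from this offset's inactive queue, or
		 * wait for one to be released if every window is in use.
		 */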
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

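	/*
	 * with UBC_FAULTBUSY the pages are brought in and entered into the
	 * kernel pmap here (pmap_kenter_pa), so the caller's access to the
	 * window does not have to fault; ubc_release() undoes this when it
	 * sees UMAP_PAGES_LOCKED.
	 */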
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

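	/*
	 * if ubc_alloc() pre-mapped the pages (UBC_FAULTBUSY), zero the
	 * part of the last page beyond the write, mark the pages dirty and
	 * active, tear down the kernel mappings again and unbusy the pages.
	 */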
	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */