/*	$NetBSD: uvm_bio.c,v 1.62 2007/07/27 09:50:37 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.62 2007/07/27 09:50:37 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset)						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

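/*
 * A worked example of the window arithmetic above (a sketch only; the
 * actual values depend on the port's UBC_WINSHIFT): with ubc_winshift
 * of 13 each window spans 8KB, so ubc_object.umap[3] maps the kva range
 * [ubc_object.kva + 0x6000, ubc_object.kva + 0x8000), and a file offset
 * of 0x12345 lands in the window whose umap->offset is 0x12000, at byte
 * 0x345 within the window.
 */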

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

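/*
 * A ubc_map describes one mapping window.  Its lifecycle, as implemented
 * below: ubc_alloc() either finds a cached window in the hash table or
 * recycles the head of an inactive queue, takes a reference and removes
 * the window from its queue; ubc_release() drops the reference and, when
 * it reaches zero, puts the window back on an inactive queue, keeping the
 * kernel mappings around (UMAP_MAPPING_CACHED) unless UBC_UNMAP was given.
 */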
struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */
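
/*
 * When UBC_STATS is defined, the counters below are attached as event
 * counters and can be inspected at run time with "vmstat -e" (they are
 * grouped under "ubc").
 */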

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}
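
/*
 * After ubc_init() the cache is one contiguous kernel va range of
 * ubc_nwins << ubc_winshift bytes starting at ubc_object.kva; faults
 * taken on it are resolved by ubc_fault() below, reached through the
 * pgo_fault hook of ubc_pager.
 */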

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

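	/*
	 * Size the getpages request: a read fault brings in everything
	 * from the faulting page to the end of the window, while a write
	 * fault covers the faulting page through the end of the region
	 * being written (PGO_PASTEOF allows allocating pages beyond the
	 * current end of file).
	 */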
	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof(pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		bool rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue; /* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
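
	/*
	 * Note that a request crossing a window boundary is truncated to
	 * the end of the window here; callers (e.g. ubc_uiomove()) must
	 * loop until the whole length has been processed.
	 */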

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz, NULL);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		/*
		 * count pages starting from the one containing "offset",
		 * since the pgo_get call below starts at trunc_page(offset).
		 */
		int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
		    PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
again_faultbusy:
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				simple_lock(&uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				simple_unlock(&uobj->vmobjlock);
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					simple_lock(&uobj->vmobjlock);
					uvm_page_unbusy(pgs, npages);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				pgs[i] = pg;
			}
			/*
			 * pgs[0] is the page containing "offset", so map
			 * from the page-aligned start of the request.
			 */
			pmap_kenter_pa(
			    va + trunc_page(slot_offset) + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg), VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		bool rv;

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	voff_t off;
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}
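
/*
 * Example usage (a hypothetical sketch, not part of this file): a
 * filesystem read path would move data from a vnode's pages to a uio
 * roughly like this, where "vp", "uio" and "advice" are the caller's
 * vnode, I/O request and access-pattern hint:
 *
 *	vsize_t bytelen = MIN(uio->uio_resid, vp->v_size - uio->uio_offset);
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
 *	    UBC_READ | UBC_PARTIALOK);
 */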

#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */