/*	$NetBSD: uvm_bio.c,v 1.69 2010/05/29 23:17:53 rmind Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.69 2010/05/29 23:17:53 rmind Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

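/*
 * Worked example of the macros above (illustrative only; assumes 4KB
 * pages and ubc_winshift == 13, i.e. 8KB windows):
 *
 *	file offset 0x12345:
 *		window offset:	0x12345 & ~(0x2000 - 1) == 0x12000
 *		slot offset:	0x12345 &  (0x2000 - 1) ==  0x345
 *
 * UBC_HASH() mixes the object pointer with the page number of the
 * window offset to choose a hash chain, and UBC_UMAP_ADDR() turns a
 * ubc_map's index in the umap array back into the kernel virtual
 * address of its window.
 */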

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
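
/*
 * Life cycle of a ubc_map: while refcount > 0 it is pinned by callers
 * of ubc_alloc(); at refcount == 0 it sits on an inactive queue, still
 * hashed under its (uobj, offset) pair so the window (and, with
 * UMAP_MAPPING_CACHED set, its pmap entries) can be reused on a cache
 * hit; a window with uobj == NULL has either never been used or was
 * explicitly unmapped (UBC_UNMAP) and carries no cached state.
 */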

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};
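
/*
 * Only pgo_fault is provided: page faults taken on the kernel mapping
 * that ubc_init() establishes below are routed by uvm_fault() to
 * ubc_fault(); all other pager operations go through the underlying
 * object's own pager.
 */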

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}
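
/*
 * The uvm_map() call above reserves ubc_nwins << ubc_winshift bytes of
 * kernel_map backed by ubc_pager.  For instance (illustrative numbers
 * only; UBC_NWINS and UBC_WINSHIFT are machine-dependent tunables),
 * 1024 windows of 8KB give an 8MB pager-backed region whose faults are
 * resolved by ubc_fault() below.
 */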

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 */

static inline void
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	struct uvm_object *uobj;
	vm_prot_t mask;
	int error;
	bool rdonly;

	uobj = pg->uobject;
	mutex_enter(&uobj->vmobjlock);
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		mutex_enter(&uvm_pageqlock);
		uvm_pagefree(pg);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(&uobj->vmobjlock);
		return;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uobj->vmobjlock);
				uvm_wait("ubc_loanbrk");
				/*
				 * Note: will re-fault.
				 */
				return;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 */
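	/*
	 * (For example, a page backing a sparse file may have disk
	 * blocks allocated for only part of its range; keeping it
	 * read-only forces write faults through the pager so the
	 * missing blocks can be allocated.)
	 */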

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
	    (pg->flags & PG_RDONLY) != 0) ||
	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	mutex_enter(&uvm_pageqlock);
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	pg->flags &= ~(PG_BUSY|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);
	mutex_exit(&uobj->vmobjlock);

	if (error) {
		/*
		 * Note: will re-fault.
		 */
		uvm_wait("ubc_pmfail");
	}
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */
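
	/*
	 * (For example, hypothetically: a 1-byte store on a CPU that can
	 * only write whole words is done as load-word/modify/store-word,
	 * so the MMU reports the initial load as a read fault even though
	 * the caller told ubc_alloc() UBC_WRITE; the umap->writelen
	 * recorded there is the reliable signal of intent.)
	 */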

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	mutex_enter(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
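
	/*
	 * (Rough background, not from this file: the "compatible alias"
	 * trick aligns all mappings of a page so they index the same
	 * cache lines, which PMAP_PREFER arranges in ubc_init(); on a
	 * virtually-tagged cache the aliases still carry different tags,
	 * so only the access actually faulted on is granted there.)
	 */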
	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		ubc_fault_page(ufi, umap, pg, prot, access_type, va);
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	mutex_enter(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			mutex_exit(&ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz, NULL);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
again_faultbusy:
		memset(pgs, 0, sizeof(pgs));
		mutex_enter(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(&uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				mutex_exit(&uobj->vmobjlock);
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					mutex_enter(&uobj->vmobjlock);
					uvm_page_unbusy(pgs, npages);
					mutex_exit(&uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		bool rv;

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		mutex_enter(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	mutex_exit(&ubc_object.uobj.vmobjlock);
}
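
/*
 * Usage sketch for the window interface (illustrative only; "vp",
 * "src", "off" and "len" are hypothetical caller state, not part of
 * this file):
 *
 *	vsize_t bytelen = len;
 *	void *win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
 *	    UBC_WRITE);
 *	memcpy(win, src, bytelen);	(bytelen was clamped to the window)
 *	ubc_release(win, UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0);
 *
 * Since ubc_alloc() clamps *lenp to the end of one window, callers loop
 * until the full length is transferred; ubc_uiomove() below and
 * uvm_vnp_zerorange() at the end of this file are in-tree examples of
 * the loop.
 */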

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	voff_t off;
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}
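
/*
 * Example caller (illustrative sketch; "vp", "file_size" and the uio
 * setup are hypothetical, patterned on how vnode read paths use this
 * interface):
 *
 *	vsize_t bytes = MIN(uio->uio_resid, file_size - uio->uio_offset);
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytes, UVM_ADV_SEQUENTIAL,
 *	    UBC_READ | (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
 *
 * The loop above splits "bytes" across windows internally.  With
 * UBC_PARTIALOK an error stops the loop at once; without it the
 * remaining windows are still cycled through (and zero-filled in the
 * UBC_FAULTBUSY case) before the error is returned.
 */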


/*
 * uvm_vnp_zerorange:  set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}