      1 /*	$NetBSD: uvm_bio.c,v 1.96 2018/05/26 18:57:35 jdolecek Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1998 Chuck Silvers.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. The name of the author may not be used to endorse or promote products
     16  *    derived from this software without specific prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28  * SUCH DAMAGE.
     29  *
     30  */
     31 
     32 /*
     33  * uvm_bio.c: buffered i/o object mapping cache
     34  */
     35 
     36 #include <sys/cdefs.h>
     37 __KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.96 2018/05/26 18:57:35 jdolecek Exp $");
     38 
     39 #include "opt_uvmhist.h"
     40 #include "opt_ubc.h"
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/kmem.h>
     45 #include <sys/kernel.h>
     46 #include <sys/proc.h>
     47 #include <sys/vnode.h>
     48 
     49 #include <uvm/uvm.h>
     50 
     51 #ifdef PMAP_DIRECT
     52 #  define UBC_USE_PMAP_DIRECT
     53 #endif
     54 
     55 /*
     56  * local functions
     57  */
     58 
     59 static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
     60 			  int, int, vm_prot_t, int);
     61 static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
     62 #ifdef UBC_USE_PMAP_DIRECT
     63 static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *, vsize_t,
     64 			  int, int);
     65 static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t, size_t, int);
     66 
     67 bool ubc_direct = false; /* XXX */
     68 #endif
     69 
     70 /*
      71  * local data structures
     72  */
     73 
     74 #define UBC_HASH(uobj, offset) 						\
     75 	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
     76 				ubc_object.hashmask)
     77 
     78 #define UBC_QUEUE(offset)						\
     79 	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
     80 			     (UBC_NQUEUES - 1)])
     81 
     82 #define UBC_UMAP_ADDR(u)						\
     83 	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
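
/*
 * The UBC kernel virtual space is an array of ubc_nwins windows of
 * ubc_winsize bytes each.  UBC_HASH hashes an (object, offset) pair onto
 * a hash chain, UBC_QUEUE selects the inactive queue for a window-aligned
 * offset, and UBC_UMAP_ADDR converts a ubc_map pointer to the kernel
 * virtual address of its window.
 */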
     84 
     85 
     86 #define UMAP_PAGES_LOCKED	0x0001
     87 #define UMAP_MAPPING_CACHED	0x0002
     88 
     89 struct ubc_map {
     90 	struct uvm_object *	uobj;		/* mapped object */
     91 	voff_t			offset;		/* offset into uobj */
     92 	voff_t			writeoff;	/* write offset */
     93 	vsize_t			writelen;	/* write len */
     94 	int			refcount;	/* refcount on mapping */
     95 	int			flags;		/* extra state */
     96 	int			advice;
     97 
     98 	LIST_ENTRY(ubc_map)	hash;		/* hash table */
     99 	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
    100 	LIST_ENTRY(ubc_map)	list;		/* per-object list */
    101 };
    102 
    103 TAILQ_HEAD(ubc_inactive_head, ubc_map);
    104 static struct ubc_object {
    105 	struct uvm_object uobj;		/* glue for uvm_map() */
    106 	char *kva;			/* where ubc_object is mapped */
    107 	struct ubc_map *umap;		/* array of ubc_map's */
    108 
    109 	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
    110 	u_long hashmask;		/* mask for hashtable */
    111 
    112 	struct ubc_inactive_head *inactive;
    113 					/* inactive queues for ubc_map's */
    114 } ubc_object;
    115 
    116 const struct uvm_pagerops ubc_pager = {
    117 	.pgo_fault = ubc_fault,
    118 	/* ... rest are NULL */
    119 };
    120 
    121 int ubc_nwins = UBC_NWINS;
    122 int ubc_winshift __read_mostly = UBC_WINSHIFT;
    123 int ubc_winsize __read_mostly;
    124 #if defined(PMAP_PREFER)
    125 int ubc_nqueues;
    126 #define UBC_NQUEUES ubc_nqueues
    127 #else
    128 #define UBC_NQUEUES 1
    129 #endif
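
/*
 * With PMAP_PREFER, windows are spread over several inactive queues,
 * indexed by window-aligned offset, so that a recycled window's virtual
 * address tends to keep a cache-compatible alignment for the offset it
 * will map (see ubc_init() below).
 */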
    130 
    131 #if defined(UBC_STATS)
    132 
    133 #define	UBC_EVCNT_DEFINE(name) \
    134 struct evcnt ubc_evcnt_##name = \
    135 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
    136 EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
    137 #define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++
    138 
    139 #else /* defined(UBC_STATS) */
    140 
    141 #define	UBC_EVCNT_DEFINE(name)	/* nothing */
    142 #define	UBC_EVCNT_INCR(name)	/* nothing */
    143 
    144 #endif /* defined(UBC_STATS) */
    145 
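/*
 * These counters show up in "vmstat -e" output when the kernel is built
 * with options UBC_STATS.
 */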
    146 UBC_EVCNT_DEFINE(wincachehit)
    147 UBC_EVCNT_DEFINE(wincachemiss)
    148 UBC_EVCNT_DEFINE(faultbusy)
    149 
    150 /*
    151  * ubc_init
    152  *
    153  * init pager private data structures.
    154  */
    155 
    156 void
    157 ubc_init(void)
    158 {
    159 	/*
    160 	 * Make sure ubc_winshift is sane.
    161 	 */
    162 	if (ubc_winshift < PAGE_SHIFT)
    163 		ubc_winshift = PAGE_SHIFT;
    164 	ubc_winsize = 1 << ubc_winshift;
    165 
    166 	/*
    167 	 * init ubc_object.
    168 	 * alloc and init ubc_map's.
    169 	 * init inactive queues.
    170 	 * alloc and init hashtable.
    171 	 * map in ubc_object.
    172 	 */
    173 
    174 	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);
    175 
    176 	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
    177 	    KM_SLEEP);
    178 	if (ubc_object.umap == NULL)
    179 		panic("ubc_init: failed to allocate ubc_map");
    180 
    181 	vaddr_t va = (vaddr_t)1L;
    182 #ifdef PMAP_PREFER
    183 	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
    184 	ubc_nqueues = va >> ubc_winshift;
    185 	if (ubc_nqueues == 0) {
    186 		ubc_nqueues = 1;
    187 	}
    188 #endif
    189 	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
    190 	    sizeof(struct ubc_inactive_head), KM_SLEEP);
    191 	for (int i = 0; i < UBC_NQUEUES; i++) {
    192 		TAILQ_INIT(&ubc_object.inactive[i]);
    193 	}
    194 	for (int i = 0; i < ubc_nwins; i++) {
    195 		struct ubc_map *umap;
    196 		umap = &ubc_object.umap[i];
    197 		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
    198 				  umap, inactive);
    199 	}
    200 
    201 	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
    202 	    &ubc_object.hashmask);
    203 	for (int i = 0; i <= ubc_object.hashmask; i++) {
    204 		LIST_INIT(&ubc_object.hash[i]);
    205 	}
    206 
    207 	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
    208 		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
    209 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
    210 				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
    211 		panic("ubc_init: failed to map ubc_object");
    212 	}
    213 }
    214 
    215 void
    216 ubchist_init(void)
    217 {
    218 
    219 	UVMHIST_INIT(ubchist, 300);
    220 }
    221 
    222 /*
    223  * ubc_fault_page: helper of ubc_fault to handle a single page.
    224  *
    225  * => Caller has UVM object locked.
    226  * => Caller will perform pmap_update().
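 * => On failure (e.g. ENOMEM from a loan break), the caller drops its
 *    locks, waits for memory and lets the page fault again.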
    227  */
    228 
    229 static inline int
    230 ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    231     struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
    232 {
    233 	struct uvm_object *uobj;
    234 	vm_prot_t mask;
    235 	int error;
    236 	bool rdonly;
    237 
    238 	uobj = pg->uobject;
    239 	KASSERT(mutex_owned(uobj->vmobjlock));
    240 
    241 	if (pg->flags & PG_WANTED) {
    242 		wakeup(pg);
    243 	}
    244 	KASSERT((pg->flags & PG_FAKE) == 0);
    245 	if (pg->flags & PG_RELEASED) {
    246 		mutex_enter(&uvm_pageqlock);
    247 		uvm_pagefree(pg);
    248 		mutex_exit(&uvm_pageqlock);
    249 		return 0;
    250 	}
    251 	if (pg->loan_count != 0) {
    252 
    253 		/*
    254 		 * Avoid unneeded loan break, if possible.
    255 		 */
    256 
    257 		if ((access_type & VM_PROT_WRITE) == 0) {
    258 			prot &= ~VM_PROT_WRITE;
    259 		}
    260 		if (prot & VM_PROT_WRITE) {
    261 			struct vm_page *newpg;
    262 
    263 			newpg = uvm_loanbreak(pg);
    264 			if (newpg == NULL) {
    265 				uvm_page_unbusy(&pg, 1);
    266 				return ENOMEM;
    267 			}
    268 			pg = newpg;
    269 		}
    270 	}
    271 
    272 	/*
    273 	 * Note that a page whose backing store is partially allocated
    274 	 * is marked as PG_RDONLY.
    275 	 */
    276 
    277 	KASSERT((pg->flags & PG_RDONLY) == 0 ||
    278 	    (access_type & VM_PROT_WRITE) == 0 ||
    279 	    pg->offset < umap->writeoff ||
    280 	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
    281 
    282 	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
    283 	    (pg->flags & PG_RDONLY) != 0) ||
    284 	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
    285 	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
    286 
    287 	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
    288 	    prot & mask, PMAP_CANFAIL | (access_type & mask));
    289 
    290 	mutex_enter(&uvm_pageqlock);
    291 	uvm_pageactivate(pg);
    292 	mutex_exit(&uvm_pageqlock);
    293 	pg->flags &= ~(PG_BUSY|PG_WANTED);
    294 	UVM_PAGE_OWN(pg, NULL);
    295 
    296 	return error;
    297 }
    298 
    299 /*
    300  * ubc_fault: fault routine for ubc mapping
    301  */
    302 
    303 static int
    304 ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    305     int ign3, int ign4, vm_prot_t access_type, int flags)
    306 {
    307 	struct uvm_object *uobj;
    308 	struct ubc_map *umap;
    309 	vaddr_t va, eva, ubc_offset, slot_offset;
    310 	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
    311 	int i, error, npages;
    312 	vm_prot_t prot;
    313 
    314 	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);
    315 
    316 	/*
    317 	 * no need to try with PGO_LOCKED...
    318 	 * we don't need to have the map locked since we know that
    319 	 * no one will mess with it until our reference is released.
    320 	 */
    321 
    322 	if (flags & PGO_LOCKED) {
    323 		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
    324 		flags &= ~PGO_LOCKED;
    325 	}
    326 
    327 	va = ufi->orig_rvaddr;
    328 	ubc_offset = va - (vaddr_t)ubc_object.kva;
    329 	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
    330 	KASSERT(umap->refcount != 0);
    331 	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
    332 	slot_offset = ubc_offset & (ubc_winsize - 1);
    333 
    334 	/*
    335 	 * some platforms cannot write to individual bytes atomically, so
    336 	 * software has to do read/modify/write of larger quantities instead.
    337 	 * this means that the access_type for "write" operations
    338 	 * can be VM_PROT_READ, which confuses us mightily.
    339 	 *
    340 	 * deal with this by resetting access_type based on the info
    341 	 * that ubc_alloc() stores for us.
    342 	 */
    343 
    344 	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
    345 	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
    346 	    va, ubc_offset, access_type, 0);
    347 
    348 	if ((access_type & VM_PROT_WRITE) != 0) {
    349 #ifndef PRIxOFF		/* XXX */
    350 #define PRIxOFF "jx"	/* XXX */
    351 #endif			/* XXX */
    352 		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
    353 		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
    354 		    slot_offset, (intmax_t)umap->writeoff);
    355 		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
    356 		    "out of range write: slot=%#"PRIxVADDR
    357 		        " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
    358 		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
    359 	}
    360 
    361 	/* no umap locking needed since we have a ref on the umap */
    362 	uobj = umap->uobj;
    363 
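	/*
	 * For a read, map up to the end of the window.  For a write, map
	 * every page from the faulting slot through the end of the pending
	 * write; PGO_PASTEOF lets the getpages call return pages past EOF.
	 */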
    364 	if ((access_type & VM_PROT_WRITE) == 0) {
    365 		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
    366 	} else {
    367 		npages = (round_page(umap->offset + umap->writeoff +
    368 		    umap->writelen) - (umap->offset + slot_offset))
    369 		    >> PAGE_SHIFT;
    370 		flags |= PGO_PASTEOF;
    371 	}
    372 
    373 again:
    374 	memset(pgs, 0, sizeof (pgs));
    375 	mutex_enter(uobj->vmobjlock);
    376 
    377 	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
    378 	    slot_offset, umap->writeoff, umap->writelen, 0);
    379 	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
    380 	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);
    381 
    382 	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
    383 	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
    384 	    PGO_NOTIMESTAMP);
    385 	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
    386 	    0);
    387 
    388 	if (error == EAGAIN) {
    389 		kpause("ubc_fault", false, hz >> 2, NULL);
    390 		goto again;
    391 	}
    392 	if (error) {
    393 		return error;
    394 	}
    395 
    396 	/*
    397 	 * For virtually-indexed, virtually-tagged caches we should avoid
    398 	 * creating writable mappings when we do not absolutely need them,
    399 	 * since the "compatible alias" trick does not work on such caches.
    400 	 * Otherwise, we can always map the pages writable.
    401 	 */
    402 
    403 #ifdef PMAP_CACHE_VIVT
    404 	prot = VM_PROT_READ | access_type;
    405 #else
    406 	prot = VM_PROT_READ | VM_PROT_WRITE;
    407 #endif
    408 
    409 	va = ufi->orig_rvaddr;
    410 	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);
    411 
    412 	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);
    413 
    414 	/*
    415 	 * Note: normally all returned pages would have the same UVM object.
     416 	 * However, layered file-systems and e.g. tmpfs may return pages
     417 	 * which belong to an underlying UVM object.  In such a case, the
     418 	 * lock is shared amongst the objects.
    419 	 */
    420 	mutex_enter(uobj->vmobjlock);
    421 	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
    422 		struct vm_page *pg;
    423 
    424 		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
    425 		    0, 0);
    426 		pg = pgs[i];
    427 
    428 		if (pg == NULL || pg == PGO_DONTCARE) {
    429 			continue;
    430 		}
    431 		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
    432 		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
    433 		if (error) {
    434 			/*
    435 			 * Flush (there might be pages entered), drop the lock,
    436 			 * and perform uvm_wait().  Note: page will re-fault.
    437 			 */
    438 			pmap_update(ufi->orig_map->pmap);
    439 			mutex_exit(uobj->vmobjlock);
    440 			uvm_wait("ubc_fault");
    441 			mutex_enter(uobj->vmobjlock);
    442 		}
    443 	}
    444 	/* Must make VA visible before the unlock. */
    445 	pmap_update(ufi->orig_map->pmap);
    446 	mutex_exit(uobj->vmobjlock);
    447 
    448 	return 0;
    449 }
    450 
    451 /*
    452  * local functions
    453  */
    454 
    455 static struct ubc_map *
    456 ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
    457 {
    458 	struct ubc_map *umap;
    459 
    460 	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
    461 		if (umap->uobj == uobj && umap->offset == offset) {
    462 			return umap;
    463 		}
    464 	}
    465 	return NULL;
    466 }
    467 
    468 
    469 /*
    470  * ubc interface functions
    471  */
    472 
    473 /*
    474  * ubc_alloc:  allocate a file mapping window
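 *
 * Returns a kernel virtual address inside a window mapping "uobj" at
 * "offset"; *lenp is clipped to what remains of that window.  The caller
 * accesses the file data through the returned address and then calls
 * ubc_release().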
    475  */
    476 
    477 static void * __noinline
    478 ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    479     int flags)
    480 {
    481 	vaddr_t slot_offset, va;
    482 	struct ubc_map *umap;
    483 	voff_t umap_offset;
    484 	int error;
    485 	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);
    486 
    487 	UVMHIST_LOG(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
    488 	    (uintptr_t)uobj, offset, *lenp, 0);
    489 
    490 	KASSERT(*lenp > 0);
    491 	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
    492 	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
    493 	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
    494 
    495 	mutex_enter(ubc_object.uobj.vmobjlock);
    496 again:
    497 	/*
    498 	 * The UVM object is already referenced.
    499 	 * Lock order: UBC object -> ubc_map::uobj.
    500 	 */
    501 	umap = ubc_find_mapping(uobj, umap_offset);
    502 	if (umap == NULL) {
    503 		struct uvm_object *oobj;
    504 
    505 		UBC_EVCNT_INCR(wincachemiss);
    506 		umap = TAILQ_FIRST(UBC_QUEUE(offset));
    507 		if (umap == NULL) {
    508 			kpause("ubc_alloc", false, hz >> 2,
    509 			    ubc_object.uobj.vmobjlock);
    510 			goto again;
    511 		}
    512 
    513 		va = UBC_UMAP_ADDR(umap);
    514 		oobj = umap->uobj;
    515 
    516 		/*
    517 		 * Remove from old hash (if any), add to new hash.
    518 		 */
    519 
    520 		if (oobj != NULL) {
    521 			/*
    522 			 * Mapping must be removed before the list entry,
    523 			 * since there is a race with ubc_purge().
    524 			 */
    525 			if (umap->flags & UMAP_MAPPING_CACHED) {
    526 				umap->flags &= ~UMAP_MAPPING_CACHED;
    527 				mutex_enter(oobj->vmobjlock);
    528 				pmap_remove(pmap_kernel(), va,
    529 				    va + ubc_winsize);
    530 				pmap_update(pmap_kernel());
    531 				mutex_exit(oobj->vmobjlock);
    532 			}
    533 			LIST_REMOVE(umap, hash);
    534 			LIST_REMOVE(umap, list);
    535 		} else {
    536 			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
    537 		}
    538 		umap->uobj = uobj;
    539 		umap->offset = umap_offset;
    540 		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
    541 		    umap, hash);
    542 		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
    543 	} else {
    544 		UBC_EVCNT_INCR(wincachehit);
    545 		va = UBC_UMAP_ADDR(umap);
    546 	}
    547 
    548 	if (umap->refcount == 0) {
    549 		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
    550 	}
    551 
    552 	if (flags & UBC_WRITE) {
    553 		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
    554 		    "ubc_alloc: concurrent writes to uobj %p", uobj);
    555 		umap->writeoff = slot_offset;
    556 		umap->writelen = *lenp;
    557 	}
    558 
    559 	umap->refcount++;
    560 	umap->advice = advice;
    561 	mutex_exit(ubc_object.uobj.vmobjlock);
    562 	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
    563 	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);
    564 
    565 	if (flags & UBC_FAULTBUSY) {
    566 		// XXX add offset from slot_offset?
    567 		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
    568 		struct vm_page *pgs[npages];
    569 		int gpflags =
    570 		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
    571 		    PGO_NOTIMESTAMP;
    572 		int i;
    573 		KDASSERT(flags & UBC_WRITE);
    574 		KASSERT(umap->refcount == 1);
    575 
    576 		UBC_EVCNT_INCR(faultbusy);
    577 again_faultbusy:
    578 		mutex_enter(uobj->vmobjlock);
    579 		if (umap->flags & UMAP_MAPPING_CACHED) {
    580 			umap->flags &= ~UMAP_MAPPING_CACHED;
    581 			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
    582 		}
    583 		memset(pgs, 0, sizeof(pgs));
    584 
    585 		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
    586 		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
    587 		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
    588 		if (error) {
    589 			/*
    590 			 * Flush: the mapping above might have been removed.
    591 			 */
    592 			pmap_update(pmap_kernel());
    593 			goto out;
    594 		}
    595 		for (i = 0; i < npages; i++) {
    596 			struct vm_page *pg = pgs[i];
    597 
    598 			KASSERT(pg->uobject == uobj);
    599 			if (pg->loan_count != 0) {
    600 				mutex_enter(uobj->vmobjlock);
    601 				if (pg->loan_count != 0) {
    602 					pg = uvm_loanbreak(pg);
    603 				}
    604 				if (pg == NULL) {
    605 					pmap_kremove(va, ubc_winsize);
    606 					pmap_update(pmap_kernel());
    607 					uvm_page_unbusy(pgs, npages);
    608 					mutex_exit(uobj->vmobjlock);
    609 					uvm_wait("ubc_alloc");
    610 					goto again_faultbusy;
    611 				}
    612 				mutex_exit(uobj->vmobjlock);
    613 				pgs[i] = pg;
    614 			}
    615 			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
    616 			    VM_PAGE_TO_PHYS(pg),
    617 			    VM_PROT_READ | VM_PROT_WRITE, 0);
    618 		}
    619 		pmap_update(pmap_kernel());
    620 		umap->flags |= UMAP_PAGES_LOCKED;
    621 	} else {
    622 		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
    623 	}
    624 
    625 out:
    626 	return (void *)(va + slot_offset);
    627 }
    628 
    629 /*
    630  * ubc_release:  free a file mapping window.
    631  */
    632 
    633 static void __noinline
    634 ubc_release(void *va, int flags)
    635 {
    636 	struct ubc_map *umap;
    637 	struct uvm_object *uobj;
    638 	vaddr_t umapva;
    639 	bool unmapped;
    640 	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);
    641 
    642 	UVMHIST_LOG(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
    643 	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
    644 	umapva = UBC_UMAP_ADDR(umap);
    645 	uobj = umap->uobj;
    646 	KASSERT(uobj != NULL);
    647 
    648 	if (umap->flags & UMAP_PAGES_LOCKED) {
    649 		const voff_t slot_offset = umap->writeoff;
    650 		const voff_t endoff = umap->writeoff + umap->writelen;
    651 		const voff_t zerolen = round_page(endoff) - endoff;
    652 		const u_int npages = (round_page(endoff) -
    653 		    trunc_page(slot_offset)) >> PAGE_SHIFT;
    654 		struct vm_page *pgs[npages];
    655 
    656 		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
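		/*
		 * Zero the unwritten tail of the last page so that no stale
		 * data is left between the end of the write and the page
		 * boundary.
		 */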
    657 		if (zerolen) {
    658 			memset((char *)umapva + endoff, 0, zerolen);
    659 		}
    660 		umap->flags &= ~UMAP_PAGES_LOCKED;
    661 		mutex_enter(uobj->vmobjlock);
    662 		mutex_enter(&uvm_pageqlock);
    663 		for (u_int i = 0; i < npages; i++) {
    664 			paddr_t pa;
    665 			bool rv __diagused;
    666 
    667 			rv = pmap_extract(pmap_kernel(),
    668 			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
    669 			KASSERT(rv);
    670 			pgs[i] = PHYS_TO_VM_PAGE(pa);
    671 			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
    672 			KASSERT(pgs[i]->loan_count == 0);
    673 			uvm_pageactivate(pgs[i]);
    674 		}
    675 		mutex_exit(&uvm_pageqlock);
    676 		pmap_kremove(umapva, ubc_winsize);
    677 		pmap_update(pmap_kernel());
    678 		uvm_page_unbusy(pgs, npages);
    679 		mutex_exit(uobj->vmobjlock);
    680 		unmapped = true;
    681 	} else {
    682 		unmapped = false;
    683 	}
    684 
    685 	mutex_enter(ubc_object.uobj.vmobjlock);
    686 	umap->writeoff = 0;
    687 	umap->writelen = 0;
    688 	umap->refcount--;
    689 	if (umap->refcount == 0) {
    690 		if (flags & UBC_UNMAP) {
    691 			/*
    692 			 * Invalidate any cached mappings if requested.
    693 			 * This is typically used to avoid leaving
    694 			 * incompatible cache aliases around indefinitely.
    695 			 */
    696 			mutex_enter(uobj->vmobjlock);
    697 			pmap_remove(pmap_kernel(), umapva,
    698 				    umapva + ubc_winsize);
    699 			pmap_update(pmap_kernel());
    700 			mutex_exit(uobj->vmobjlock);
    701 
    702 			umap->flags &= ~UMAP_MAPPING_CACHED;
    703 			LIST_REMOVE(umap, hash);
    704 			LIST_REMOVE(umap, list);
    705 			umap->uobj = NULL;
    706 			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
    707 			    inactive);
    708 		} else {
    709 			if (!unmapped) {
    710 				umap->flags |= UMAP_MAPPING_CACHED;
    711 			}
    712 			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
    713 			    inactive);
    714 		}
    715 	}
     716 	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
    717 	    umap->refcount, 0, 0);
    718 	mutex_exit(ubc_object.uobj.vmobjlock);
    719 }
    720 
    721 /*
    722  * ubc_uiomove: move data to/from an object.
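 *
 * A minimal sketch of a typical caller (illustrative only, not taken from
 * any particular filesystem; "vp" is assumed to be a vnode and "filesize"
 * its current size):
 *
 *	while (uio->uio_resid > 0 && error == 0) {
 *		vsize_t bytelen = MIN(filesize - uio->uio_offset,
 *		    uio->uio_resid);
 *		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
 *		    UVM_ADV_SEQUENTIAL, UBC_READ | UBC_PARTIALOK);
 *	}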
    723  */
    724 
    725 int
    726 ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    727     int flags)
    728 {
    729 	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
    730 	voff_t off;
    731 	int error;
    732 
    733 	KASSERT(todo <= uio->uio_resid);
    734 	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
    735 	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));
    736 
    737 #ifdef UBC_USE_PMAP_DIRECT
    738 	if (ubc_direct) {
    739 		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
    740 	}
    741 #endif
    742 
    743 	off = uio->uio_offset;
    744 	error = 0;
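	/*
	 * Even after an error, keep walking the remaining windows unless
	 * UBC_PARTIALOK is set: no further data is copied, but in the
	 * UBC_FAULTBUSY case the untouched pages still get zero-filled
	 * before they are released.
	 */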
    745 	while (todo > 0) {
    746 		vsize_t bytelen = todo;
    747 		void *win;
    748 
    749 		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
    750 		if (error == 0) {
    751 			error = uiomove(win, bytelen, uio);
    752 		}
    753 		if (error != 0 && overwrite) {
    754 			/*
    755 			 * if we haven't initialized the pages yet,
    756 			 * do it now.  it's safe to use memset here
    757 			 * because we just mapped the pages above.
    758 			 */
    759 			printf("%s: error=%d\n", __func__, error);
    760 			memset(win, 0, bytelen);
    761 		}
    762 		ubc_release(win, flags);
    763 		off += bytelen;
    764 		todo -= bytelen;
    765 		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
    766 			break;
    767 		}
    768 	}
    769 
    770 	return error;
    771 }
    772 
    773 /*
    774  * ubc_zerorange: set a range of bytes in an object to zero.
    775  */
    776 
    777 void
    778 ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
    779 {
    780 
    781 #ifdef UBC_USE_PMAP_DIRECT
    782 	if (ubc_direct) {
    783 		ubc_zerorange_direct(uobj, off, len, flags);
    784 		return;
    785 	}
    786 #endif
    787 
    788 	/*
    789 	 * XXXUBC invent kzero() and use it
    790 	 */
    791 
    792 	while (len) {
    793 		void *win;
    794 		vsize_t bytelen = len;
    795 
    796 		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE);
    797 		memset(win, 0, bytelen);
    798 		ubc_release(win, flags);
    799 
    800 		off += bytelen;
    801 		len -= bytelen;
    802 	}
    803 }
    804 
    805 #ifdef UBC_USE_PMAP_DIRECT
    806 /* Copy data using direct map */
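/*
 * In the direct path there is no kernel window: the pages are accessed
 * through the pmap's direct (physical) map via uvm_direct_process(), so
 * no per-I/O mapping has to be entered or torn down.
 */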
    807 
    808 /*
    809  * ubc_alloc_direct:  allocate a file mapping window using direct map
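 *
 * Unlike ubc_alloc(), no kernel window is returned: the busy pages are
 * handed back in pgs/npages for the caller to process with
 * uvm_direct_process() and then release with ubc_direct_release().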
    810  */
    811 static int __noinline
    812 ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    813     int advice, int flags, struct vm_page **pgs, int *npages)
    814 {
    815 	voff_t pgoff;
    816 	int error;
    817 	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO | PGO_ALLPAGES;
    818 	int access_type = VM_PROT_READ;
    819 
    820 	if (flags & UBC_WRITE) {
    821 		if (flags & UBC_FAULTBUSY)
    822 			gpflags |= PGO_OVERWRITE;
    823 #if 0
    824 		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
    825 #endif
    826 
    827 		gpflags |= PGO_PASTEOF;
    828 		access_type |= VM_PROT_WRITE;
    829 	}
    830 
    831 	pgoff = (offset & PAGE_MASK);
    832 	*lenp = MIN(*lenp, ubc_winsize - pgoff);
    833 
    834 again:
    835 	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
    836 	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
    837 	KASSERT(*lenp + pgoff <= ubc_winsize);
    838 	memset(pgs, 0, *npages * sizeof(pgs[0]));
    839 
    840 	mutex_enter(uobj->vmobjlock);
    841 	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
    842 	    npages, 0, access_type, advice, gpflags);
    843 	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
    844 	if (error) {
    845 		if (error == EAGAIN) {
    846 			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
    847 			goto again;
    848 		}
    849 		return error;
    850 	}
    851 
    852 	mutex_enter(uobj->vmobjlock);
    853 	for (int i = 0; i < *npages; i++) {
    854 		struct vm_page *pg = pgs[i];
    855 
    856 		KASSERT(pg != NULL);
    857 		KASSERT(pg != PGO_DONTCARE);
    858 		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
    859 		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);
    860 
    861 		/* Avoid breaking loan if possible, only do it on write */
    862 		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
    863 			pg = uvm_loanbreak(pg);
    864 			if (pg == NULL) {
    865 				uvm_page_unbusy(pgs, *npages);
    866 				mutex_exit(uobj->vmobjlock);
    867 				uvm_wait("ubc_alloc_directl");
    868 				goto again;
    869 			}
    870 			pgs[i] = pg;
    871 		}
    872 
    873 		/* Page must be writable by now */
    874 		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);
    875 	}
    876 	mutex_exit(uobj->vmobjlock);
    877 
    878 	return 0;
    879 }
    880 
    881 static void __noinline
    882 ubc_direct_release(struct uvm_object *uobj,
    883 	int flags, struct vm_page **pgs, int npages)
    884 {
    885 	mutex_enter(uobj->vmobjlock);
    886 	mutex_enter(&uvm_pageqlock);
    887 	for (int i = 0; i < npages; i++) {
    888 		struct vm_page *pg = pgs[i];
    889 
    890 		uvm_pageactivate(pg);
    891 
     892 		/* Page was changed, so it is no longer fake nor clean */
    893 		if (flags & UBC_WRITE)
    894 			pg->flags &= ~(PG_FAKE|PG_CLEAN);
    895 	}
    896 	mutex_exit(&uvm_pageqlock);
    897 
    898 	uvm_page_unbusy(pgs, npages);
    899 	mutex_exit(uobj->vmobjlock);
    900 }
    901 
    902 static int
    903 ubc_uiomove_process(void *win, size_t len, void *arg)
    904 {
    905 	struct uio *uio = (struct uio *)arg;
    906 
    907 	return uiomove(win, len, uio);
    908 }
    909 
    910 static int
    911 ubc_zerorange_process(void *win, size_t len, void *arg)
    912 {
    913 	memset(win, 0, len);
    914 	return 0;
    915 }
    916 
    917 static int __noinline
    918 ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    919     int flags)
    920 {
    921 	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
    922 	voff_t off;
    923 	int error, npages;
    924 	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
    925 
    926 	KASSERT(todo <= uio->uio_resid);
    927 	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
    928 	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));
    929 
    930 	off = uio->uio_offset;
    931 	error = 0;
    932 	while (todo > 0) {
    933 		vsize_t bytelen = todo;
    934 
    935 		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
    936 		    pgs, &npages);
    937 		if (error != 0) {
    938 			/* can't do anything, failed to get the pages */
    939 			break;
    940 		}
    941 
    942 		if (error == 0) {
    943 			error = uvm_direct_process(pgs, npages, off, bytelen,
    944 			    ubc_uiomove_process, uio);
    945 		}
    946 		if (error != 0 && overwrite) {
    947 			/*
    948 			 * if we haven't initialized the pages yet,
    949 			 * do it now.  it's safe to use memset here
    950 			 * because we just mapped the pages above.
    951 			 */
    952 			printf("%s: error=%d\n", __func__, error);
    953 			(void) uvm_direct_process(pgs, npages, off, bytelen,
    954 			    ubc_zerorange_process, NULL);
    955 		}
    956 
    957 		ubc_direct_release(uobj, flags, pgs, npages);
    958 
    959 		off += bytelen;
    960 		todo -= bytelen;
    961 
    962 		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
    963 			break;
    964 		}
    965 	}
    966 
    967 	return error;
    968 }
    969 
    970 static void __noinline
    971 ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
    972 {
    973 	int error, npages;
    974 	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
    975 
    976 	flags |= UBC_WRITE;
    977 
    978 	error = 0;
    979 	while (todo > 0) {
    980 		vsize_t bytelen = todo;
    981 
    982 		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
    983 		    flags, pgs, &npages);
    984 		if (error != 0) {
    985 			/* can't do anything, failed to get the pages */
    986 			break;
    987 		}
    988 
    989 		error = uvm_direct_process(pgs, npages, off, bytelen,
    990 		    ubc_zerorange_process, NULL);
    991 
    992 		ubc_direct_release(uobj, flags, pgs, npages);
    993 
    994 		off += bytelen;
    995 		todo -= bytelen;
    996 	}
    997 }
    998 
    999 #endif /* UBC_USE_PMAP_DIRECT */
   1000 
   1001 /*
   1002  * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
   1003  */
   1004 
   1005 void
   1006 ubc_purge(struct uvm_object *uobj)
   1007 {
   1008 	struct ubc_map *umap;
   1009 	vaddr_t va;
   1010 
   1011 	KASSERT(uobj->uo_npages == 0);
   1012 
   1013 	/*
   1014 	 * Safe to check without lock held, as ubc_alloc() removes
   1015 	 * the mapping and list entry in the correct order.
   1016 	 */
   1017 	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
   1018 		return;
   1019 	}
   1020 	mutex_enter(ubc_object.uobj.vmobjlock);
   1021 	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
   1022 		KASSERT(umap->refcount == 0);
   1023 		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
   1024 			KASSERT(!pmap_extract(pmap_kernel(),
   1025 			    va + UBC_UMAP_ADDR(umap), NULL));
   1026 		}
   1027 		LIST_REMOVE(umap, list);
   1028 		LIST_REMOVE(umap, hash);
   1029 		umap->flags &= ~UMAP_MAPPING_CACHED;
   1030 		umap->uobj = NULL;
   1031 	}
   1032 	mutex_exit(ubc_object.uobj.vmobjlock);
   1033 }
   1034