      1 /*	$NetBSD: uvm_bio.c,v 1.122 2020/10/05 04:48:23 rin Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1998 Chuck Silvers.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. The name of the author may not be used to endorse or promote products
     16  *    derived from this software without specific prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28  * SUCH DAMAGE.
     29  *
     30  */
     31 
     32 /*
     33  * uvm_bio.c: buffered i/o object mapping cache
     34  */
     35 
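         /*
          * Overview: the UBC maps fixed-size windows (ubc_winsize bytes each)
          * of file objects into a dedicated range of kernel virtual memory
          * rooted at ubc_object.kva.  ubc_alloc() finds or recycles a window
          * (a struct ubc_map) covering a given object/offset, and
          * ubc_release() returns it to an inactive queue so its mapping can
          * be reused later.  ubc_uiomove() and ubc_zerorange() are the
          * interfaces callers normally use; faults taken on a window are
          * resolved by ubc_fault(), which asks the object's pager for the
          * pages.  Where PMAP_DIRECT is available, data can instead be copied
          * through the pmap's direct map (see ubc_direct below).
          */
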
     36 #include <sys/cdefs.h>
     37 __KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.122 2020/10/05 04:48:23 rin Exp $");
     38 
     39 #include "opt_uvmhist.h"
     40 #include "opt_ubc.h"
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/kmem.h>
     45 #include <sys/kernel.h>
     46 #include <sys/proc.h>
     47 #include <sys/vnode.h>
     48 #include <sys/bitops.h>		/* for ilog2() */
     49 
     50 #include <uvm/uvm.h>
     51 #include <uvm/uvm_pdpolicy.h>
     52 
     53 #ifdef PMAP_DIRECT
     54 #  define UBC_USE_PMAP_DIRECT
     55 #endif
     56 
     57 /*
     58  * local functions
     59  */
     60 
     61 static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
     62 			  int, int, vm_prot_t, int);
     63 static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
     64 #ifdef UBC_USE_PMAP_DIRECT
     65 static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *, vsize_t,
     66 			  int, int);
     67 static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t, size_t, int);
     68 
     69 /* XXX disabled by default until the kinks are worked out. */
     70 bool ubc_direct = false;
     71 #endif
     72 
     73 /*
      74  * local data structures
     75  */
     76 
     77 #define UBC_HASH(uobj, offset) 						\
     78 	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
     79 				ubc_object.hashmask)
     80 
     81 #define UBC_QUEUE(offset)						\
     82 	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
     83 			     (UBC_NQUEUES - 1)])
     84 
     85 #define UBC_UMAP_ADDR(u)						\
     86 	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
     87 
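         /*
          * Illustration of the macros above, with assumed values (they are
          * not required by this code): if ubc_winshift == 13 (8 KB windows),
          * then UBC_UMAP_ADDR(&ubc_object.umap[3]) == ubc_object.kva + (3 << 13),
          * i.e. window 3 always lives at kva + 0x6000, and UBC_QUEUE(0x5000)
          * selects inactive queue (0x5000 >> 13) & (UBC_NQUEUES - 1).
          */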
     88 
     89 #define UMAP_PAGES_LOCKED	0x0001
     90 #define UMAP_MAPPING_CACHED	0x0002
     91 
     92 struct ubc_map {
     93 	struct uvm_object *	uobj;		/* mapped object */
     94 	voff_t			offset;		/* offset into uobj */
     95 	voff_t			writeoff;	/* write offset */
     96 	vsize_t			writelen;	/* write len */
     97 	int			refcount;	/* refcount on mapping */
     98 	int			flags;		/* extra state */
     99 	int			advice;
    100 
    101 	LIST_ENTRY(ubc_map)	hash;		/* hash table */
    102 	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
    103 	LIST_ENTRY(ubc_map)	list;		/* per-object list */
    104 };
    105 
    106 TAILQ_HEAD(ubc_inactive_head, ubc_map);
    107 static struct ubc_object {
    108 	struct uvm_object uobj;		/* glue for uvm_map() */
    109 	char *kva;			/* where ubc_object is mapped */
    110 	struct ubc_map *umap;		/* array of ubc_map's */
    111 
    112 	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
    113 	u_long hashmask;		/* mask for hashtable */
    114 
    115 	struct ubc_inactive_head *inactive;
    116 					/* inactive queues for ubc_map's */
    117 } ubc_object;
    118 
    119 const struct uvm_pagerops ubc_pager = {
    120 	.pgo_fault = ubc_fault,
    121 	/* ... rest are NULL */
    122 };
    123 
     124 /* Use a value at least as big as the maximum page size supported by the architecture. */
    125 #define UBC_MAX_WINSHIFT	\
    126     ((1 << UBC_WINSHIFT) > MAX_PAGE_SIZE ? UBC_WINSHIFT : ilog2(MAX_PAGE_SIZE))
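         /*
          * For example (values purely illustrative): with UBC_WINSHIFT == 13
          * and MAX_PAGE_SIZE == 16384, 1 << 13 is not larger than 16384, so
          * the macro yields ilog2(16384) == 14 and windows are 16 KB; a
          * window is therefore never smaller than the largest page size the
          * architecture supports.
          */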
    127 
    128 int ubc_nwins = UBC_NWINS;
    129 const int ubc_winshift = UBC_MAX_WINSHIFT;
    130 const int ubc_winsize = 1 << UBC_MAX_WINSHIFT;
    131 #if defined(PMAP_PREFER)
    132 int ubc_nqueues;
    133 #define UBC_NQUEUES ubc_nqueues
    134 #else
    135 #define UBC_NQUEUES 1
    136 #endif
    137 
    138 #if defined(UBC_STATS)
    139 
    140 #define	UBC_EVCNT_DEFINE(name) \
    141 struct evcnt ubc_evcnt_##name = \
    142 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
    143 EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
    144 #define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++
    145 
    146 #else /* defined(UBC_STATS) */
    147 
    148 #define	UBC_EVCNT_DEFINE(name)	/* nothing */
    149 #define	UBC_EVCNT_INCR(name)	/* nothing */
    150 
    151 #endif /* defined(UBC_STATS) */
    152 
    153 UBC_EVCNT_DEFINE(wincachehit)
    154 UBC_EVCNT_DEFINE(wincachemiss)
    155 UBC_EVCNT_DEFINE(faultbusy)
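
         /*
          * When UBC_STATS is defined, the definitions above attach static
          * event counters in the "ubc" group (e.g. "ubc wincachehit"),
          * visible with vmstat -e; otherwise they compile to nothing.
          */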
    156 
    157 /*
    158  * ubc_init
    159  *
    160  * init pager private data structures.
    161  */
    162 
    163 void
    164 ubc_init(void)
    165 {
    166 	/*
    167 	 * Make sure ubc_winshift is sane.
    168 	 */
    169 	KASSERT(ubc_winshift >= PAGE_SHIFT);
    170 
    171 	/*
    172 	 * init ubc_object.
    173 	 * alloc and init ubc_map's.
    174 	 * init inactive queues.
    175 	 * alloc and init hashtable.
    176 	 * map in ubc_object.
    177 	 */
    178 
    179 	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);
    180 
    181 	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
    182 	    KM_SLEEP);
    183 	if (ubc_object.umap == NULL)
    184 		panic("ubc_init: failed to allocate ubc_map");
    185 
    186 	vaddr_t va = (vaddr_t)1L;
    187 #ifdef PMAP_PREFER
    188 	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
    189 	ubc_nqueues = va >> ubc_winshift;
    190 	if (ubc_nqueues == 0) {
    191 		ubc_nqueues = 1;
    192 	}
    193 #endif
    194 	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
    195 	    sizeof(struct ubc_inactive_head), KM_SLEEP);
    196 	for (int i = 0; i < UBC_NQUEUES; i++) {
    197 		TAILQ_INIT(&ubc_object.inactive[i]);
    198 	}
    199 	for (int i = 0; i < ubc_nwins; i++) {
    200 		struct ubc_map *umap;
    201 		umap = &ubc_object.umap[i];
    202 		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
    203 				  umap, inactive);
    204 	}
    205 
    206 	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
    207 	    &ubc_object.hashmask);
    208 	for (int i = 0; i <= ubc_object.hashmask; i++) {
    209 		LIST_INIT(&ubc_object.hash[i]);
    210 	}
    211 
    212 	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
    213 		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
    214 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
    215 				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
    216 		panic("ubc_init: failed to map ubc_object");
    217 	}
    218 }
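
         /*
          * The uvm_map() call above reserves ubc_nwins << ubc_winshift bytes
          * of kernel VA for the window cache; for example (illustrative
          * numbers only), 1024 windows of 8 KB each would consume 8 MB of KVA.
          */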
    219 
    220 void
    221 ubchist_init(void)
    222 {
    223 
    224 	UVMHIST_INIT(ubchist, 300);
    225 }
    226 
    227 /*
    228  * ubc_fault_page: helper of ubc_fault to handle a single page.
    229  *
    230  * => Caller has UVM object locked.
    231  * => Caller will perform pmap_update().
    232  */
    233 
    234 static inline int
    235 ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    236     struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
    237 {
    238 	int error;
    239 
    240 	KASSERT(rw_write_held(pg->uobject->vmobjlock));
    241 
    242 	KASSERT((pg->flags & PG_FAKE) == 0);
    243 	if (pg->flags & PG_RELEASED) {
    244 		uvm_pagefree(pg);
    245 		return 0;
    246 	}
    247 	if (pg->loan_count != 0) {
    248 
    249 		/*
    250 		 * Avoid unneeded loan break, if possible.
    251 		 */
    252 
    253 		if ((access_type & VM_PROT_WRITE) == 0) {
    254 			prot &= ~VM_PROT_WRITE;
    255 		}
    256 		if (prot & VM_PROT_WRITE) {
    257 			struct vm_page *newpg;
    258 
    259 			newpg = uvm_loanbreak(pg);
    260 			if (newpg == NULL) {
    261 				uvm_page_unbusy(&pg, 1);
    262 				return ENOMEM;
    263 			}
    264 			pg = newpg;
    265 		}
    266 	}
    267 
    268 	/*
    269 	 * Note that a page whose backing store is partially allocated
    270 	 * is marked as PG_RDONLY.
    271 	 *
     272 	 * It is the responsibility of ubc_alloc's caller to allocate the
     273 	 * backing blocks before writing to the window.
    274 	 */
    275 
    276 	KASSERT((pg->flags & PG_RDONLY) == 0 ||
    277 	    (access_type & VM_PROT_WRITE) == 0 ||
    278 	    pg->offset < umap->writeoff ||
    279 	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
    280 
    281 	KASSERT((access_type & VM_PROT_WRITE) == 0 ||
    282 	    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
    283 
    284 	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
    285 	    prot, PMAP_CANFAIL | access_type);
    286 
    287 	uvm_pagelock(pg);
    288 	uvm_pageactivate(pg);
    289 	uvm_pagewakeup(pg);
    290 	uvm_pageunlock(pg);
    291 	pg->flags &= ~PG_BUSY;
    292 	UVM_PAGE_OWN(pg, NULL);
    293 
    294 	return error;
    295 }
    296 
    297 /*
    298  * ubc_fault: fault routine for ubc mapping
    299  */
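         /*
          * This is installed as ubc_pager.pgo_fault above, so it runs when
          * the kernel faults on an address inside a window handed out by
          * ubc_alloc() and the corresponding page is not yet entered in the
          * kernel pmap.
          */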
    300 
    301 static int
    302 ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    303     int ign3, int ign4, vm_prot_t access_type, int flags)
    304 {
    305 	struct uvm_object *uobj;
    306 	struct ubc_map *umap;
    307 	vaddr_t va, eva, ubc_offset, slot_offset;
    308 	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
    309 	int i, error, npages;
    310 	vm_prot_t prot;
    311 
    312 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
    313 
    314 	/*
    315 	 * no need to try with PGO_LOCKED...
    316 	 * we don't need to have the map locked since we know that
    317 	 * no one will mess with it until our reference is released.
    318 	 */
    319 
    320 	if (flags & PGO_LOCKED) {
    321 		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
    322 		flags &= ~PGO_LOCKED;
    323 	}
    324 
    325 	va = ufi->orig_rvaddr;
    326 	ubc_offset = va - (vaddr_t)ubc_object.kva;
    327 	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
    328 	KASSERT(umap->refcount != 0);
    329 	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
    330 	slot_offset = ubc_offset & (ubc_winsize - 1);
    331 
    332 	/*
    333 	 * some platforms cannot write to individual bytes atomically, so
    334 	 * software has to do read/modify/write of larger quantities instead.
    335 	 * this means that the access_type for "write" operations
    336 	 * can be VM_PROT_READ, which confuses us mightily.
    337 	 *
    338 	 * deal with this by resetting access_type based on the info
    339 	 * that ubc_alloc() stores for us.
    340 	 */
    341 
    342 	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
    343 	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
    344 	    va, ubc_offset, access_type, 0);
    345 
    346 	if ((access_type & VM_PROT_WRITE) != 0) {
    347 #ifndef PRIxOFF		/* XXX */
    348 #define PRIxOFF "jx"	/* XXX */
    349 #endif			/* XXX */
    350 		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
    351 		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
    352 		    slot_offset, (intmax_t)umap->writeoff);
    353 		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
    354 		    "out of range write: slot=%#"PRIxVADDR
    355 		        " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
    356 		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
    357 	}
    358 
    359 	/* no umap locking needed since we have a ref on the umap */
    360 	uobj = umap->uobj;
    361 
    362 	if ((access_type & VM_PROT_WRITE) == 0) {
    363 		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
    364 	} else {
    365 		npages = (round_page(umap->offset + umap->writeoff +
    366 		    umap->writelen) - (umap->offset + slot_offset))
    367 		    >> PAGE_SHIFT;
    368 		flags |= PGO_PASTEOF;
    369 	}
    370 
    371 again:
    372 	memset(pgs, 0, sizeof (pgs));
    373 	rw_enter(uobj->vmobjlock, RW_WRITER);
    374 
    375 	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
    376 	    slot_offset, umap->writeoff, umap->writelen, 0);
    377 	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
    378 	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);
    379 
    380 	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
    381 	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
    382 	    PGO_NOTIMESTAMP);
    383 	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
    384 	    0);
    385 
    386 	if (error == EAGAIN) {
    387 		kpause("ubc_fault", false, hz >> 2, NULL);
    388 		goto again;
    389 	}
    390 	if (error) {
    391 		return error;
    392 	}
    393 
    394 	/*
    395 	 * For virtually-indexed, virtually-tagged caches we should avoid
    396 	 * creating writable mappings when we do not absolutely need them,
    397 	 * since the "compatible alias" trick does not work on such caches.
    398 	 * Otherwise, we can always map the pages writable.
    399 	 */
    400 
    401 #ifdef PMAP_CACHE_VIVT
    402 	prot = VM_PROT_READ | access_type;
    403 #else
    404 	prot = VM_PROT_READ | VM_PROT_WRITE;
    405 #endif
    406 
    407 	va = ufi->orig_rvaddr;
    408 	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);
    409 
    410 	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);
    411 
    412 	/*
    413 	 * Note: normally all returned pages would have the same UVM object.
     414 	 * However, layered file systems and e.g. tmpfs may return pages
     415 	 * which belong to the underlying UVM object.  In such a case, the
     416 	 * lock is shared amongst the objects.
    417 	 */
    418 	rw_enter(uobj->vmobjlock, RW_WRITER);
    419 	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
    420 		struct vm_page *pg;
    421 
    422 		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
    423 		    0, 0);
    424 		pg = pgs[i];
    425 
    426 		if (pg == NULL || pg == PGO_DONTCARE) {
    427 			continue;
    428 		}
    429 		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
    430 		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
    431 		if (error) {
    432 			/*
    433 			 * Flush (there might be pages entered), drop the lock,
    434 			 * and perform uvm_wait().  Note: page will re-fault.
    435 			 */
    436 			pmap_update(ufi->orig_map->pmap);
    437 			rw_exit(uobj->vmobjlock);
    438 			uvm_wait("ubc_fault");
    439 			rw_enter(uobj->vmobjlock, RW_WRITER);
    440 		}
    441 	}
    442 	/* Must make VA visible before the unlock. */
    443 	pmap_update(ufi->orig_map->pmap);
    444 	rw_exit(uobj->vmobjlock);
    445 
    446 	return 0;
    447 }
    448 
    449 /*
    450  * local functions
    451  */
    452 
    453 static struct ubc_map *
    454 ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
    455 {
    456 	struct ubc_map *umap;
    457 
    458 	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
    459 		if (umap->uobj == uobj && umap->offset == offset) {
    460 			return umap;
    461 		}
    462 	}
    463 	return NULL;
    464 }
    465 
    466 
    467 /*
    468  * ubc interface functions
    469  */
    470 
    471 /*
    472  * ubc_alloc:  allocate a file mapping window
    473  */
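         /*
          * Two modes are implemented below: normally the window is mapped
          * lazily, with pages brought in by ubc_fault() on first touch; with
          * UBC_FAULTBUSY (used together with UBC_WRITE) the pages are fetched
          * and entered into the kernel pmap up front, left busy, and returned
          * to the caller through the pgs/npagesp arguments so that
          * ubc_release() can unbusy them.
          */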
    474 
    475 static void * __noinline
    476 ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    477     int flags, struct vm_page **pgs, int *npagesp)
    478 {
    479 	vaddr_t slot_offset, va;
    480 	struct ubc_map *umap;
    481 	voff_t umap_offset;
    482 	int error;
    483 	UVMHIST_FUNC(__func__);
    484 	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
    485 	    (uintptr_t)uobj, offset, *lenp, 0);
    486 
    487 	KASSERT(*lenp > 0);
    488 	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
    489 	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
    490 	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
    491 	KASSERT(*lenp > 0);
    492 
    493 	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
    494 again:
    495 	/*
    496 	 * The UVM object is already referenced.
    497 	 * Lock order: UBC object -> ubc_map::uobj.
    498 	 */
    499 	umap = ubc_find_mapping(uobj, umap_offset);
    500 	if (umap == NULL) {
    501 		struct uvm_object *oobj;
    502 
    503 		UBC_EVCNT_INCR(wincachemiss);
    504 		umap = TAILQ_FIRST(UBC_QUEUE(offset));
    505 		if (umap == NULL) {
    506 			rw_exit(ubc_object.uobj.vmobjlock);
    507 			kpause("ubc_alloc", false, hz >> 2, NULL);
    508 			rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
    509 			goto again;
    510 		}
    511 
    512 		va = UBC_UMAP_ADDR(umap);
    513 		oobj = umap->uobj;
    514 
    515 		/*
    516 		 * Remove from old hash (if any), add to new hash.
    517 		 */
    518 
    519 		if (oobj != NULL) {
    520 			/*
    521 			 * Mapping must be removed before the list entry,
    522 			 * since there is a race with ubc_purge().
    523 			 */
    524 			if (umap->flags & UMAP_MAPPING_CACHED) {
    525 				umap->flags &= ~UMAP_MAPPING_CACHED;
    526 				rw_enter(oobj->vmobjlock, RW_WRITER);
    527 				pmap_remove(pmap_kernel(), va,
    528 				    va + ubc_winsize);
    529 				pmap_update(pmap_kernel());
    530 				rw_exit(oobj->vmobjlock);
    531 			}
    532 			LIST_REMOVE(umap, hash);
    533 			LIST_REMOVE(umap, list);
    534 		} else {
    535 			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
    536 		}
    537 		umap->uobj = uobj;
    538 		umap->offset = umap_offset;
    539 		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
    540 		    umap, hash);
    541 		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
    542 	} else {
    543 		UBC_EVCNT_INCR(wincachehit);
    544 		va = UBC_UMAP_ADDR(umap);
    545 	}
    546 
    547 	if (umap->refcount == 0) {
    548 		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
    549 	}
    550 
    551 	if (flags & UBC_WRITE) {
    552 		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
    553 		    "ubc_alloc: concurrent writes to uobj %p", uobj);
    554 		umap->writeoff = slot_offset;
    555 		umap->writelen = *lenp;
    556 	}
    557 
    558 	umap->refcount++;
    559 	umap->advice = advice;
    560 	rw_exit(ubc_object.uobj.vmobjlock);
    561 	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
    562 	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);
    563 
    564 	if (flags & UBC_FAULTBUSY) {
    565 		int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
    566 		    PAGE_SIZE - 1) >> PAGE_SHIFT;
    567 		int gpflags =
    568 		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
    569 		    PGO_NOTIMESTAMP;
    570 		int i;
    571 		KDASSERT(flags & UBC_WRITE);
    572 		KASSERT(npages <= *npagesp);
    573 		KASSERT(umap->refcount == 1);
    574 
    575 		UBC_EVCNT_INCR(faultbusy);
    576 again_faultbusy:
    577 		rw_enter(uobj->vmobjlock, RW_WRITER);
    578 		if (umap->flags & UMAP_MAPPING_CACHED) {
    579 			umap->flags &= ~UMAP_MAPPING_CACHED;
    580 			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
    581 		}
    582 		memset(pgs, 0, *npagesp * sizeof(pgs[0]));
    583 
    584 		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
    585 		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
    586 		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
    587 		if (error) {
    588 			/*
    589 			 * Flush: the mapping above might have been removed.
    590 			 */
    591 			pmap_update(pmap_kernel());
    592 			goto out;
    593 		}
    594 		for (i = 0; i < npages; i++) {
    595 			struct vm_page *pg = pgs[i];
    596 
    597 			KASSERT(pg->uobject == uobj);
    598 			if (pg->loan_count != 0) {
    599 				rw_enter(uobj->vmobjlock, RW_WRITER);
    600 				if (pg->loan_count != 0) {
    601 					pg = uvm_loanbreak(pg);
    602 				}
    603 				if (pg == NULL) {
    604 					pmap_kremove(va, ubc_winsize);
    605 					pmap_update(pmap_kernel());
    606 					uvm_page_unbusy(pgs, npages);
    607 					rw_exit(uobj->vmobjlock);
    608 					uvm_wait("ubc_alloc");
    609 					goto again_faultbusy;
    610 				}
    611 				rw_exit(uobj->vmobjlock);
    612 				pgs[i] = pg;
    613 			}
    614 			pmap_kenter_pa(
    615 			    va + trunc_page(slot_offset) + (i << PAGE_SHIFT),
    616 			    VM_PAGE_TO_PHYS(pg),
    617 			    VM_PROT_READ | VM_PROT_WRITE, 0);
    618 		}
    619 		pmap_update(pmap_kernel());
    620 		umap->flags |= UMAP_PAGES_LOCKED;
    621 		*npagesp = npages;
    622 	} else {
    623 		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
    624 	}
    625 
    626 out:
    627 	return (void *)(va + slot_offset);
    628 }
    629 
    630 /*
    631  * ubc_release:  free a file mapping window.
    632  */
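         /*
          * If the caller passes UBC_UNMAP, the kernel mapping of the window
          * is torn down immediately once the refcount drops to zero
          * (typically to avoid leaving cache aliases around); otherwise the
          * translation may be kept (UMAP_MAPPING_CACHED) and the window is
          * queued for reuse.
          */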
    633 
    634 static void __noinline
    635 ubc_release(void *va, int flags, struct vm_page **pgs, int npages)
    636 {
    637 	struct ubc_map *umap;
    638 	struct uvm_object *uobj;
    639 	vaddr_t umapva;
    640 	bool unmapped;
    641 	UVMHIST_FUNC(__func__);
    642 	UVMHIST_CALLARGS(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
    643 
    644 	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
    645 	umapva = UBC_UMAP_ADDR(umap);
    646 	uobj = umap->uobj;
    647 	KASSERT(uobj != NULL);
    648 
    649 	if (umap->flags & UMAP_PAGES_LOCKED) {
    650 		const voff_t endoff = umap->writeoff + umap->writelen;
    651 		const voff_t zerolen = round_page(endoff) - endoff;
    652 
    653 		KASSERT(npages == (round_page(endoff) -
    654 		    trunc_page(umap->writeoff)) >> PAGE_SHIFT);
    655 		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
    656 		if (zerolen) {
    657 			memset((char *)umapva + endoff, 0, zerolen);
    658 		}
    659 		umap->flags &= ~UMAP_PAGES_LOCKED;
    660 		rw_enter(uobj->vmobjlock, RW_WRITER);
    661 		for (u_int i = 0; i < npages; i++) {
    662 			struct vm_page *pg = pgs[i];
    663 #ifdef DIAGNOSTIC
    664 			paddr_t pa;
    665 			bool rv;
    666 			rv = pmap_extract(pmap_kernel(), umapva +
    667 			    umap->writeoff + (i << PAGE_SHIFT), &pa);
    668 			KASSERT(rv);
    669 			KASSERT(PHYS_TO_VM_PAGE(pa) == pg);
    670 #endif
    671 			pg->flags &= ~PG_FAKE;
    672 			KASSERTMSG(uvm_pagegetdirty(pg) ==
    673 			    UVM_PAGE_STATUS_DIRTY,
    674 			    "page %p not dirty", pg);
    675 			KASSERT(pg->loan_count == 0);
    676 			if (uvmpdpol_pageactivate_p(pg)) {
    677 				uvm_pagelock(pg);
    678 				uvm_pageactivate(pg);
    679 				uvm_pageunlock(pg);
    680 			}
    681 		}
    682 		pmap_kremove(umapva, ubc_winsize);
    683 		pmap_update(pmap_kernel());
    684 		uvm_page_unbusy(pgs, npages);
    685 		rw_exit(uobj->vmobjlock);
    686 		unmapped = true;
    687 	} else {
    688 		unmapped = false;
    689 	}
    690 
    691 	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
    692 	umap->writeoff = 0;
    693 	umap->writelen = 0;
    694 	umap->refcount--;
    695 	if (umap->refcount == 0) {
    696 		if (flags & UBC_UNMAP) {
    697 			/*
    698 			 * Invalidate any cached mappings if requested.
    699 			 * This is typically used to avoid leaving
    700 			 * incompatible cache aliases around indefinitely.
    701 			 */
    702 			rw_enter(uobj->vmobjlock, RW_WRITER);
    703 			pmap_remove(pmap_kernel(), umapva,
    704 				    umapva + ubc_winsize);
    705 			pmap_update(pmap_kernel());
    706 			rw_exit(uobj->vmobjlock);
    707 
    708 			umap->flags &= ~UMAP_MAPPING_CACHED;
    709 			LIST_REMOVE(umap, hash);
    710 			LIST_REMOVE(umap, list);
    711 			umap->uobj = NULL;
    712 			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
    713 			    inactive);
    714 		} else {
    715 			if (!unmapped) {
    716 				umap->flags |= UMAP_MAPPING_CACHED;
    717 			}
    718 			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
    719 			    inactive);
    720 		}
    721 	}
    722 	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
    723 	    umap->refcount, 0, 0);
    724 	rw_exit(ubc_object.uobj.vmobjlock);
    725 }
    726 
    727 /*
    728  * ubc_uiomove: move data to/from an object.
    729  */
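         /*
          * A minimal, hypothetical caller sketch (names and error handling
          * are simplified and not taken from any particular file system):
          *
          *	vsize_t bytelen = MIN(uio->uio_resid, vp->v_size - uio->uio_offset);
          *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, UVM_ADV_SEQUENTIAL,
          *	    UBC_READ | UBC_PARTIALOK);
          */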
    730 
    731 int
    732 ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    733     int flags)
    734 {
    735 	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
    736 	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
    737 	voff_t off;
    738 	int error, npages;
    739 
    740 	KASSERT(todo <= uio->uio_resid);
    741 	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
    742 	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));
    743 
    744 #ifdef UBC_USE_PMAP_DIRECT
    745 	/*
     746 	 * During direct access, pages need to be held busy to prevent them
     747 	 * from changing identity, so if we read or write an object into a
     748 	 * mapped view of the same object we could deadlock while faulting.
     749 	 *
     750 	 * Avoid the problem by disallowing direct access if the object
     751 	 * might be visible somewhere via mmap().
    752 	 *
    753 	 * XXX concurrent reads cause thundering herd issues with PG_BUSY.
    754 	 * In the future enable by default for writes or if ncpu<=2, and
    755 	 * make the toggle override that.
    756 	 */
    757 	if ((ubc_direct && (flags & UBC_ISMAPPED) == 0) ||
    758 	    (flags & UBC_FAULTBUSY) != 0) {
    759 		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
    760 	}
    761 #endif
    762 
    763 	off = uio->uio_offset;
    764 	error = 0;
    765 	while (todo > 0) {
    766 		vsize_t bytelen = todo;
    767 		void *win;
    768 
    769 		npages = __arraycount(pgs);
    770 		win = ubc_alloc(uobj, off, &bytelen, advice, flags, pgs,
    771 		    &npages);
    772 		if (error == 0) {
    773 			error = uiomove(win, bytelen, uio);
    774 		}
    775 		if (error != 0 && overwrite) {
    776 			/*
    777 			 * if we haven't initialized the pages yet,
    778 			 * do it now.  it's safe to use memset here
    779 			 * because we just mapped the pages above.
    780 			 */
    781 			printf("%s: error=%d\n", __func__, error);
    782 			memset(win, 0, bytelen);
    783 		}
    784 		ubc_release(win, flags, pgs, npages);
    785 		off += bytelen;
    786 		todo -= bytelen;
    787 		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
    788 			break;
    789 		}
    790 	}
    791 
    792 	return error;
    793 }
    794 
    795 /*
    796  * ubc_zerorange: set a range of bytes in an object to zero.
    797  */
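         /*
          * Illustrative use only (a hypothetical caller, not from this file):
          * a file system growing a file from oldsize to newsize might clear
          * the newly exposed bytes with
          *
          *	ubc_zerorange(&vp->v_uobj, oldsize, newsize - oldsize, 0);
          */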
    798 
    799 void
    800 ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
    801 {
    802 	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
    803 	int npages;
    804 
    805 #ifdef UBC_USE_PMAP_DIRECT
    806 	if (ubc_direct || (flags & UBC_FAULTBUSY) != 0) {
    807 		ubc_zerorange_direct(uobj, off, len, flags);
    808 		return;
    809 	}
    810 #endif
    811 
    812 	/*
    813 	 * XXXUBC invent kzero() and use it
    814 	 */
    815 
    816 	while (len) {
    817 		void *win;
    818 		vsize_t bytelen = len;
    819 
    820 		npages = __arraycount(pgs);
    821 		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE,
    822 		    pgs, &npages);
    823 		memset(win, 0, bytelen);
    824 		ubc_release(win, flags, pgs, npages);
    825 
    826 		off += bytelen;
    827 		len -= bytelen;
    828 	}
    829 }
    830 
    831 #ifdef UBC_USE_PMAP_DIRECT
    832 /* Copy data using direct map */
    833 
    834 /*
    835  * ubc_alloc_direct:  allocate a file mapping window using direct map
    836  */
    837 static int __noinline
    838 ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    839     int advice, int flags, struct vm_page **pgs, int *npages)
    840 {
    841 	voff_t pgoff;
    842 	int error;
    843 	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
    844 	int access_type = VM_PROT_READ;
    845 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
    846 
    847 	if (flags & UBC_WRITE) {
    848 		if (flags & UBC_FAULTBUSY)
    849 			gpflags |= PGO_OVERWRITE | PGO_NOBLOCKALLOC;
    850 #if 0
    851 		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
    852 #endif
    853 
    854 		/*
    855 		 * Tell genfs_getpages() we already have the journal lock,
     856 		 * and allow allocation past the current EOF.
    857 		 */
    858 		gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
    859 		access_type |= VM_PROT_WRITE;
    860 	} else {
    861 		/* Don't need the empty blocks allocated, PG_RDONLY is okay */
    862 		gpflags |= PGO_NOBLOCKALLOC;
    863 	}
    864 
    865 	pgoff = (offset & PAGE_MASK);
    866 	*lenp = MIN(*lenp, ubc_winsize - pgoff);
    867 
    868 again:
    869 	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
    870 	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
    871 	KASSERT(*lenp + pgoff <= ubc_winsize);
    872 	memset(pgs, 0, *npages * sizeof(pgs[0]));
    873 
    874 	rw_enter(uobj->vmobjlock, RW_WRITER);
    875 	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
    876 	    npages, 0, access_type, advice, gpflags);
    877 	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
    878 	if (error) {
    879 		if (error == EAGAIN) {
    880 			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
    881 			goto again;
    882 		}
    883 		return error;
    884 	}
    885 
    886 	rw_enter(uobj->vmobjlock, RW_WRITER);
    887 	for (int i = 0; i < *npages; i++) {
    888 		struct vm_page *pg = pgs[i];
    889 
    890 		KASSERT(pg != NULL);
    891 		KASSERT(pg != PGO_DONTCARE);
    892 		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
    893 		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);
    894 
    895 		/* Avoid breaking loan if possible, only do it on write */
    896 		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
    897 			pg = uvm_loanbreak(pg);
    898 			if (pg == NULL) {
    899 				uvm_page_unbusy(pgs, *npages);
    900 				rw_exit(uobj->vmobjlock);
    901 				uvm_wait("ubc_alloc_directl");
    902 				goto again;
    903 			}
    904 			pgs[i] = pg;
    905 		}
    906 
    907 		/* Page must be writable by now */
    908 		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);
    909 
    910 		/*
    911 		 * XXX For aobj pages.  No managed mapping - mark the page
    912 		 * dirty.
    913 		 */
    914 		if ((flags & UBC_WRITE) != 0) {
    915 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    916 		}
    917 	}
    918 	rw_exit(uobj->vmobjlock);
    919 
    920 	return 0;
    921 }
    922 
    923 static void __noinline
    924 ubc_direct_release(struct uvm_object *uobj,
    925 	int flags, struct vm_page **pgs, int npages)
    926 {
    927 	rw_enter(uobj->vmobjlock, RW_WRITER);
    928 	for (int i = 0; i < npages; i++) {
    929 		struct vm_page *pg = pgs[i];
    930 
    931 		pg->flags &= ~PG_BUSY;
    932 		UVM_PAGE_OWN(pg, NULL);
    933 		if (pg->flags & PG_RELEASED) {
    934 			pg->flags &= ~PG_RELEASED;
    935 			uvm_pagefree(pg);
    936 			continue;
    937 		}
    938 
    939 		if (uvm_pagewanted_p(pg) || uvmpdpol_pageactivate_p(pg)) {
    940 			uvm_pagelock(pg);
    941 			uvm_pageactivate(pg);
    942 			uvm_pagewakeup(pg);
    943 			uvm_pageunlock(pg);
    944 		}
    945 
     946 		/* The page was changed; it is no longer fake and no longer clean. */
    947 		if (flags & UBC_WRITE) {
    948 			KASSERTMSG(uvm_pagegetdirty(pg) ==
    949 			    UVM_PAGE_STATUS_DIRTY,
    950 			    "page %p not dirty", pg);
    951 			pg->flags &= ~PG_FAKE;
    952 		}
    953 	}
    954 	rw_exit(uobj->vmobjlock);
    955 }
    956 
    957 static int
    958 ubc_uiomove_process(void *win, size_t len, void *arg)
    959 {
    960 	struct uio *uio = (struct uio *)arg;
    961 
    962 	return uiomove(win, len, uio);
    963 }
    964 
    965 static int
    966 ubc_zerorange_process(void *win, size_t len, void *arg)
    967 {
    968 	memset(win, 0, len);
    969 	return 0;
    970 }
    971 
    972 static int __noinline
    973 ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    974     int flags)
    975 {
    976 	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
    977 	voff_t off;
    978 	int error, npages;
    979 	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
    980 
    981 	KASSERT(todo <= uio->uio_resid);
    982 	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
    983 	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));
    984 
    985 	off = uio->uio_offset;
    986 	error = 0;
    987 	while (todo > 0) {
    988 		vsize_t bytelen = todo;
    989 
    990 		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
    991 		    pgs, &npages);
    992 		if (error != 0) {
    993 			/* can't do anything, failed to get the pages */
    994 			break;
    995 		}
    996 
    997 		if (error == 0) {
    998 			error = uvm_direct_process(pgs, npages, off, bytelen,
    999 			    ubc_uiomove_process, uio);
   1000 		}
   1001 
   1002 		if (overwrite) {
   1003 			voff_t endoff;
   1004 
   1005 			/*
   1006 			 * if we haven't initialized the pages yet due to an
   1007 			 * error above, do it now.
   1008 			 */
   1009 			if (error != 0) {
   1010 				printf("%s: error=%d\n", __func__, error);
   1011 				(void) uvm_direct_process(pgs, npages, off,
   1012 				    bytelen, ubc_zerorange_process, NULL);
   1013 			}
   1014 
   1015 			off += bytelen;
   1016 			todo -= bytelen;
   1017 			endoff = off & (PAGE_SIZE - 1);
   1018 
   1019 			/*
   1020 			 * zero out the remaining portion of the final page
   1021 			 * (if any).
   1022 			 */
   1023 			if (todo == 0 && endoff != 0) {
   1024 				vsize_t zlen = PAGE_SIZE - endoff;
   1025 				(void) uvm_direct_process(pgs + npages - 1, 1,
   1026 				    off, zlen, ubc_zerorange_process, NULL);
   1027 			}
   1028 		} else {
   1029 			off += bytelen;
   1030 			todo -= bytelen;
   1031 		}
   1032 
   1033 		ubc_direct_release(uobj, flags, pgs, npages);
   1034 
   1035 		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
   1036 			break;
   1037 		}
   1038 	}
   1039 
   1040 	return error;
   1041 }
   1042 
   1043 static void __noinline
   1044 ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
   1045 {
   1046 	int error, npages;
   1047 	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
   1048 
   1049 	flags |= UBC_WRITE;
   1050 
   1051 	error = 0;
   1052 	while (todo > 0) {
   1053 		vsize_t bytelen = todo;
   1054 
   1055 		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
   1056 		    flags, pgs, &npages);
   1057 		if (error != 0) {
   1058 			/* can't do anything, failed to get the pages */
   1059 			break;
   1060 		}
   1061 
   1062 		error = uvm_direct_process(pgs, npages, off, bytelen,
   1063 		    ubc_zerorange_process, NULL);
   1064 
   1065 		ubc_direct_release(uobj, flags, pgs, npages);
   1066 
   1067 		off += bytelen;
   1068 		todo -= bytelen;
   1069 	}
   1070 }
   1071 
   1072 #endif /* UBC_USE_PMAP_DIRECT */
   1073 
   1074 /*
   1075  * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
   1076  */
   1077 
   1078 void
   1079 ubc_purge(struct uvm_object *uobj)
   1080 {
   1081 	struct ubc_map *umap;
   1082 	vaddr_t va;
   1083 
   1084 	KASSERT(uobj->uo_npages == 0);
   1085 
   1086 	/*
   1087 	 * Safe to check without lock held, as ubc_alloc() removes
   1088 	 * the mapping and list entry in the correct order.
   1089 	 */
   1090 	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
   1091 		return;
   1092 	}
   1093 	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
   1094 	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
   1095 		KASSERT(umap->refcount == 0);
   1096 		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
   1097 			KASSERT(!pmap_extract(pmap_kernel(),
   1098 			    va + UBC_UMAP_ADDR(umap), NULL));
   1099 		}
   1100 		LIST_REMOVE(umap, list);
   1101 		LIST_REMOVE(umap, hash);
   1102 		umap->flags &= ~UMAP_MAPPING_CACHED;
   1103 		umap->uobj = NULL;
   1104 	}
   1105 	rw_exit(ubc_object.uobj.vmobjlock);
   1106 }
   1107