/*	$NetBSD: genfs_vnops.c,v 1.36 2001/08/17 05:51:29 chs Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

int
genfs_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
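
/*
 * Usage note (an illustrative sketch, not part of the original file):
 * filesystems normally pick up these defaults by pointing entries of
 * their vnodeop tables at the genfs_*() routines.  Assuming a
 * hypothetical filesystem "myfs", the wiring would look roughly like:
 *
 *	const struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc, vn_default_error },
 *		{ &vop_poll_desc, genfs_poll },
 *		{ &vop_fsync_desc, genfs_fsync },
 *		{ &vop_seek_desc, genfs_seek },
 *		{ &vop_abortop_desc, genfs_abortop },
 *		{ NULL, NULL }
 *	};
 */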

int
genfs_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int wait;

	wait = (ap->a_flags & FSYNC_WAIT) != 0;
	vflushbuf(vp, wait);
	if ((ap->a_flags & FSYNC_DATAONLY) != 0)
		return (0);
	else
		return (VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0));
}

int
genfs_seek(v)
	void *v;
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

int
genfs_abortop(v)
	void *v;
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

int
genfs_fcntl(v)
	void *v;
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(v)
	void *v;
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(v)
	void *v;
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(v)
	void *v;
{

	return (EINVAL);
}

/*ARGSUSED*/
int
genfs_eopnotsupp(v)
	void *v;
{

	return (EOPNOTSUPP);
}

/*
 * Called when an fs doesn't support a particular vop, but the vop
 * nevertheless needs to vrele, vput, or unlock the vnodes passed in.
 */
int
genfs_eopnotsupp_rele(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp;
	int flags, i, j, offset;

	flags = desc->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		if ((j = flags & VDESC_VP0_WILLPUT) != 0) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
			switch (j) {
			case VDESC_VP0_WILLPUT:
				vput(vp);
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}
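
/*
 * Note on the decoding above (informational): masking vdesc_flags with
 * VDESC_VP0_WILLPUT can produce any of the three case labels only
 * because, in the flag layout assumed from vnode_if.h, the WILLPUT mask
 * is the union of the WILLRELE and WILLUNLOCK bits.  A single
 * mask-and-switch therefore distinguishes "put" (both bits set) from
 * "unlock" or "rele" alone, one vnode argument per loop iteration.
 */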

/*ARGSUSED*/
int
genfs_ebadf(v)
	void *v;
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(v)
	void *v;
{

	return (ENOTTY);
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(v)
	void *v;
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Lock the node.
 */
int
genfs_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
		&vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(&vp->v_lock));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(v)
	void *v;
{
	return (0);
}

int
genfs_noislocked(v)
	void *v;
{
	return (0);
}
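
/*
 * Usage sketch (illustrative, again assuming a hypothetical "myfs"): a
 * filesystem whose vnodes use the lock embedded in struct vnode can
 * point its table entries at the lockmgr-based routines above:
 *
 *	{ &vop_lock_desc, genfs_lock },
 *	{ &vop_unlock_desc, genfs_unlock },
 *	{ &vop_islocked_desc, genfs_islocked },
 *
 * while a filesystem that does no per-vnode locking would substitute
 * genfs_nolock, genfs_nounlock and genfs_noislocked in those slots.
 */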

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(v)
	void *v;
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

int
genfs_mmap(v)
	void *v;
{
	return 0;
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset, raoffset;
	daddr_t lbn, blkno;
	int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift, dev_bsize;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct vm_page *pgs[16];			/* XXXUBC 16 */
	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curproc */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
		    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	/* XXXUBC temp limit */
	if (*ap->a_count > 16) {
		return EINVAL;
	}

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	error = VOP_SIZE(vp, vp->v_uvm.u_size, &diskeof);
	if (error) {
		return error;
	}
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_uvm.u_size,
			      origoffset + (orignpages << PAGE_SHIFT));
		error = VOP_SIZE(vp, newsize, &memeof);
		if (error) {
			return error;
		}
	} else {
		memeof = diskeof;
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
			    origoffset, *ap->a_count, memeof,0);
		return EINVAL;
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
			      UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);

		return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
	}

	/* vnode is VOP_LOCKed, uobj is locked */

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	fs_bsize = 1 << fs_bshift;
	dev_bsize = 1 << dev_bshift;
	KASSERT((diskeof & (dev_bsize - 1)) == 0);
	KASSERT((memeof & (dev_bsize - 1)) == 0);

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT)
				+ fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
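
	/*
	 * Worked example (illustrative numbers, not from the code above):
	 * with 8KB filesystem blocks, 4KB pages, origoffset 0x3000 and
	 * orignpages 2, startoffset rounds down to 0x2000 and endoffset
	 * rounds up to 0x6000, so ridx is 1 and pgs[0] is left free for
	 * the extra page needed to complete the first block.
	 */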

	memset(pgs, 0, sizeof(pgs));
	uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], UFP_ALL);

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 * PGO_OVERWRITE also means that the caller guarantees
	 * that the pages already have backing store allocated.
	 */

	if (flags & PGO_OVERWRITE) {
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			if (pg->flags & PG_FAKE) {
				uvm_pagezero(pg);
				pg->flags &= ~(PG_FAKE);
			}
			pg->flags &= ~(PG_RDONLY);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (write && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		raoffset = origoffset + (orignpages << PAGE_SHIFT);
		npages += ridx;
		goto raout;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * XXXUBC we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  for now, unlock them all and
		 * start over.
		 */

		for (i = 0; i < orignpages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			if (pg->flags & PG_FAKE) {
				pg->flags |= PG_RELEASED;
			}
		}
		uvm_page_unbusy(&pgs[ridx], orignpages);
		memset(pgs, 0, sizeof(pgs));

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
			    startoffset, endoffset, 0,0);
		npgs = npages;
		uvn_findpages(uobj, startoffset, &npgs, pgs, UFP_ALL);
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK |
			     UVMPAGER_MAPIN_READ);

	s = splbio();
	mbp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ|(async ? B_CALL : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 */

	if (tailbytes > 0) {
		memset((void *)(kva + bytes), 0, tailbytes);
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	if (write) {
		lockmgr(&vp->v_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&vp->v_glock, LK_SHARED, NULL);
	}

	bp = NULL;
	for (offset = startoffset;
	     bytes > 0;
	     offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & (PG_FAKE|PG_RDONLY)) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
				    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
				    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			       pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
				      (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of reading it.
		 * if this is a read access, mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
					 trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			       iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				} else {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			bp = pool_get(&bufpool, PR_WAITOK);
			splx(s);
			bp->b_data = (char *)kva + offset - startoffset;
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_READ|B_CALL;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		bp->b_dev = devvp->v_rdev;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
				       dev_bshift);
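
		/*
		 * Worked example for the adjustment above (illustrative
		 * numbers): with 8KB filesystem blocks (fs_bshift 13) and
		 * 512-byte device blocks (dev_bshift 9), an offset 4KB into
		 * block 2 (offset 0x5000) gives
		 * blkno + ((0x5000 - 0x4000) >> 9) == blkno + 8 sectors.
		 */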

		UVMHIST_LOG(ubchist, "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
			    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(bp);
	}

loopdone:
	if (skipbytes) {
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}

	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		lockmgr(&vp->v_glock, LK_RELEASE, NULL);
		return 0;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	s = splbio();
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	raoffset = startoffset + totalbytes;

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (error == 0 && sawhole && write) {
		error = VOP_BALLOCN(vp, startoffset, npages << PAGE_SHIFT,
				   cred, 0);
		if (error) {
			UVMHIST_LOG(ubchist, "balloc lbn 0x%x -> %d",
				    lbn, error,0,0);
			lockmgr(&vp->v_glock, LK_RELEASE, NULL);
			simple_lock(&uobj->vmobjlock);
			goto out;
		}
	}
	lockmgr(&vp->v_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * see if we want to start any readahead.
	 * XXXUBC for now, just read the next 128k on 64k boundaries.
	 * this is pretty nonsensical, but it is 50% faster than reading
	 * just the next 64k.
	 */

raout:
	if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
	    PAGE_SHIFT <= 16) {
		int racount;

		racount = 1 << (16 - PAGE_SHIFT);
		(void) VOP_GETPAGES(vp, raoffset, NULL, &racount, 0,
				    VM_PROT_READ, 0, 0);
		simple_lock(&uobj->vmobjlock);

		racount = 1 << (16 - PAGE_SHIFT);
		(void) VOP_GETPAGES(vp, raoffset + 0x10000, NULL, &racount, 0,
				    VM_PROT_READ, 0, 0);
		simple_lock(&uobj->vmobjlock);
	}

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

out:
	if (error) {
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
				    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_WANTED) {
				wakeup(pgs[i]);
			}
			if (pgs[i]->flags & PG_RELEASED) {
				uvm_unlock_pageq();
				(uobj->pgops->pgo_releasepg)(pgs[i], NULL);
				uvm_lock_pageq();
				continue;
			}
			if (pgs[i]->flags & PG_FAKE) {
				uvm_pagefree(pgs[i]);
				continue;
			}
			uvm_pageactivate(pgs[i]);
			pgs[i]->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pgs[i], NULL);
		}
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		return error;
	}

	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		if (pgs[i] == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
		if (pgs[i]->flags & PG_FAKE) {
			UVMHIST_LOG(ubchist, "unfaking pg %p offset 0x%x",
				    pgs[i], pgs[i]->offset,0,0);
			pgs[i]->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
			pmap_clear_reference(pgs[i]);
		}
		if (write) {
			pgs[i]->flags &= ~(PG_RDONLY);
		}
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
				    pgs[i], pgs[i]->offset,0,0);
			if (pgs[i]->flags & PG_WANTED) {
				wakeup(pgs[i]);
			}
			if (pgs[i]->flags & PG_RELEASED) {
				uvm_unlock_pageq();
				(uobj->pgops->pgo_releasepg)(pgs[i], NULL);
				uvm_lock_pageq();
				continue;
			}
			uvm_pageactivate(pgs[i]);
			pgs[i]->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pgs[i], NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		       orignpages * sizeof(struct vm_page *));
	}
	return 0;
}
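
/*
 * Caller sketch (illustrative, not from this file): VOP_GETPAGES is
 * invoked with the object locked; on a successful synchronous call the
 * object lock has been released and the requested pages come back
 * PG_BUSY, so the caller must unbusy them when done.  A minimal
 * single-page read might look like:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	simple_lock(&vp->v_uvm.u_obj.vmobjlock);
 *	error = VOP_GETPAGES(vp, trunc_page(offset), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 */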

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 */

int
genfs_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		struct vm_page **a_m;
		int a_count;
		int a_flags;
		int *a_rtvals;
	} */ *ap = v;

	int s, error, npages, run;
	int fs_bshift, dev_bshift, dev_bsize;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	boolean_t async = (ap->a_flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p offset 0x%x count %d",
		    vp, ap->a_m[0]->offset, ap->a_count, 0);

	simple_unlock(&vp->v_uvm.u_obj.vmobjlock);

	error = VOP_SIZE(vp, vp->v_uvm.u_size, &eof);
	if (error) {
		return error;
	}

	error = 0;
	npages = ap->a_count;
	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	dev_bsize = 1 << dev_bshift;
	KASSERT((eof & (dev_bsize - 1)) == 0);

	pg = ap->a_m[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(ap->a_m, npages, UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	vp->v_numoutput += 2;
	mbp = pool_get(&bufpool, PR_WAITOK);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
		    vp, mbp, vp->v_numoutput, bytes);
	splx(s);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE |
		(async ? B_CALL : 0) |
		(curproc == uvm.pagedaemon_proc ? B_PDAEMON : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

	bp = NULL;
	for (offset = startoffset;
	     bytes > 0;
	     offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			vp->v_numoutput++;
			bp = pool_get(&bufpool, PR_WAITOK);
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
				    vp, bp, vp->v_numoutput, 0);
			splx(s);
			bp->b_data = (char *)kva +
				(vaddr_t)(offset - pg->offset);
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		bp->b_dev = devvp->v_rdev;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
				       dev_bshift);
		UVMHIST_LOG(ubchist, "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
			    vp, offset, bp->b_bcount, bp->b_blkno);
		VOP_STRATEGY(bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
		s = splbio();
		mbp->b_resid -= skipbytes;
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return 0;
	}
	if (bp != NULL) {
		UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
		error = biowait(mbp);
	}
	if (bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(mbp);
	}
	s = splbio();
	vwakeup(mbp);
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return error;
}
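
/*
 * Caller sketch (hypothetical; the real callers live in the UVM pager
 * code): the pages in a_m are handed in busy, and the object lock is
 * held on entry and released by this routine.  A synchronous flush of
 * a single page might look like:
 *
 *	simple_lock(&vp->v_uvm.u_obj.vmobjlock);
 *	error = VOP_PUTPAGES(vp, &pg, 1, PGO_SYNCIO, NULL);
 *
 * NULL is passed for a_rtvals in this sketch since the field is not
 * referenced by the routine above.
 */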

int
genfs_size(v)
	void *v;
{
	struct vop_size_args /* {
		struct vnode *a_vp;
		off_t a_size;
		off_t *a_eobp;
	} */ *ap = v;
	int bsize;

	bsize = 1 << ap->a_vp->v_mount->mnt_fs_bshift;
	*ap->a_eobp = (ap->a_size + bsize - 1) & ~(bsize - 1);
	return 0;
}
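
/*
 * Worked example (illustrative): with 8KB filesystem blocks
 * (mnt_fs_bshift == 13), a size of 0x2100 is rounded up to an
 * end-of-block of 0x4000.
 */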