genfs_vnops.c revision 1.135
      1  1.133      yamt /*	$NetBSD: genfs_vnops.c,v 1.135 2006/10/14 09:15:52 yamt Exp $	*/
      2    1.6      fvdl 
      3    1.6      fvdl /*
      4    1.6      fvdl  * Copyright (c) 1982, 1986, 1989, 1993
      5    1.6      fvdl  *	The Regents of the University of California.  All rights reserved.
      6    1.6      fvdl  *
      7    1.6      fvdl  * Redistribution and use in source and binary forms, with or without
      8    1.6      fvdl  * modification, are permitted provided that the following conditions
      9    1.6      fvdl  * are met:
     10    1.6      fvdl  * 1. Redistributions of source code must retain the above copyright
     11    1.6      fvdl  *    notice, this list of conditions and the following disclaimer.
     12    1.6      fvdl  * 2. Redistributions in binary form must reproduce the above copyright
     13    1.6      fvdl  *    notice, this list of conditions and the following disclaimer in the
     14    1.6      fvdl  *    documentation and/or other materials provided with the distribution.
     15   1.81       agc  * 3. Neither the name of the University nor the names of its contributors
     16    1.6      fvdl  *    may be used to endorse or promote products derived from this software
     17    1.6      fvdl  *    without specific prior written permission.
     18    1.6      fvdl  *
     19    1.6      fvdl  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20    1.6      fvdl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21    1.6      fvdl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22    1.6      fvdl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23    1.6      fvdl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24    1.6      fvdl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25    1.6      fvdl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26    1.6      fvdl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27    1.6      fvdl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28    1.6      fvdl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29    1.6      fvdl  * SUCH DAMAGE.
     30    1.6      fvdl  *
     31    1.6      fvdl  */
     32   1.40     lukem 
     33   1.40     lukem #include <sys/cdefs.h>
     34  1.133      yamt __KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.135 2006/10/14 09:15:52 yamt Exp $");
     35    1.5     perry 
     36   1.92       dbj #if defined(_KERNEL_OPT)
     37    1.8   thorpej #include "opt_nfsserver.h"
     38   1.92       dbj #endif
     39    1.8   thorpej 
     40    1.1   mycroft #include <sys/param.h>
     41    1.1   mycroft #include <sys/systm.h>
     42    1.6      fvdl #include <sys/proc.h>
     43    1.1   mycroft #include <sys/kernel.h>
     44    1.1   mycroft #include <sys/mount.h>
     45    1.1   mycroft #include <sys/namei.h>
     46    1.1   mycroft #include <sys/vnode.h>
     47   1.13  wrstuden #include <sys/fcntl.h>
     48  1.135      yamt #include <sys/kmem.h>
     49    1.3   mycroft #include <sys/poll.h>
     50   1.37       chs #include <sys/mman.h>
     51   1.66  jdolecek #include <sys/file.h>
     52  1.125      elad #include <sys/kauth.h>
     53    1.1   mycroft 
     54    1.1   mycroft #include <miscfs/genfs/genfs.h>
     55   1.37       chs #include <miscfs/genfs/genfs_node.h>
     56    1.6      fvdl #include <miscfs/specfs/specdev.h>
     57    1.1   mycroft 
     58   1.21       chs #include <uvm/uvm.h>
     59   1.21       chs #include <uvm/uvm_pager.h>
     60   1.21       chs 
     61    1.8   thorpej #ifdef NFSSERVER
     62    1.8   thorpej #include <nfs/rpcv2.h>
     63    1.8   thorpej #include <nfs/nfsproto.h>
     64    1.8   thorpej #include <nfs/nfs.h>
     65    1.8   thorpej #include <nfs/nqnfs.h>
     66    1.8   thorpej #include <nfs/nfs_var.h>
     67    1.8   thorpej #endif
     68    1.8   thorpej 
     69  1.130       chs static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     70  1.130       chs     off_t, enum uio_rw);
     71  1.130       chs static void genfs_dio_iodone(struct buf *);
     72  1.130       chs 
     73  1.130       chs static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     74  1.130       chs     void (*)(struct buf *));
     75  1.118     perry static inline void genfs_rel_pages(struct vm_page **, int);
     76   1.70  christos static void filt_genfsdetach(struct knote *);
     77   1.70  christos static int filt_genfsread(struct knote *, long);
     78   1.70  christos static int filt_genfsvnode(struct knote *, long);
     79   1.70  christos 
     80  1.110      yamt #define MAX_READ_PAGES	16 	/* XXXUBC 16 */
     81   1.41  christos 
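                          /*
                           * genfs_maxdio caps how much i/o the direct-i/o path issues at a
                           * time; a descriptive note, inferred from genfs_do_directio()
                           * declared above.
                           */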
     82  1.130       chs int genfs_maxdio = MAXPHYS;
     83  1.130       chs 
     84    1.1   mycroft int
     85   1.53     enami genfs_poll(void *v)
     86    1.1   mycroft {
     87    1.3   mycroft 	struct vop_poll_args /* {
     88    1.1   mycroft 		struct vnode *a_vp;
     89    1.3   mycroft 		int a_events;
     90  1.116  christos 		struct lwp *a_l;
     91    1.1   mycroft 	} */ *ap = v;
     92    1.1   mycroft 
     93    1.3   mycroft 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
     94    1.1   mycroft }
     95    1.1   mycroft 
     96    1.1   mycroft int
     97   1.53     enami genfs_seek(void *v)
     98    1.4    kleink {
     99    1.4    kleink 	struct vop_seek_args /* {
    100    1.4    kleink 		struct vnode *a_vp;
    101    1.4    kleink 		off_t a_oldoff;
    102    1.4    kleink 		off_t a_newoff;
    103  1.125      elad 		kauth_cred_t cred;
    104    1.4    kleink 	} */ *ap = v;
    105    1.4    kleink 
    106    1.4    kleink 	if (ap->a_newoff < 0)
    107    1.4    kleink 		return (EINVAL);
    108    1.4    kleink 
    109    1.4    kleink 	return (0);
    110    1.4    kleink }
    111    1.4    kleink 
    112    1.4    kleink int
    113   1.53     enami genfs_abortop(void *v)
    114    1.1   mycroft {
    115    1.1   mycroft 	struct vop_abortop_args /* {
    116    1.1   mycroft 		struct vnode *a_dvp;
    117    1.1   mycroft 		struct componentname *a_cnp;
    118    1.1   mycroft 	} */ *ap = v;
    119   1.53     enami 
    120    1.1   mycroft 	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
    121   1.19   thorpej 		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
    122    1.1   mycroft 	return (0);
    123   1.13  wrstuden }
    124   1.13  wrstuden 
    125   1.13  wrstuden int
    126   1.53     enami genfs_fcntl(void *v)
    127   1.13  wrstuden {
    128   1.13  wrstuden 	struct vop_fcntl_args /* {
    129   1.13  wrstuden 		struct vnode *a_vp;
    130   1.13  wrstuden 		u_int a_command;
    131   1.13  wrstuden 		caddr_t a_data;
    132   1.13  wrstuden 		int a_fflag;
    133  1.125      elad 		kauth_cred_t a_cred;
    134  1.116  christos 		struct lwp *a_l;
    135   1.13  wrstuden 	} */ *ap = v;
    136   1.13  wrstuden 
    137   1.13  wrstuden 	if (ap->a_command == F_SETFL)
    138   1.13  wrstuden 		return (0);
    139   1.13  wrstuden 	else
    140   1.13  wrstuden 		return (EOPNOTSUPP);
    141    1.1   mycroft }
    142    1.1   mycroft 
    143    1.1   mycroft /*ARGSUSED*/
    144    1.1   mycroft int
    145  1.131  christos genfs_badop(void *v __unused)
    146    1.1   mycroft {
    147    1.1   mycroft 
    148    1.1   mycroft 	panic("genfs: bad op");
    149    1.1   mycroft }
    150    1.1   mycroft 
    151    1.1   mycroft /*ARGSUSED*/
    152    1.1   mycroft int
    153  1.131  christos genfs_nullop(void *v __unused)
    154    1.1   mycroft {
    155    1.1   mycroft 
    156    1.1   mycroft 	return (0);
    157   1.10    kleink }
    158   1.10    kleink 
    159   1.10    kleink /*ARGSUSED*/
    160   1.10    kleink int
    161  1.131  christos genfs_einval(void *v __unused)
    162   1.10    kleink {
    163   1.10    kleink 
    164   1.10    kleink 	return (EINVAL);
    165    1.1   mycroft }
    166    1.1   mycroft 
    167   1.12  wrstuden /*
    168   1.74  jdolecek  * Called when an fs doesn't support a particular vop.
    169   1.74  jdolecek  * This takes care to vrele, vput, or vunlock passed in vnodes.
    170   1.12  wrstuden  */
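                          /*
                           * An illustrative sketch (not part of this file): a file system
                           * lacking hard-link support could point the corresponding entry of
                           * its vnodeopv_entry_desc table at this routine,
                           *
                           *	{ &vop_link_desc, genfs_eopnotsupp },
                           *
                           * and the VDESC_VP0_WILLPUT/WILLRELE/WILLUNLOCK flags in the
                           * vnodeop descriptor then drive the cleanup loop below.
                           */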
    171   1.12  wrstuden int
    172   1.75  jdolecek genfs_eopnotsupp(void *v)
    173   1.12  wrstuden {
    174   1.12  wrstuden 	struct vop_generic_args /*
    175   1.12  wrstuden 		struct vnodeop_desc *a_desc;
    176   1.53     enami 		/ * other random data follows, presumably * /
    177   1.12  wrstuden 	} */ *ap = v;
    178   1.12  wrstuden 	struct vnodeop_desc *desc = ap->a_desc;
    179   1.74  jdolecek 	struct vnode *vp, *vp_last = NULL;
    180   1.12  wrstuden 	int flags, i, j, offset;
    181   1.12  wrstuden 
    182   1.12  wrstuden 	flags = desc->vdesc_flags;
    183   1.12  wrstuden 	for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
    184   1.12  wrstuden 		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
    185   1.12  wrstuden 			break;	/* stop at end of list */
    186   1.12  wrstuden 		if ((j = flags & VDESC_VP0_WILLPUT)) {
    187   1.53     enami 			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
    188   1.74  jdolecek 
    189   1.74  jdolecek 			/* Skip if NULL */
    190   1.74  jdolecek 			if (!vp)
    191   1.74  jdolecek 				continue;
    192   1.74  jdolecek 
    193   1.12  wrstuden 			switch (j) {
    194   1.12  wrstuden 			case VDESC_VP0_WILLPUT:
    195   1.74  jdolecek 				/* Check for dvp == vp cases */
    196   1.74  jdolecek 				if (vp == vp_last)
    197   1.74  jdolecek 					vrele(vp);
    198   1.74  jdolecek 				else {
    199   1.74  jdolecek 					vput(vp);
    200   1.74  jdolecek 					vp_last = vp;
    201   1.74  jdolecek 				}
    202   1.12  wrstuden 				break;
    203   1.12  wrstuden 			case VDESC_VP0_WILLUNLOCK:
    204   1.12  wrstuden 				VOP_UNLOCK(vp, 0);
    205   1.12  wrstuden 				break;
    206   1.12  wrstuden 			case VDESC_VP0_WILLRELE:
    207   1.12  wrstuden 				vrele(vp);
    208   1.12  wrstuden 				break;
    209   1.12  wrstuden 			}
    210   1.12  wrstuden 		}
    211   1.12  wrstuden 	}
    212   1.12  wrstuden 
    213   1.12  wrstuden 	return (EOPNOTSUPP);
    214   1.12  wrstuden }
    215   1.12  wrstuden 
    216    1.1   mycroft /*ARGSUSED*/
    217    1.1   mycroft int
    218  1.131  christos genfs_ebadf(void *v __unused)
    219    1.1   mycroft {
    220    1.1   mycroft 
    221    1.1   mycroft 	return (EBADF);
    222    1.9  matthias }
    223    1.9  matthias 
    224    1.9  matthias /* ARGSUSED */
    225    1.9  matthias int
    226  1.131  christos genfs_enoioctl(void *v __unused)
    227    1.9  matthias {
    228    1.9  matthias 
    229   1.51    atatat 	return (EPASSTHROUGH);
    230    1.6      fvdl }
    231    1.6      fvdl 
    232    1.6      fvdl 
    233    1.6      fvdl /*
    234   1.15      fvdl  * Eliminate all activity associated with the requested vnode
    235    1.6      fvdl  * and with all vnodes aliased to the requested vnode.
    236    1.6      fvdl  */
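                          /*
                           * A minimal sketch of a caller, assuming the usual VOP_REVOKE()
                           * interface; the flags argument must include REVOKEALL, or the
                           * DIAGNOSTIC check below panics:
                           *
                           *	(void) VOP_REVOKE(vp, REVOKEALL);
                           */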
    237    1.6      fvdl int
    238   1.53     enami genfs_revoke(void *v)
    239    1.6      fvdl {
    240    1.6      fvdl 	struct vop_revoke_args /* {
    241    1.6      fvdl 		struct vnode *a_vp;
    242    1.6      fvdl 		int a_flags;
    243    1.6      fvdl 	} */ *ap = v;
    244    1.6      fvdl 	struct vnode *vp, *vq;
    245  1.116  christos 	struct lwp *l = curlwp;		/* XXX */
    246    1.6      fvdl 
    247    1.6      fvdl #ifdef DIAGNOSTIC
    248    1.6      fvdl 	if ((ap->a_flags & REVOKEALL) == 0)
    249    1.6      fvdl 		panic("genfs_revoke: not revokeall");
    250    1.6      fvdl #endif
    251    1.6      fvdl 
    252    1.6      fvdl 	vp = ap->a_vp;
    253    1.6      fvdl 	simple_lock(&vp->v_interlock);
    254    1.6      fvdl 
    255    1.6      fvdl 	if (vp->v_flag & VALIASED) {
    256    1.6      fvdl 		/*
    257    1.6      fvdl 		 * If a vgone (or vclean) is already in progress,
    258    1.6      fvdl 		 * wait until it is done and return.
    259    1.6      fvdl 		 */
    260    1.6      fvdl 		if (vp->v_flag & VXLOCK) {
    261    1.6      fvdl 			vp->v_flag |= VXWANT;
    262   1.83        pk 			ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
    263   1.83        pk 				&vp->v_interlock);
    264    1.6      fvdl 			return (0);
    265    1.6      fvdl 		}
    266    1.6      fvdl 		/*
    267    1.6      fvdl 		 * Ensure that vp will not be vgone'd while we
    268    1.6      fvdl 		 * are eliminating its aliases.
    269    1.6      fvdl 		 */
    270    1.6      fvdl 		vp->v_flag |= VXLOCK;
    271    1.6      fvdl 		simple_unlock(&vp->v_interlock);
    272    1.6      fvdl 		while (vp->v_flag & VALIASED) {
    273    1.6      fvdl 			simple_lock(&spechash_slock);
    274    1.6      fvdl 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
    275    1.6      fvdl 				if (vq->v_rdev != vp->v_rdev ||
    276    1.6      fvdl 				    vq->v_type != vp->v_type || vp == vq)
    277    1.6      fvdl 					continue;
    278    1.6      fvdl 				simple_unlock(&spechash_slock);
    279    1.6      fvdl 				vgone(vq);
    280    1.6      fvdl 				break;
    281    1.6      fvdl 			}
    282    1.6      fvdl 			if (vq == NULLVP)
    283    1.6      fvdl 				simple_unlock(&spechash_slock);
    284    1.6      fvdl 		}
    285    1.6      fvdl 		/*
    286    1.6      fvdl 		 * Remove the lock so that vgone below will
    287    1.6      fvdl 		 * really eliminate the vnode after which time
    288    1.6      fvdl 		 * vgone will awaken any sleepers.
    289    1.6      fvdl 		 */
    290    1.6      fvdl 		simple_lock(&vp->v_interlock);
    291    1.6      fvdl 		vp->v_flag &= ~VXLOCK;
    292    1.6      fvdl 	}
    293  1.116  christos 	vgonel(vp, l);
    294    1.6      fvdl 	return (0);
    295    1.6      fvdl }
    296    1.6      fvdl 
    297    1.6      fvdl /*
    298   1.12  wrstuden  * Lock the node.
    299    1.6      fvdl  */
    300    1.6      fvdl int
    301   1.53     enami genfs_lock(void *v)
    302    1.6      fvdl {
    303    1.6      fvdl 	struct vop_lock_args /* {
    304    1.6      fvdl 		struct vnode *a_vp;
    305    1.6      fvdl 		int a_flags;
    306    1.6      fvdl 	} */ *ap = v;
    307    1.6      fvdl 	struct vnode *vp = ap->a_vp;
    308    1.6      fvdl 
    309   1.86   hannken 	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
    310    1.6      fvdl }
    311    1.6      fvdl 
    312    1.6      fvdl /*
    313   1.12  wrstuden  * Unlock the node.
    314    1.6      fvdl  */
    315    1.6      fvdl int
    316   1.53     enami genfs_unlock(void *v)
    317    1.6      fvdl {
    318    1.6      fvdl 	struct vop_unlock_args /* {
    319    1.6      fvdl 		struct vnode *a_vp;
    320    1.6      fvdl 		int a_flags;
    321    1.6      fvdl 	} */ *ap = v;
    322    1.6      fvdl 	struct vnode *vp = ap->a_vp;
    323    1.6      fvdl 
    324   1.86   hannken 	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
    325   1.53     enami 	    &vp->v_interlock));
    326    1.6      fvdl }
    327    1.6      fvdl 
    328    1.6      fvdl /*
    329   1.12  wrstuden  * Return whether or not the node is locked.
    330    1.6      fvdl  */
    331    1.6      fvdl int
    332   1.53     enami genfs_islocked(void *v)
    333    1.6      fvdl {
    334    1.6      fvdl 	struct vop_islocked_args /* {
    335    1.6      fvdl 		struct vnode *a_vp;
    336    1.6      fvdl 	} */ *ap = v;
    337    1.6      fvdl 	struct vnode *vp = ap->a_vp;
    338    1.6      fvdl 
    339   1.86   hannken 	return (lockstatus(vp->v_vnlock));
    340   1.12  wrstuden }
    341   1.12  wrstuden 
    342   1.12  wrstuden /*
    343   1.12  wrstuden  * Stubs to use when there is no locking to be done on the underlying object.
    344   1.12  wrstuden  */
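                          /*
                           * For example (illustrative only), a file system whose nodes need
                           * no real locking could plug these stubs into its
                           * vnodeopv_entry_desc table:
                           *
                           *	{ &vop_lock_desc, genfs_nolock },
                           *	{ &vop_unlock_desc, genfs_nounlock },
                           *	{ &vop_islocked_desc, genfs_noislocked },
                           */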
    345   1.12  wrstuden int
    346   1.53     enami genfs_nolock(void *v)
    347   1.12  wrstuden {
    348   1.12  wrstuden 	struct vop_lock_args /* {
    349   1.12  wrstuden 		struct vnode *a_vp;
    350   1.12  wrstuden 		int a_flags;
    351  1.116  christos 		struct lwp *a_l;
    352   1.12  wrstuden 	} */ *ap = v;
    353   1.12  wrstuden 
    354   1.12  wrstuden 	/*
    355   1.12  wrstuden 	 * Since we are not using the lock manager, we must clear
    356   1.12  wrstuden 	 * the interlock here.
    357   1.12  wrstuden 	 */
    358   1.12  wrstuden 	if (ap->a_flags & LK_INTERLOCK)
    359   1.12  wrstuden 		simple_unlock(&ap->a_vp->v_interlock);
    360   1.12  wrstuden 	return (0);
    361   1.12  wrstuden }
    362   1.12  wrstuden 
    363   1.12  wrstuden int
    364  1.131  christos genfs_nounlock(void *v __unused)
    365   1.12  wrstuden {
    366   1.53     enami 
    367   1.12  wrstuden 	return (0);
    368   1.12  wrstuden }
    369   1.12  wrstuden 
    370   1.12  wrstuden int
    371  1.131  christos genfs_noislocked(void *v __unused)
    372   1.12  wrstuden {
    373   1.53     enami 
    374   1.12  wrstuden 	return (0);
    375    1.8   thorpej }
    376    1.8   thorpej 
    377    1.8   thorpej /*
    378    1.8   thorpej  * Local lease check for NFS servers.  Just set up args and let
    379    1.8   thorpej  * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
    380    1.8   thorpej  * this is a null operation.
    381    1.8   thorpej  */
    382    1.8   thorpej int
    383   1.53     enami genfs_lease_check(void *v)
    384    1.8   thorpej {
    385    1.8   thorpej #ifdef NFSSERVER
    386    1.8   thorpej 	struct vop_lease_args /* {
    387    1.8   thorpej 		struct vnode *a_vp;
    388  1.116  christos 		struct lwp *a_l;
    389  1.125      elad 		kauth_cred_t a_cred;
    390    1.8   thorpej 		int a_flag;
    391    1.8   thorpej 	} */ *ap = v;
    392    1.8   thorpej 	u_int32_t duration = 0;
    393    1.8   thorpej 	int cache;
    394    1.8   thorpej 	u_quad_t frev;
    395    1.8   thorpej 
    396    1.8   thorpej 	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
    397  1.116  christos 	    NQLOCALSLP, ap->a_l, (struct mbuf *)0, &cache, &frev, ap->a_cred);
    398    1.8   thorpej 	return (0);
    399    1.8   thorpej #else
    400  1.132   thorpej 	(void) v;
    401    1.8   thorpej 	return (0);
    402    1.8   thorpej #endif /* NFSSERVER */
    403   1.34       chs }
    404   1.34       chs 
    405   1.34       chs int
    406  1.131  christos genfs_mmap(void *v __unused)
    407   1.34       chs {
    408   1.53     enami 
    409   1.53     enami 	return (0);
    410   1.21       chs }
    411   1.21       chs 
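                          /*
                           * genfs_rel_pages: unbusy the given pages after a failed or
                           * abandoned request.  pages still marked PG_FAKE never received
                           * valid data, so they are flagged PG_RELEASED and
                           * uvm_page_unbusy() frees them; the others are simply unbusied.
                           * the caller holds the object lock.
                           */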
    412  1.118     perry static inline void
    413   1.63     enami genfs_rel_pages(struct vm_page **pgs, int npages)
    414   1.63     enami {
    415   1.63     enami 	int i;
    416   1.63     enami 
    417   1.63     enami 	for (i = 0; i < npages; i++) {
    418   1.63     enami 		struct vm_page *pg = pgs[i];
    419   1.63     enami 
    420  1.127      yamt 		if (pg == NULL || pg == PGO_DONTCARE)
    421   1.63     enami 			continue;
    422   1.63     enami 		if (pg->flags & PG_FAKE) {
    423   1.63     enami 			pg->flags |= PG_RELEASED;
    424   1.63     enami 		}
    425   1.63     enami 	}
    426   1.64     enami 	uvm_lock_pageq();
    427   1.63     enami 	uvm_page_unbusy(pgs, npages);
    428   1.64     enami 	uvm_unlock_pageq();
    429   1.63     enami }
    430   1.63     enami 
    431   1.21       chs /*
    432   1.21       chs  * generic VM getpages routine.
    433   1.21       chs  * Return PG_BUSY pages for the given range,
    434   1.21       chs  * reading from backing store if necessary.
    435   1.21       chs  */
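                          /*
                           * A rough sketch of a caller, assuming the standard VOP_GETPAGES()
                           * convention (names illustrative): with vp->v_uobj.vmobjlock held,
                           *
                           *	npages = 1;
                           *	error = VOP_GETPAGES(vp, offset, &pg, &npages, 0,
                           *	    VM_PROT_READ, 0, PGO_SYNCIO);
                           *
                           * on return the object lock has been released (except for
                           * PGO_LOCKED requests) and each page returned is PG_BUSY.
                           */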
    436   1.21       chs 
    437   1.21       chs int
    438   1.53     enami genfs_getpages(void *v)
    439   1.21       chs {
    440   1.21       chs 	struct vop_getpages_args /* {
    441   1.21       chs 		struct vnode *a_vp;
    442   1.21       chs 		voff_t a_offset;
    443   1.33       chs 		struct vm_page **a_m;
    444   1.21       chs 		int *a_count;
    445   1.21       chs 		int a_centeridx;
    446   1.21       chs 		vm_prot_t a_access_type;
    447   1.21       chs 		int a_advice;
    448   1.21       chs 		int a_flags;
    449   1.21       chs 	} */ *ap = v;
    450   1.21       chs 
    451   1.30       chs 	off_t newsize, diskeof, memeof;
    452  1.124      yamt 	off_t offset, origoffset, startoffset, endoffset;
    453   1.21       chs 	daddr_t lbn, blkno;
    454  1.120      yamt 	int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
    455   1.37       chs 	int fs_bshift, fs_bsize, dev_bshift;
    456   1.21       chs 	int flags = ap->a_flags;
    457   1.21       chs 	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
    458   1.21       chs 	vaddr_t kva;
    459   1.21       chs 	struct buf *bp, *mbp;
    460   1.21       chs 	struct vnode *vp = ap->a_vp;
    461   1.36       chs 	struct vnode *devvp;
    462   1.37       chs 	struct genfs_node *gp = VTOG(vp);
    463   1.37       chs 	struct uvm_object *uobj = &vp->v_uobj;
    464  1.110      yamt 	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
    465   1.77      yamt 	int pgs_size;
    466  1.128        ad 	kauth_cred_t cred = curlwp->l_cred;		/* XXXUBC curlwp */
    467   1.21       chs 	boolean_t async = (flags & PGO_SYNCIO) == 0;
    468   1.21       chs 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
    469   1.21       chs 	boolean_t sawhole = FALSE;
    470   1.37       chs 	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
    471  1.100      yamt 	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
    472  1.126      yamt 	voff_t origvsize;
    473   1.21       chs 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    474   1.21       chs 
    475   1.30       chs 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    476   1.53     enami 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    477   1.30       chs 
    478  1.121   reinoud 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    479  1.121   reinoud 	    vp->v_type == VLNK || vp->v_type == VBLK);
    480  1.109      yamt 
    481   1.21       chs 	/* XXXUBC temp limit */
    482  1.110      yamt 	if (*ap->a_count > MAX_READ_PAGES) {
    483   1.37       chs 		panic("genfs_getpages: too many pages");
    484   1.21       chs 	}
    485   1.21       chs 
    486  1.126      yamt startover:
    487   1.26       chs 	error = 0;
    488  1.126      yamt 	origvsize = vp->v_size;
    489   1.26       chs 	origoffset = ap->a_offset;
    490   1.26       chs 	orignpages = *ap->a_count;
    491  1.123      yamt 	GOP_SIZE(vp, vp->v_size, &diskeof, 0);
    492   1.26       chs 	if (flags & PGO_PASTEOF) {
    493   1.37       chs 		newsize = MAX(vp->v_size,
    494   1.53     enami 		    origoffset + (orignpages << PAGE_SHIFT));
    495  1.123      yamt 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    496   1.26       chs 	} else {
    497  1.123      yamt 		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_MEM);
    498   1.21       chs 	}
     499   1.30       chs 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    500   1.30       chs 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    501   1.30       chs 	KASSERT(orignpages > 0);
    502   1.95       chs 
    503   1.95       chs 	/*
    504   1.95       chs 	 * Bounds-check the request.
    505   1.95       chs 	 */
    506   1.95       chs 
    507   1.95       chs 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    508   1.95       chs 		if ((flags & PGO_LOCKED) == 0) {
    509   1.95       chs 			simple_unlock(&uobj->vmobjlock);
    510   1.95       chs 		}
    511   1.95       chs 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    512   1.95       chs 		    origoffset, *ap->a_count, memeof,0);
    513   1.95       chs 		return (EINVAL);
    514   1.95       chs 	}
    515   1.21       chs 
    516   1.99      yamt 	/* uobj is locked */
    517   1.99      yamt 
    518  1.103      yamt 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    519  1.121   reinoud 	    (vp->v_type != VBLK ||
    520  1.103      yamt 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    521  1.103      yamt 		int updflags = 0;
    522  1.103      yamt 
    523  1.103      yamt 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    524  1.103      yamt 			updflags = GOP_UPDATE_ACCESSED;
    525  1.103      yamt 		}
    526  1.103      yamt 		if (write) {
    527  1.103      yamt 			updflags |= GOP_UPDATE_MODIFIED;
    528  1.103      yamt 		}
    529  1.103      yamt 		if (updflags != 0) {
    530  1.103      yamt 			GOP_MARKUPDATE(vp, updflags);
    531  1.103      yamt 		}
    532  1.103      yamt 	}
    533  1.103      yamt 
    534  1.101      yamt 	if (write) {
    535  1.101      yamt 		gp->g_dirtygen++;
    536  1.101      yamt 		if ((vp->v_flag & VONWORKLST) == 0) {
    537  1.101      yamt 			vn_syncer_add_to_worklist(vp, filedelay);
    538  1.101      yamt 		}
    539  1.103      yamt 		if ((vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP) {
    540  1.103      yamt 			vp->v_flag |= VWRITEMAPDIRTY;
    541  1.103      yamt 		}
    542   1.99      yamt 	}
    543   1.99      yamt 
    544   1.21       chs 	/*
    545   1.21       chs 	 * For PGO_LOCKED requests, just return whatever's in memory.
    546   1.21       chs 	 */
    547   1.21       chs 
    548   1.21       chs 	if (flags & PGO_LOCKED) {
    549  1.127      yamt 		int nfound;
    550  1.127      yamt 
    551  1.127      yamt 		npages = *ap->a_count;
    552  1.127      yamt #if defined(DEBUG)
    553  1.127      yamt 		for (i = 0; i < npages; i++) {
    554  1.127      yamt 			pg = ap->a_m[i];
    555  1.127      yamt 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    556  1.127      yamt 		}
    557  1.127      yamt #endif /* defined(DEBUG) */
    558  1.127      yamt 		nfound = uvn_findpages(uobj, origoffset, &npages,
    559  1.127      yamt 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
    560  1.127      yamt 		KASSERT(npages == *ap->a_count);
    561  1.127      yamt 		if (nfound == 0) {
    562  1.127      yamt 			return EBUSY;
    563  1.127      yamt 		}
    564  1.127      yamt 		if (lockmgr(&gp->g_glock, LK_SHARED | LK_NOWAIT, NULL)) {
    565  1.127      yamt 			genfs_rel_pages(ap->a_m, npages);
    566  1.127      yamt 
    567  1.127      yamt 			/*
    568  1.127      yamt 			 * restore the array.
    569  1.127      yamt 			 */
    570  1.127      yamt 
    571  1.127      yamt 			for (i = 0; i < npages; i++) {
    572  1.127      yamt 				pg = ap->a_m[i];
    573   1.21       chs 
     574  1.127      yamt 				if (pg != NULL && pg != PGO_DONTCARE) {
    575  1.127      yamt 					ap->a_m[i] = NULL;
    576  1.127      yamt 				}
    577  1.127      yamt 			}
    578  1.127      yamt 		} else {
    579  1.127      yamt 			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    580  1.127      yamt 		}
    581   1.53     enami 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    582   1.21       chs 	}
    583  1.126      yamt 	simple_unlock(&uobj->vmobjlock);
    584   1.21       chs 
    585   1.21       chs 	/*
    586   1.21       chs 	 * find the requested pages and make some simple checks.
    587   1.21       chs 	 * leave space in the page array for a whole block.
    588   1.21       chs 	 */
    589   1.21       chs 
    590  1.121   reinoud 	if (vp->v_type != VBLK) {
    591   1.36       chs 		fs_bshift = vp->v_mount->mnt_fs_bshift;
    592   1.36       chs 		dev_bshift = vp->v_mount->mnt_dev_bshift;
    593   1.36       chs 	} else {
    594   1.36       chs 		fs_bshift = DEV_BSHIFT;
    595   1.36       chs 		dev_bshift = DEV_BSHIFT;
    596   1.36       chs 	}
    597   1.21       chs 	fs_bsize = 1 << fs_bshift;
    598   1.21       chs 
    599   1.30       chs 	orignpages = MIN(orignpages,
    600   1.30       chs 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    601   1.21       chs 	npages = orignpages;
    602   1.21       chs 	startoffset = origoffset & ~(fs_bsize - 1);
    603   1.53     enami 	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
    604   1.53     enami 	    fs_bsize - 1) & ~(fs_bsize - 1));
    605   1.30       chs 	endoffset = MIN(endoffset, round_page(memeof));
    606   1.21       chs 	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    607   1.21       chs 
    608   1.77      yamt 	pgs_size = sizeof(struct vm_page *) *
    609   1.77      yamt 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    610   1.77      yamt 	if (pgs_size > sizeof(pgs_onstack)) {
    611  1.135      yamt 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    612   1.78    simonb 		if (pgs == NULL) {
    613   1.78    simonb 			return (ENOMEM);
    614   1.78    simonb 		}
    615   1.77      yamt 	} else {
    616   1.77      yamt 		pgs = pgs_onstack;
    617   1.77      yamt 		memset(pgs, 0, pgs_size);
    618   1.77      yamt 	}
    619   1.63     enami 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    620   1.63     enami 	    ridx, npages, startoffset, endoffset);
    621  1.126      yamt 
    622  1.126      yamt 	/*
    623  1.126      yamt 	 * hold g_glock to prevent a race with truncate.
    624  1.126      yamt 	 *
    625  1.126      yamt 	 * check if our idea of v_size is still valid.
    626  1.126      yamt 	 */
    627  1.126      yamt 
    628  1.126      yamt 	if (blockalloc) {
    629  1.126      yamt 		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
    630  1.126      yamt 	} else {
    631  1.126      yamt 		lockmgr(&gp->g_glock, LK_SHARED, NULL);
    632  1.126      yamt 	}
    633  1.126      yamt 	simple_lock(&uobj->vmobjlock);
    634  1.126      yamt 	if (vp->v_size < origvsize) {
    635  1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    636  1.126      yamt 		if (pgs != pgs_onstack)
    637  1.135      yamt 			kmem_free(pgs, pgs_size);
    638  1.126      yamt 		goto startover;
    639  1.126      yamt 	}
    640  1.126      yamt 
    641   1.63     enami 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    642   1.63     enami 	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
    643  1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    644   1.63     enami 		KASSERT(async != 0);
    645   1.63     enami 		genfs_rel_pages(&pgs[ridx], orignpages);
    646   1.63     enami 		simple_unlock(&uobj->vmobjlock);
    647   1.77      yamt 		if (pgs != pgs_onstack)
    648  1.135      yamt 			kmem_free(pgs, pgs_size);
    649   1.63     enami 		return (EBUSY);
    650   1.63     enami 	}
    651   1.21       chs 
    652   1.21       chs 	/*
    653   1.21       chs 	 * if the pages are already resident, just return them.
    654   1.21       chs 	 */
    655   1.21       chs 
    656   1.21       chs 	for (i = 0; i < npages; i++) {
    657   1.97  christos 		struct vm_page *pg1 = pgs[ridx + i];
    658   1.21       chs 
    659   1.97  christos 		if ((pg1->flags & PG_FAKE) ||
    660  1.100      yamt 		    (blockalloc && (pg1->flags & PG_RDONLY))) {
    661   1.21       chs 			break;
    662   1.21       chs 		}
    663   1.21       chs 	}
    664   1.21       chs 	if (i == npages) {
    665  1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    666   1.21       chs 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    667   1.26       chs 		npages += ridx;
    668  1.110      yamt 		goto out;
    669   1.21       chs 	}
    670   1.21       chs 
    671   1.21       chs 	/*
    672   1.37       chs 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    673   1.37       chs 	 */
    674   1.37       chs 
    675  1.124      yamt 	if (overwrite) {
    676  1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    677   1.37       chs 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    678   1.37       chs 
    679   1.37       chs 		for (i = 0; i < npages; i++) {
    680   1.97  christos 			struct vm_page *pg1 = pgs[ridx + i];
    681   1.37       chs 
    682   1.97  christos 			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
    683   1.37       chs 		}
    684   1.37       chs 		npages += ridx;
    685   1.37       chs 		goto out;
    686   1.37       chs 	}
    687   1.37       chs 
    688   1.37       chs 	/*
    689   1.21       chs 	 * the page wasn't resident and we're not overwriting,
    690   1.21       chs 	 * so we're going to have to do some i/o.
    691   1.21       chs 	 * find any additional pages needed to cover the expanded range.
    692   1.21       chs 	 */
    693   1.21       chs 
    694   1.35       chs 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    695   1.35       chs 	if (startoffset != origoffset || npages != orignpages) {
    696   1.21       chs 
    697   1.21       chs 		/*
    698   1.37       chs 		 * we need to avoid deadlocks caused by locking
    699   1.21       chs 		 * additional pages at lower offsets than pages we
    700   1.37       chs 		 * already have locked.  unlock them all and start over.
    701   1.21       chs 		 */
    702   1.21       chs 
    703   1.63     enami 		genfs_rel_pages(&pgs[ridx], orignpages);
    704   1.77      yamt 		memset(pgs, 0, pgs_size);
    705   1.21       chs 
    706   1.21       chs 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    707   1.53     enami 		    startoffset, endoffset, 0,0);
    708   1.21       chs 		npgs = npages;
    709   1.63     enami 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    710   1.63     enami 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    711  1.126      yamt 			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    712   1.63     enami 			KASSERT(async != 0);
    713   1.63     enami 			genfs_rel_pages(pgs, npages);
    714   1.63     enami 			simple_unlock(&uobj->vmobjlock);
    715   1.77      yamt 			if (pgs != pgs_onstack)
    716  1.135      yamt 				kmem_free(pgs, pgs_size);
    717   1.63     enami 			return (EBUSY);
    718   1.63     enami 		}
    719   1.21       chs 	}
    720   1.21       chs 	simple_unlock(&uobj->vmobjlock);
    721   1.21       chs 
    722   1.21       chs 	/*
    723   1.21       chs 	 * read the desired page(s).
    724   1.21       chs 	 */
    725   1.21       chs 
    726   1.21       chs 	totalbytes = npages << PAGE_SHIFT;
    727   1.30       chs 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    728   1.21       chs 	tailbytes = totalbytes - bytes;
    729   1.21       chs 	skipbytes = 0;
    730   1.21       chs 
    731   1.53     enami 	kva = uvm_pagermapin(pgs, npages,
    732   1.53     enami 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    733   1.21       chs 
    734  1.119      yamt 	mbp = getiobuf();
    735   1.21       chs 	mbp->b_bufsize = totalbytes;
    736   1.21       chs 	mbp->b_data = (void *)kva;
    737   1.21       chs 	mbp->b_resid = mbp->b_bcount = bytes;
    738   1.65      fvdl 	mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL|B_ASYNC : 0);
    739   1.37       chs 	mbp->b_iodone = (async ? uvm_aio_biodone : 0);
    740   1.21       chs 	mbp->b_vp = vp;
    741  1.120      yamt 	if (async)
    742  1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    743  1.120      yamt 	else
    744  1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    745   1.21       chs 
    746   1.21       chs 	/*
    747   1.31       chs 	 * if EOF is in the middle of the range, zero the part past EOF.
    748   1.38       chs 	 * if the page including EOF is not PG_FAKE, skip over it since
    749   1.38       chs 	 * in that case it has valid data that we need to preserve.
    750   1.21       chs 	 */
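                          	/*
                          	 * a worked example with hypothetical numbers: PAGE_SIZE 0x1000,
                          	 * bytes 0x1800 and totalbytes 0x3000 give tailbytes 0x1800.  if
                          	 * the page containing EOF (pgs[1]) is not PG_FAKE, tailstart is
                          	 * rounded up to 0x2000 and only the last 0x1000 bytes are zeroed.
                          	 */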
    751   1.21       chs 
    752   1.31       chs 	if (tailbytes > 0) {
    753   1.38       chs 		size_t tailstart = bytes;
    754   1.38       chs 
    755   1.38       chs 		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
    756   1.38       chs 			tailstart = round_page(tailstart);
    757   1.38       chs 			tailbytes -= tailstart - bytes;
    758   1.38       chs 		}
    759   1.37       chs 		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    760   1.53     enami 		    kva, tailstart, tailbytes,0);
    761   1.38       chs 		memset((void *)(kva + tailstart), 0, tailbytes);
    762   1.21       chs 	}
    763   1.21       chs 
    764   1.21       chs 	/*
    765   1.21       chs 	 * now loop over the pages, reading as needed.
    766   1.21       chs 	 */
    767   1.21       chs 
    768   1.21       chs 	bp = NULL;
    769   1.21       chs 	for (offset = startoffset;
    770   1.53     enami 	    bytes > 0;
    771   1.53     enami 	    offset += iobytes, bytes -= iobytes) {
    772   1.21       chs 
    773   1.21       chs 		/*
    774   1.21       chs 		 * skip pages which don't need to be read.
    775   1.21       chs 		 */
    776   1.21       chs 
    777   1.21       chs 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    778  1.100      yamt 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    779   1.21       chs 			size_t b;
    780   1.21       chs 
    781   1.24       chs 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    782  1.100      yamt 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    783  1.100      yamt 				sawhole = TRUE;
    784  1.100      yamt 			}
    785   1.26       chs 			b = MIN(PAGE_SIZE, bytes);
    786   1.21       chs 			offset += b;
    787   1.21       chs 			bytes -= b;
    788   1.21       chs 			skipbytes += b;
    789   1.21       chs 			pidx++;
    790   1.21       chs 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    791   1.53     enami 			    offset, 0,0,0);
    792   1.21       chs 			if (bytes == 0) {
    793   1.21       chs 				goto loopdone;
    794   1.21       chs 			}
    795   1.21       chs 		}
    796   1.21       chs 
    797   1.21       chs 		/*
    798   1.21       chs 		 * bmap the file to find out the blkno to read from and
    799   1.21       chs 		 * how much we can read in one i/o.  if bmap returns an error,
    800   1.21       chs 		 * skip the rest of the top-level i/o.
    801   1.21       chs 		 */
    802   1.21       chs 
    803   1.21       chs 		lbn = offset >> fs_bshift;
    804   1.36       chs 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    805   1.21       chs 		if (error) {
    806   1.21       chs 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    807   1.53     enami 			    lbn, error,0,0);
    808   1.21       chs 			skipbytes += bytes;
    809   1.21       chs 			goto loopdone;
    810   1.21       chs 		}
    811   1.21       chs 
    812   1.21       chs 		/*
    813   1.21       chs 		 * see how many pages can be read with this i/o.
    814   1.21       chs 		 * reduce the i/o size if necessary to avoid
    815   1.21       chs 		 * overwriting pages with valid data.
    816   1.21       chs 		 */
    817   1.21       chs 
    818   1.26       chs 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    819   1.26       chs 		    bytes);
    820   1.21       chs 		if (offset + iobytes > round_page(offset)) {
    821   1.21       chs 			pcount = 1;
    822   1.21       chs 			while (pidx + pcount < npages &&
    823   1.53     enami 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    824   1.21       chs 				pcount++;
    825   1.21       chs 			}
    826   1.26       chs 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    827   1.53     enami 			    (offset - trunc_page(offset)));
    828   1.21       chs 		}
    829   1.21       chs 
    830   1.21       chs 		/*
    831   1.53     enami 		 * if this block isn't allocated, zero it instead of
    832  1.100      yamt 		 * reading it.  unless we are going to allocate blocks,
    833  1.100      yamt 		 * mark the pages we zeroed PG_RDONLY.
    834   1.21       chs 		 */
    835   1.21       chs 
    836   1.21       chs 		if (blkno < 0) {
    837   1.53     enami 			int holepages = (round_page(offset + iobytes) -
    838   1.53     enami 			    trunc_page(offset)) >> PAGE_SHIFT;
    839   1.21       chs 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    840   1.21       chs 
    841   1.21       chs 			sawhole = TRUE;
    842   1.21       chs 			memset((char *)kva + (offset - startoffset), 0,
    843   1.53     enami 			    iobytes);
    844   1.21       chs 			skipbytes += iobytes;
    845   1.21       chs 
    846   1.35       chs 			for (i = 0; i < holepages; i++) {
    847   1.35       chs 				if (write) {
    848   1.35       chs 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    849  1.100      yamt 				}
    850  1.100      yamt 				if (!blockalloc) {
    851   1.21       chs 					pgs[pidx + i]->flags |= PG_RDONLY;
    852   1.21       chs 				}
    853   1.21       chs 			}
    854   1.21       chs 			continue;
    855   1.21       chs 		}
    856   1.21       chs 
    857   1.21       chs 		/*
    858   1.21       chs 		 * allocate a sub-buf for this piece of the i/o
    859   1.21       chs 		 * (or just use mbp if there's only 1 piece),
    860   1.21       chs 		 * and start it going.
    861   1.21       chs 		 */
    862   1.21       chs 
    863   1.21       chs 		if (offset == startoffset && iobytes == bytes) {
    864   1.21       chs 			bp = mbp;
    865   1.21       chs 		} else {
    866  1.119      yamt 			bp = getiobuf();
    867  1.120      yamt 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    868   1.21       chs 		}
    869  1.112      yamt 		bp->b_lblkno = 0;
    870   1.21       chs 
    871   1.21       chs 		/* adjust physical blkno for partial blocks */
    872   1.25      fvdl 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    873   1.53     enami 		    dev_bshift);
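                          		/*
                          		 * a worked example with hypothetical numbers: fs_bshift 13,
                          		 * dev_bshift 9, lbn 2, offset 0x5000: the i/o begins 0x1000
                          		 * bytes into the block, so we start 0x1000 >> 9 == 8 sectors
                          		 * past the block's first sector.
                          		 */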
    874   1.21       chs 
    875   1.53     enami 		UVMHIST_LOG(ubchist,
    876   1.53     enami 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    877   1.53     enami 		    bp, offset, iobytes, bp->b_blkno);
    878   1.21       chs 
    879  1.109      yamt 		VOP_STRATEGY(devvp, bp);
    880   1.21       chs 	}
    881   1.21       chs 
    882   1.21       chs loopdone:
    883  1.120      yamt 	nestiobuf_done(mbp, skipbytes, error);
    884   1.21       chs 	if (async) {
    885   1.32       chs 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    886   1.37       chs 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    887   1.77      yamt 		if (pgs != pgs_onstack)
    888  1.135      yamt 			kmem_free(pgs, pgs_size);
    889   1.53     enami 		return (0);
    890   1.21       chs 	}
    891   1.21       chs 	if (bp != NULL) {
    892   1.21       chs 		error = biowait(mbp);
    893   1.21       chs 	}
    894  1.119      yamt 	putiobuf(mbp);
    895   1.21       chs 	uvm_pagermapout(kva, npages);
    896   1.21       chs 
    897   1.21       chs 	/*
     898   1.21       chs 	 * if we encountered a hole then we have to do a little more work.
    899   1.21       chs 	 * for read faults, we marked the page PG_RDONLY so that future
    900   1.21       chs 	 * write accesses to the page will fault again.
    901   1.21       chs 	 * for write faults, we must make sure that the backing store for
    902   1.21       chs 	 * the page is completely allocated while the pages are locked.
    903   1.21       chs 	 */
    904   1.21       chs 
    905  1.100      yamt 	if (!error && sawhole && blockalloc) {
    906   1.37       chs 		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
    907   1.53     enami 		    cred);
    908   1.37       chs 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    909   1.37       chs 		    startoffset, npages << PAGE_SHIFT, error,0);
    910  1.100      yamt 		if (!error) {
    911  1.100      yamt 			for (i = 0; i < npages; i++) {
    912  1.100      yamt 				if (pgs[i] == NULL) {
    913  1.100      yamt 					continue;
    914  1.100      yamt 				}
    915  1.100      yamt 				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
    916  1.100      yamt 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    917  1.100      yamt 				    pgs[i],0,0,0);
    918  1.100      yamt 			}
    919  1.100      yamt 		}
    920   1.21       chs 	}
    921   1.37       chs 	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    922   1.21       chs 	simple_lock(&uobj->vmobjlock);
    923   1.21       chs 
    924   1.21       chs 	/*
    925   1.21       chs 	 * we're almost done!  release the pages...
    926   1.21       chs 	 * for errors, we free the pages.
    927   1.21       chs 	 * otherwise we activate them and mark them as valid and clean.
    928   1.21       chs 	 * also, unbusy pages that were not actually requested.
    929   1.21       chs 	 */
    930   1.21       chs 
    931   1.21       chs 	if (error) {
    932   1.21       chs 		for (i = 0; i < npages; i++) {
    933   1.21       chs 			if (pgs[i] == NULL) {
    934   1.21       chs 				continue;
    935   1.21       chs 			}
    936   1.21       chs 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    937   1.53     enami 			    pgs[i], pgs[i]->flags, 0,0);
    938   1.26       chs 			if (pgs[i]->flags & PG_FAKE) {
    939   1.37       chs 				pgs[i]->flags |= PG_RELEASED;
    940   1.21       chs 			}
    941   1.21       chs 		}
    942   1.37       chs 		uvm_lock_pageq();
    943   1.37       chs 		uvm_page_unbusy(pgs, npages);
    944   1.21       chs 		uvm_unlock_pageq();
    945   1.21       chs 		simple_unlock(&uobj->vmobjlock);
    946   1.21       chs 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    947   1.77      yamt 		if (pgs != pgs_onstack)
    948  1.135      yamt 			kmem_free(pgs, pgs_size);
    949   1.53     enami 		return (error);
    950   1.21       chs 	}
    951   1.21       chs 
    952   1.37       chs out:
    953   1.21       chs 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    954   1.26       chs 	uvm_lock_pageq();
    955   1.21       chs 	for (i = 0; i < npages; i++) {
    956   1.37       chs 		pg = pgs[i];
    957   1.37       chs 		if (pg == NULL) {
    958   1.21       chs 			continue;
    959   1.21       chs 		}
    960   1.21       chs 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    961   1.53     enami 		    pg, pg->flags, 0,0);
    962   1.37       chs 		if (pg->flags & PG_FAKE && !overwrite) {
    963   1.37       chs 			pg->flags &= ~(PG_FAKE);
    964   1.21       chs 			pmap_clear_modify(pgs[i]);
    965   1.21       chs 		}
    966  1.100      yamt 		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    967   1.21       chs 		if (i < ridx || i >= ridx + orignpages || async) {
    968   1.21       chs 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    969   1.53     enami 			    pg, pg->offset,0,0);
    970   1.37       chs 			if (pg->flags & PG_WANTED) {
    971   1.37       chs 				wakeup(pg);
    972   1.37       chs 			}
    973   1.37       chs 			if (pg->flags & PG_FAKE) {
    974   1.37       chs 				KASSERT(overwrite);
    975   1.37       chs 				uvm_pagezero(pg);
    976   1.37       chs 			}
    977   1.37       chs 			if (pg->flags & PG_RELEASED) {
    978   1.37       chs 				uvm_pagefree(pg);
    979   1.26       chs 				continue;
    980   1.21       chs 			}
    981  1.129      yamt 			uvm_pageenqueue(pg);
    982   1.37       chs 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    983   1.37       chs 			UVM_PAGE_OWN(pg, NULL);
    984   1.21       chs 		}
    985   1.21       chs 	}
    986   1.26       chs 	uvm_unlock_pageq();
    987   1.21       chs 	simple_unlock(&uobj->vmobjlock);
    988   1.21       chs 	if (ap->a_m != NULL) {
    989   1.21       chs 		memcpy(ap->a_m, &pgs[ridx],
    990   1.53     enami 		    orignpages * sizeof(struct vm_page *));
    991   1.21       chs 	}
    992   1.77      yamt 	if (pgs != pgs_onstack)
    993  1.135      yamt 		kmem_free(pgs, pgs_size);
    994   1.53     enami 	return (0);
    995   1.21       chs }
    996   1.21       chs 
    997   1.21       chs /*
    998   1.21       chs  * generic VM putpages routine.
    999   1.21       chs  * Write the given range of pages to backing store.
   1000   1.37       chs  *
   1001   1.37       chs  * => "offhi == 0" means flush all pages at or after "offlo".
   1002   1.37       chs  * => object should be locked by caller.   we may _unlock_ the object
   1003   1.37       chs  *	if (and only if) we need to clean a page (PGO_CLEANIT), or
   1004   1.37       chs  *	if PGO_SYNCIO is set and there are pages busy.
    1005   1.37       chs  *	we return with the object unlocked.
   1006   1.37       chs  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
   1007   1.37       chs  *	thus, a caller might want to unlock higher level resources
   1008   1.37       chs  *	(e.g. vm_map) before calling flush.
   1009   1.37       chs  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
   1010   1.37       chs  *	unlock the object nor block.
   1011   1.37       chs  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
   1012   1.37       chs  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
   1013   1.37       chs  *	that new pages are inserted on the tail end of the list.   thus,
   1014   1.37       chs  *	we can make a complete pass through the object in one go by starting
   1015   1.37       chs  *	at the head and working towards the tail (new pages are put in
   1016   1.37       chs  *	front of us).
   1017   1.37       chs  * => NOTE: we are allowed to lock the page queues, so the caller
   1018   1.37       chs  *	must not be holding the page queue lock.
   1019   1.37       chs  *
   1020   1.37       chs  * note on "cleaning" object and PG_BUSY pages:
   1021   1.37       chs  *	this routine is holding the lock on the object.   the only time
   1022   1.37       chs  *	that it can run into a PG_BUSY page that it does not own is if
   1023   1.37       chs  *	some other process has started I/O on the page (e.g. either
   1024   1.37       chs  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
   1025   1.37       chs  *	in, then it can not be dirty (!PG_CLEAN) because no one has
   1026   1.37       chs  *	had a chance to modify it yet.    if the PG_BUSY page is being
   1027   1.37       chs  *	paged out then it means that someone else has already started
   1028   1.53     enami  *	cleaning the page for us (how nice!).    in this case, if we
   1029   1.37       chs  *	have syncio specified, then after we make our pass through the
   1030   1.53     enami  *	object we need to wait for the other PG_BUSY pages to clear
   1031   1.37       chs  *	off (i.e. we need to do an iosync).   also note that once a
   1032   1.37       chs  *	page is PG_BUSY it must stay in its object until it is un-busyed.
   1033   1.37       chs  *
   1034   1.37       chs  * note on page traversal:
   1035   1.37       chs  *	we can traverse the pages in an object either by going down the
   1036   1.37       chs  *	linked list in "uobj->memq", or we can go over the address range
   1037   1.37       chs  *	by page doing hash table lookups for each address.    depending
   1038   1.53     enami  *	on how many pages are in the object it may be cheaper to do one
   1039   1.37       chs  *	or the other.   we set "by_list" to true if we are using memq.
   1040   1.37       chs  *	if the cost of a hash lookup was equal to the cost of the list
   1041   1.37       chs  *	traversal we could compare the number of pages in the start->stop
   1042   1.37       chs  *	range to the total number of pages in the object.   however, it
   1043   1.37       chs  *	seems that a hash table lookup is more expensive than the linked
   1044   1.53     enami  *	list traversal, so we multiply the number of pages in the
   1045   1.37       chs  *	range by an estimate of the relatively higher cost of the hash lookup.
   1046   1.21       chs  */
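                          /*
                           * A minimal sketch of a typical flush, assuming the standard
                           * VOP_PUTPAGES() convention (values illustrative): with
                           * vp->v_uobj.vmobjlock held,
                           *
                           *	error = VOP_PUTPAGES(vp, trunc_page(lo), round_page(hi),
                           *	    PGO_CLEANIT | PGO_SYNCIO);
                           *
                           * the object lock is released before this returns.
                           */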
   1047   1.21       chs 
   1048   1.21       chs int
   1049   1.53     enami genfs_putpages(void *v)
   1050   1.21       chs {
   1051   1.21       chs 	struct vop_putpages_args /* {
   1052   1.21       chs 		struct vnode *a_vp;
   1053   1.37       chs 		voff_t a_offlo;
   1054   1.37       chs 		voff_t a_offhi;
   1055   1.21       chs 		int a_flags;
   1056   1.21       chs 	} */ *ap = v;
   1057   1.37       chs 	struct vnode *vp = ap->a_vp;
   1058   1.37       chs 	struct uvm_object *uobj = &vp->v_uobj;
   1059   1.46       chs 	struct simplelock *slock = &uobj->vmobjlock;
   1060   1.37       chs 	off_t startoff = ap->a_offlo;
   1061   1.37       chs 	off_t endoff = ap->a_offhi;
   1062   1.37       chs 	off_t off;
   1063   1.37       chs 	int flags = ap->a_flags;
   1064   1.76       tls 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1065   1.76       tls 	const int maxpages = MAXPHYS >> PAGE_SHIFT;
   1066   1.37       chs 	int i, s, error, npages, nback;
   1067   1.37       chs 	int freeflag;
   1068   1.60     enami 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1069   1.97  christos 	boolean_t wasclean, by_list, needs_clean, yld;
   1070   1.37       chs 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1071   1.56     enami 	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
   1072   1.70  christos 	struct lwp *l = curlwp ? curlwp : &lwp0;
   1073  1.101      yamt 	struct genfs_node *gp = VTOG(vp);
   1074  1.101      yamt 	int dirtygen;
   1075  1.103      yamt 	boolean_t modified = FALSE;
   1076  1.104      yamt 	boolean_t cleanall;
   1077   1.70  christos 
   1078   1.37       chs 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1079   1.37       chs 
   1080   1.37       chs 	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1081   1.37       chs 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1082   1.37       chs 	KASSERT(startoff < endoff || endoff == 0);
   1083   1.37       chs 
   1084   1.37       chs 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1085   1.37       chs 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1086  1.103      yamt 
   1087  1.103      yamt 	KASSERT((vp->v_flag & VONWORKLST) != 0 ||
   1088  1.103      yamt 	    (vp->v_flag & VWRITEMAPDIRTY) == 0);
   1089   1.37       chs 	if (uobj->uo_npages == 0) {
   1090   1.62  perseant 		s = splbio();
   1091  1.103      yamt 		if (vp->v_flag & VONWORKLST) {
   1092  1.103      yamt 			vp->v_flag &= ~VWRITEMAPDIRTY;
   1093  1.103      yamt 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
   1094  1.103      yamt 				vp->v_flag &= ~VONWORKLST;
   1095  1.103      yamt 				LIST_REMOVE(vp, v_synclist);
   1096  1.103      yamt 			}
   1097   1.37       chs 		}
   1098   1.62  perseant 		splx(s);
   1099   1.46       chs 		simple_unlock(slock);
   1100   1.53     enami 		return (0);
   1101   1.37       chs 	}
   1102   1.37       chs 
   1103   1.37       chs 	/*
   1104   1.37       chs 	 * the vnode has pages, set up to process the request.
   1105   1.37       chs 	 */
   1106   1.37       chs 
   1107   1.37       chs 	error = 0;
   1108   1.44       chs 	s = splbio();
   1109   1.71        pk 	simple_lock(&global_v_numoutput_slock);
   1110   1.44       chs 	wasclean = (vp->v_numoutput == 0);
   1111   1.71        pk 	simple_unlock(&global_v_numoutput_slock);
   1112   1.44       chs 	splx(s);
   1113   1.37       chs 	off = startoff;
   1114   1.37       chs 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1115   1.37       chs 		endoff = trunc_page(LLONG_MAX);
   1116   1.37       chs 	}
   1117   1.37       chs 	by_list = (uobj->uo_npages <=
   1118   1.37       chs 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
   1119   1.37       chs 
   1120  1.102      yamt #if !defined(DEBUG)
   1121  1.102      yamt 	/*
   1122  1.102      yamt 	 * if this vnode is known not to have dirty pages,
   1123  1.102      yamt 	 * don't bother to clean it out.
   1124  1.102      yamt 	 */
   1125  1.102      yamt 
   1126  1.102      yamt 	if ((vp->v_flag & VONWORKLST) == 0) {
   1127  1.102      yamt 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1128  1.102      yamt 			goto skip_scan;
   1129  1.102      yamt 		}
   1130  1.102      yamt 		flags &= ~PGO_CLEANIT;
   1131  1.102      yamt 	}
   1132  1.102      yamt #endif /* !defined(DEBUG) */
   1133  1.102      yamt 
   1134   1.37       chs 	/*
   1135   1.37       chs 	 * start the loop.  when scanning by list, hold the last page
   1136   1.37       chs 	 * in the list before we start.  pages allocated after we start
   1137   1.37       chs 	 * will be added to the end of the list, so we can stop at the
   1138   1.37       chs 	 * current last page.
   1139   1.37       chs 	 */
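
	/*
	 * Editor's sketch of the marker technique used below: curmp and
	 * endmp are dummy on-stack pages with PG_BUSY set and an
	 * impossible offset (-1), so the scan can recognize and skip
	 * them.  endmp bounds the scan; curmp holds our place whenever
	 * the object lock is dropped (to sleep on a busy page or to
	 * call GOP_WRITE) and is removed again afterwards.
	 */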
   1140   1.37       chs 
   1141  1.104      yamt 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1142  1.104      yamt 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1143  1.104      yamt 	    (vp->v_flag & VONWORKLST) != 0;
   1144  1.101      yamt 	dirtygen = gp->g_dirtygen;
   1145   1.56     enami 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1146   1.37       chs 	if (by_list) {
   1147  1.113      yamt 		curmp.uobject = uobj;
   1148  1.113      yamt 		curmp.offset = (voff_t)-1;
   1149  1.113      yamt 		curmp.flags = PG_BUSY;
   1150  1.113      yamt 		endmp.uobject = uobj;
   1151  1.113      yamt 		endmp.offset = (voff_t)-1;
   1152  1.113      yamt 		endmp.flags = PG_BUSY;
   1153   1.37       chs 		pg = TAILQ_FIRST(&uobj->memq);
   1154   1.37       chs 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
   1155   1.70  christos 		PHOLD(l);
   1156   1.37       chs 	} else {
   1157   1.37       chs 		pg = uvm_pagelookup(uobj, off);
   1158   1.37       chs 	}
   1159   1.37       chs 	nextpg = NULL;
   1160   1.37       chs 	while (by_list || off < endoff) {
   1161   1.37       chs 
   1162   1.37       chs 		/*
   1163   1.37       chs 		 * if the current page is not interesting, move on to the next.
   1164   1.37       chs 		 */
   1165   1.37       chs 
   1166   1.37       chs 		KASSERT(pg == NULL || pg->uobject == uobj);
   1167   1.37       chs 		KASSERT(pg == NULL ||
   1168   1.53     enami 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1169   1.53     enami 		    (pg->flags & PG_BUSY) != 0);
   1170   1.37       chs 		if (by_list) {
   1171   1.37       chs 			if (pg == &endmp) {
   1172   1.37       chs 				break;
   1173   1.37       chs 			}
   1174   1.37       chs 			if (pg->offset < startoff || pg->offset >= endoff ||
   1175   1.37       chs 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1176  1.101      yamt 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1177  1.101      yamt 					wasclean = FALSE;
   1178  1.101      yamt 				}
   1179   1.37       chs 				pg = TAILQ_NEXT(pg, listq);
   1180   1.37       chs 				continue;
   1181   1.37       chs 			}
   1182   1.37       chs 			off = pg->offset;
   1183  1.101      yamt 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1184  1.101      yamt 			if (pg != NULL) {
   1185  1.101      yamt 				wasclean = FALSE;
   1186  1.101      yamt 			}
   1187   1.37       chs 			off += PAGE_SIZE;
   1188   1.37       chs 			if (off < endoff) {
   1189   1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1190   1.37       chs 			}
   1191   1.37       chs 			continue;
   1192   1.37       chs 		}
   1193   1.21       chs 
   1194   1.37       chs 		/*
   1195   1.37       chs 		 * if the current page needs to be cleaned and it's busy,
   1196   1.37       chs 		 * wait for it to become unbusy.
   1197   1.37       chs 		 */
   1198   1.37       chs 
   1199   1.97  christos 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1200   1.56     enami 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1201   1.97  christos 		if (pg->flags & PG_BUSY || yld) {
   1202   1.72  perseant 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1203   1.72  perseant 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1204   1.72  perseant 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1205   1.72  perseant 				error = EDEADLK;
   1206   1.72  perseant 				break;
   1207   1.72  perseant 			}
   1208   1.56     enami 			KASSERT(!pagedaemon);
   1209   1.37       chs 			if (by_list) {
   1210   1.37       chs 				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
   1211   1.37       chs 				UVMHIST_LOG(ubchist, "curmp next %p",
   1212   1.53     enami 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1213   1.37       chs 			}
   1214   1.97  christos 			if (yld) {
   1215   1.49       chs 				simple_unlock(slock);
   1216   1.69   thorpej 				preempt(1);
   1217   1.49       chs 				simple_lock(slock);
   1218   1.49       chs 			} else {
   1219   1.49       chs 				pg->flags |= PG_WANTED;
   1220   1.49       chs 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1221   1.49       chs 				simple_lock(slock);
   1222   1.49       chs 			}
   1223   1.37       chs 			if (by_list) {
   1224   1.37       chs 				UVMHIST_LOG(ubchist, "after next %p",
   1225   1.53     enami 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1226   1.37       chs 				pg = TAILQ_NEXT(&curmp, listq);
   1227   1.37       chs 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1228   1.37       chs 			} else {
   1229   1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1230   1.37       chs 			}
   1231   1.37       chs 			continue;
   1232   1.49       chs 		}
   1233   1.49       chs 
   1234   1.49       chs 		/*
   1235   1.49       chs 		 * if we're freeing, remove all mappings of the page now.
    1236   1.49       chs 		 * if we're cleaning, check whether the page needs to be cleaned.
   1237   1.49       chs 		 */
   1238   1.49       chs 
   1239   1.49       chs 		if (flags & PGO_FREE) {
   1240   1.49       chs 			pmap_page_protect(pg, VM_PROT_NONE);
   1241  1.101      yamt 		} else if (flags & PGO_CLEANIT) {
   1242  1.101      yamt 
   1243  1.101      yamt 			/*
    1244  1.101      yamt 			 * if we still have some hope of pulling this vnode
    1245  1.101      yamt 			 * off the syncer queue, write-protect the page.
   1246  1.101      yamt 			 */
   1247  1.101      yamt 
   1248  1.104      yamt 			if (cleanall && wasclean &&
   1249  1.104      yamt 			    gp->g_dirtygen == dirtygen) {
   1250  1.104      yamt 
   1251  1.104      yamt 				/*
   1252  1.104      yamt 				 * uobj pages get wired only by uvm_fault
   1253  1.104      yamt 				 * where uobj is locked.
   1254  1.104      yamt 				 */
   1255  1.104      yamt 
   1256  1.104      yamt 				if (pg->wire_count == 0) {
   1257  1.104      yamt 					pmap_page_protect(pg,
   1258  1.104      yamt 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1259  1.104      yamt 				} else {
   1260  1.104      yamt 					cleanall = FALSE;
   1261  1.104      yamt 				}
   1262  1.101      yamt 			}
   1263   1.49       chs 		}
   1264  1.101      yamt 
   1265   1.49       chs 		if (flags & PGO_CLEANIT) {
   1266   1.49       chs 			needs_clean = pmap_clear_modify(pg) ||
   1267   1.53     enami 			    (pg->flags & PG_CLEAN) == 0;
   1268   1.49       chs 			pg->flags |= PG_CLEAN;
   1269   1.49       chs 		} else {
   1270   1.49       chs 			needs_clean = FALSE;
   1271   1.37       chs 		}
   1272   1.37       chs 
   1273   1.37       chs 		/*
   1274   1.37       chs 		 * if we're cleaning, build a cluster.
   1275   1.37       chs 		 * the cluster will consist of pages which are currently dirty,
   1276   1.37       chs 		 * but they will be returned to us marked clean.
   1277   1.37       chs 		 * if not cleaning, just operate on the one page.
   1278   1.37       chs 		 */
   1279   1.37       chs 
   1280   1.37       chs 		if (needs_clean) {
   1281  1.101      yamt 			KDASSERT((vp->v_flag & VONWORKLST));
   1282   1.37       chs 			wasclean = FALSE;
   1283   1.37       chs 			memset(pgs, 0, sizeof(pgs));
   1284   1.37       chs 			pg->flags |= PG_BUSY;
   1285   1.37       chs 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1286   1.37       chs 
   1287   1.37       chs 			/*
   1288   1.37       chs 			 * first look backward.
   1289   1.37       chs 			 */
   1290   1.37       chs 
   1291   1.60     enami 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1292   1.37       chs 			nback = npages;
   1293   1.37       chs 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1294   1.37       chs 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1295   1.37       chs 			if (nback) {
   1296   1.37       chs 				memmove(&pgs[0], &pgs[npages - nback],
   1297   1.37       chs 				    nback * sizeof(pgs[0]));
   1298   1.47     enami 				if (npages - nback < nback)
   1299   1.47     enami 					memset(&pgs[nback], 0,
   1300   1.47     enami 					    (npages - nback) * sizeof(pgs[0]));
   1301   1.47     enami 				else
   1302   1.47     enami 					memset(&pgs[npages - nback], 0,
   1303   1.47     enami 					    nback * sizeof(pgs[0]));
   1304   1.37       chs 			}
   1305   1.37       chs 
   1306   1.37       chs 			/*
   1307   1.37       chs 			 * then plug in our page of interest.
   1308   1.37       chs 			 */
   1309   1.37       chs 
   1310   1.37       chs 			pgs[nback] = pg;
   1311   1.37       chs 
   1312   1.37       chs 			/*
   1313   1.37       chs 			 * then look forward to fill in the remaining space in
   1314   1.37       chs 			 * the array of pages.
   1315   1.37       chs 			 */
   1316   1.37       chs 
   1317   1.60     enami 			npages = maxpages - nback - 1;
   1318   1.37       chs 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1319   1.37       chs 			    &pgs[nback + 1],
   1320   1.37       chs 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1321   1.37       chs 			npages += nback + 1;
   1322   1.37       chs 		} else {
   1323   1.37       chs 			pgs[0] = pg;
   1324   1.37       chs 			npages = 1;
   1325   1.61     enami 			nback = 0;
   1326   1.37       chs 		}
   1327   1.37       chs 
   1328   1.37       chs 		/*
   1329   1.37       chs 		 * apply FREE or DEACTIVATE options if requested.
   1330   1.37       chs 		 */
   1331   1.37       chs 
   1332   1.37       chs 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1333   1.37       chs 			uvm_lock_pageq();
   1334   1.37       chs 		}
   1335   1.37       chs 		for (i = 0; i < npages; i++) {
   1336   1.37       chs 			tpg = pgs[i];
   1337   1.37       chs 			KASSERT(tpg->uobject == uobj);
   1338   1.59     enami 			if (by_list && tpg == TAILQ_NEXT(pg, listq))
   1339   1.59     enami 				pg = tpg;
   1340   1.91     enami 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1341   1.91     enami 				continue;
   1342  1.133      yamt 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0
   1343  1.133      yamt 			    && tpg->loan_count == 0) {
   1344   1.37       chs 				(void) pmap_clear_reference(tpg);
   1345   1.37       chs 				uvm_pagedeactivate(tpg);
   1346   1.37       chs 			} else if (flags & PGO_FREE) {
   1347   1.37       chs 				pmap_page_protect(tpg, VM_PROT_NONE);
   1348   1.37       chs 				if (tpg->flags & PG_BUSY) {
   1349   1.37       chs 					tpg->flags |= freeflag;
   1350   1.56     enami 					if (pagedaemon) {
   1351   1.37       chs 						uvmexp.paging++;
   1352   1.37       chs 						uvm_pagedequeue(tpg);
   1353   1.37       chs 					}
   1354   1.37       chs 				} else {
   1355   1.59     enami 
   1356   1.59     enami 					/*
   1357   1.59     enami 					 * ``page is not busy''
   1358   1.59     enami 					 * implies that npages is 1
   1359   1.59     enami 					 * and needs_clean is false.
   1360   1.59     enami 					 */
   1361   1.59     enami 
   1362   1.37       chs 					nextpg = TAILQ_NEXT(tpg, listq);
   1363   1.37       chs 					uvm_pagefree(tpg);
   1364   1.89     enami 					if (pagedaemon)
   1365   1.89     enami 						uvmexp.pdfreed++;
   1366   1.37       chs 				}
   1367   1.37       chs 			}
   1368   1.37       chs 		}
   1369   1.37       chs 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1370   1.37       chs 			uvm_unlock_pageq();
   1371   1.37       chs 		}
   1372   1.37       chs 		if (needs_clean) {
   1373  1.103      yamt 			modified = TRUE;
   1374   1.37       chs 
   1375   1.37       chs 			/*
   1376   1.37       chs 			 * start the i/o.  if we're traversing by list,
   1377   1.37       chs 			 * keep our place in the list with a marker page.
   1378   1.37       chs 			 */
   1379   1.37       chs 
   1380   1.37       chs 			if (by_list) {
   1381   1.37       chs 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1382   1.37       chs 				    listq);
   1383   1.37       chs 			}
   1384   1.46       chs 			simple_unlock(slock);
   1385   1.37       chs 			error = GOP_WRITE(vp, pgs, npages, flags);
   1386   1.46       chs 			simple_lock(slock);
   1387   1.37       chs 			if (by_list) {
   1388   1.37       chs 				pg = TAILQ_NEXT(&curmp, listq);
   1389   1.37       chs 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1390   1.37       chs 			}
   1391   1.37       chs 			if (error) {
   1392   1.37       chs 				break;
   1393   1.37       chs 			}
   1394   1.37       chs 			if (by_list) {
   1395   1.37       chs 				continue;
   1396   1.37       chs 			}
   1397   1.37       chs 		}
   1398   1.37       chs 
   1399   1.37       chs 		/*
   1400   1.37       chs 		 * find the next page and continue if there was no error.
   1401   1.37       chs 		 */
   1402   1.37       chs 
   1403   1.37       chs 		if (by_list) {
   1404   1.37       chs 			if (nextpg) {
   1405   1.37       chs 				pg = nextpg;
   1406   1.37       chs 				nextpg = NULL;
   1407   1.37       chs 			} else {
   1408   1.37       chs 				pg = TAILQ_NEXT(pg, listq);
   1409   1.37       chs 			}
   1410   1.37       chs 		} else {
   1411   1.61     enami 			off += (npages - nback) << PAGE_SHIFT;
   1412   1.37       chs 			if (off < endoff) {
   1413   1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1414   1.37       chs 			}
   1415   1.37       chs 		}
   1416   1.37       chs 	}
   1417   1.37       chs 	if (by_list) {
   1418   1.37       chs 		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
   1419   1.70  christos 		PRELE(l);
   1420   1.37       chs 	}
   1421   1.37       chs 
   1422  1.103      yamt 	if (modified && (vp->v_flag & VWRITEMAPDIRTY) != 0 &&
   1423  1.121   reinoud 	    (vp->v_type != VBLK ||
   1424  1.103      yamt 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1425  1.103      yamt 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1426  1.103      yamt 	}
   1427  1.103      yamt 
   1428   1.37       chs 	/*
   1429   1.37       chs 	 * if we're cleaning and there was nothing to clean,
   1430   1.37       chs 	 * take us off the syncer list.  if we started any i/o
   1431   1.37       chs 	 * and we're doing sync i/o, wait for all writes to finish.
   1432   1.37       chs 	 */
   1433   1.37       chs 
   1434   1.62  perseant 	s = splbio();
   1435  1.104      yamt 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1436  1.104      yamt 	    (vp->v_flag & VONWORKLST) != 0) {
   1437  1.103      yamt 		vp->v_flag &= ~VWRITEMAPDIRTY;
   1438  1.103      yamt 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
   1439  1.103      yamt 			vp->v_flag &= ~VONWORKLST;
   1440  1.103      yamt 			LIST_REMOVE(vp, v_synclist);
   1441  1.103      yamt 		}
   1442   1.37       chs 	}
   1443   1.62  perseant 	splx(s);
   1444  1.102      yamt 
   1445  1.102      yamt #if !defined(DEBUG)
   1446  1.102      yamt skip_scan:
   1447  1.102      yamt #endif /* !defined(DEBUG) */
   1448   1.37       chs 	if (!wasclean && !async) {
   1449   1.37       chs 		s = splbio();
   1450   1.71        pk 		/*
   1451   1.71        pk 		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
   1452   1.71        pk 		 *	 but the slot in ltsleep() is taken!
   1453   1.71        pk 		 * XXX - try to recover from missed wakeups with a timeout..
   1454   1.71        pk 		 *	 must think of something better.
   1455   1.71        pk 		 */
   1456   1.37       chs 		while (vp->v_numoutput != 0) {
   1457   1.37       chs 			vp->v_flag |= VBWAIT;
   1458   1.46       chs 			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
   1459   1.71        pk 			    "genput2", hz);
   1460   1.46       chs 			simple_lock(slock);
   1461   1.37       chs 		}
   1462   1.37       chs 		splx(s);
   1463   1.37       chs 	}
   1464   1.37       chs 	simple_unlock(&uobj->vmobjlock);
   1465   1.53     enami 	return (error);
   1466   1.37       chs }
   1467   1.37       chs 
   1468   1.37       chs int
   1469   1.37       chs genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1470   1.37       chs {
   1471  1.130       chs 	off_t off;
   1472  1.130       chs 	vaddr_t kva;
   1473  1.130       chs 	size_t len;
   1474  1.130       chs 	int error;
   1475  1.130       chs 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1476  1.130       chs 
   1477  1.130       chs 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1478  1.130       chs 	    vp, pgs, npages, flags);
   1479  1.130       chs 
   1480  1.130       chs 	off = pgs[0]->offset;
   1481  1.130       chs 	kva = uvm_pagermapin(pgs, npages,
   1482  1.130       chs 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1483  1.130       chs 	len = npages << PAGE_SHIFT;
   1484  1.130       chs 
   1485  1.130       chs 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1486  1.130       chs 			    uvm_aio_biodone);
   1487  1.130       chs 
   1488  1.130       chs 	return error;
   1489  1.130       chs }
   1490  1.130       chs 
   1491  1.130       chs /*
   1492  1.130       chs  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1493  1.130       chs  * and mapped into kernel memory.  Here we just look up the underlying
   1494  1.130       chs  * device block addresses and call the strategy routine.
   1495  1.130       chs  */
   1496  1.130       chs 
   1497  1.130       chs static int
   1498  1.130       chs genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1499  1.130       chs     enum uio_rw rw, void (*iodone)(struct buf *))
   1500  1.130       chs {
   1501   1.37       chs 	int s, error, run;
   1502   1.37       chs 	int fs_bshift, dev_bshift;
   1503   1.21       chs 	off_t eof, offset, startoffset;
   1504   1.21       chs 	size_t bytes, iobytes, skipbytes;
   1505   1.21       chs 	daddr_t lbn, blkno;
   1506   1.21       chs 	struct buf *mbp, *bp;
   1507   1.36       chs 	struct vnode *devvp;
   1508   1.37       chs 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1509  1.130       chs 	boolean_t write = rw == UIO_WRITE;
   1510  1.130       chs 	int brw = write ? B_WRITE : B_READ;
   1511  1.130       chs 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1512   1.21       chs 
   1513  1.130       chs 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1514  1.130       chs 	    vp, kva, len, flags);
   1515   1.21       chs 
   1516  1.123      yamt 	GOP_SIZE(vp, vp->v_size, &eof, 0);
   1517  1.121   reinoud 	if (vp->v_type != VBLK) {
   1518   1.36       chs 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1519   1.36       chs 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1520   1.36       chs 	} else {
   1521   1.36       chs 		fs_bshift = DEV_BSHIFT;
   1522   1.36       chs 		dev_bshift = DEV_BSHIFT;
   1523   1.36       chs 	}
   1524   1.37       chs 	error = 0;
   1525  1.130       chs 	startoffset = off;
   1526  1.130       chs 	bytes = MIN(len, eof - startoffset);
   1527   1.21       chs 	skipbytes = 0;
   1528   1.21       chs 	KASSERT(bytes != 0);
   1529   1.21       chs 
   1530  1.130       chs 	if (write) {
   1531  1.130       chs 		s = splbio();
   1532  1.130       chs 		simple_lock(&global_v_numoutput_slock);
   1533  1.130       chs 		vp->v_numoutput += 2;
   1534  1.130       chs 		simple_unlock(&global_v_numoutput_slock);
   1535  1.130       chs 		splx(s);
   1536  1.130       chs 	}
   1537  1.119      yamt 	mbp = getiobuf();
   1538   1.21       chs 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1539   1.53     enami 	    vp, mbp, vp->v_numoutput, bytes);
   1540  1.130       chs 	mbp->b_bufsize = len;
   1541   1.21       chs 	mbp->b_data = (void *)kva;
   1542   1.21       chs 	mbp->b_resid = mbp->b_bcount = bytes;
   1543  1.130       chs 	mbp->b_flags = B_BUSY | brw | B_AGE | (async ? (B_CALL | B_ASYNC) : 0);
   1544  1.130       chs 	mbp->b_iodone = iodone;
   1545   1.21       chs 	mbp->b_vp = vp;
   1546  1.120      yamt 	if (curproc == uvm.pagedaemon_proc)
   1547  1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1548  1.120      yamt 	else if (async)
   1549  1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1550  1.120      yamt 	else
   1551  1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1552   1.21       chs 
   1553   1.21       chs 	bp = NULL;
   1554   1.21       chs 	for (offset = startoffset;
   1555   1.53     enami 	    bytes > 0;
   1556   1.53     enami 	    offset += iobytes, bytes -= iobytes) {
   1557   1.21       chs 		lbn = offset >> fs_bshift;
   1558   1.36       chs 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1559   1.21       chs 		if (error) {
   1560   1.21       chs 			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
   1561   1.21       chs 			skipbytes += bytes;
   1562   1.21       chs 			bytes = 0;
   1563   1.21       chs 			break;
   1564   1.21       chs 		}
   1565   1.21       chs 
   1566   1.26       chs 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1567   1.26       chs 		    bytes);
   1568   1.21       chs 		if (blkno == (daddr_t)-1) {
   1569  1.130       chs 			if (!write) {
   1570  1.130       chs 				memset((char *)kva + (offset - startoffset), 0,
   1571  1.130       chs 				   iobytes);
   1572  1.130       chs 			}
   1573   1.21       chs 			skipbytes += iobytes;
   1574   1.21       chs 			continue;
   1575   1.21       chs 		}
   1576   1.21       chs 
   1577   1.21       chs 		/* if it's really one i/o, don't make a second buf */
   1578   1.21       chs 		if (offset == startoffset && iobytes == bytes) {
   1579   1.21       chs 			bp = mbp;
   1580   1.21       chs 		} else {
   1581   1.21       chs 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1582   1.53     enami 			    vp, bp, vp->v_numoutput, 0);
   1583  1.120      yamt 			bp = getiobuf();
   1584  1.130       chs 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1585   1.21       chs 		}
   1586   1.21       chs 		bp->b_lblkno = 0;
   1587   1.21       chs 
   1588   1.21       chs 		/* adjust physical blkno for partial blocks */
   1589   1.25      fvdl 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1590   1.53     enami 		    dev_bshift);
   1591   1.53     enami 		UVMHIST_LOG(ubchist,
   1592   1.53     enami 		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1593   1.53     enami 		    vp, offset, bp->b_bcount, bp->b_blkno);
   1594  1.114      yamt 
   1595  1.114      yamt 		VOP_STRATEGY(devvp, bp);
   1596   1.21       chs 	}
   1597   1.21       chs 	if (skipbytes) {
   1598   1.29       chs 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1599   1.21       chs 	}
   1600  1.120      yamt 	nestiobuf_done(mbp, skipbytes, error);
   1601   1.21       chs 	if (async) {
   1602   1.32       chs 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1603   1.53     enami 		return (0);
   1604   1.21       chs 	}
   1605   1.37       chs 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1606   1.37       chs 	error = biowait(mbp);
   1607  1.134      yamt 	s = splbio();
   1608  1.130       chs 	(*iodone)(mbp);
   1609  1.134      yamt 	splx(s);
   1610   1.21       chs 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1611   1.53     enami 	return (error);
   1612   1.42       chs }
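
/*
 * Worked example for the split loop above (editor's illustration):
 * with 8 KB file system blocks (fs_bshift == 13), an I/O starting at
 * offset 20480 maps to lbn 2.  If VOP_BMAP() reports run == 0, then
 *
 *	iobytes = MIN(((2 + 1 + 0) << 13) - 20480, bytes)
 *	        = MIN(4096, bytes)
 *
 * so only the remainder of block 2 is issued in this iteration and the
 * next iteration starts at the block boundary, offset 24576.
 */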
   1613   1.42       chs 
   1614   1.42       chs /*
   1615   1.42       chs  * VOP_PUTPAGES() for vnodes which never have pages.
   1616   1.42       chs  */
   1617   1.42       chs 
   1618   1.42       chs int
   1619   1.42       chs genfs_null_putpages(void *v)
   1620   1.42       chs {
   1621   1.42       chs 	struct vop_putpages_args /* {
   1622   1.42       chs 		struct vnode *a_vp;
   1623   1.42       chs 		voff_t a_offlo;
   1624   1.42       chs 		voff_t a_offhi;
   1625   1.42       chs 		int a_flags;
   1626   1.42       chs 	} */ *ap = v;
   1627   1.42       chs 	struct vnode *vp = ap->a_vp;
   1628   1.42       chs 
   1629   1.42       chs 	KASSERT(vp->v_uobj.uo_npages == 0);
   1630   1.42       chs 	simple_unlock(&vp->v_interlock);
   1631   1.42       chs 	return (0);
   1632   1.21       chs }
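
/*
 * Example use (editor's sketch; the "xxx" names are hypothetical):
 * a file system whose vnodes never cache pages can plug this routine
 * directly into its vnode operations table:
 *
 *	const struct vnodeopv_entry_desc xxx_vnodeop_entries[] = {
 *		...
 *		{ &vop_putpages_desc, genfs_null_putpages },
 *		...
 *	};
 */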
   1633   1.21       chs 
   1634   1.37       chs void
   1635   1.98      yamt genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
   1636   1.37       chs {
   1637   1.37       chs 	struct genfs_node *gp = VTOG(vp);
   1638   1.37       chs 
   1639   1.37       chs 	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
   1640   1.37       chs 	gp->g_op = ops;
   1641   1.37       chs }
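
/*
 * Example use (editor's sketch; the "xxx" names are hypothetical, and
 * the member order assumes the usual gop_size, gop_alloc, gop_write,
 * gop_markupdate layout of struct genfs_ops): a file system supplies
 * its genfs_ops when it constructs a vnode, e.g. in its VFS_VGET()
 * path:
 *
 *	static const struct genfs_ops xxx_genfsops = {
 *		genfs_size,
 *		xxx_gop_alloc,
 *		genfs_gop_write,
 *		xxx_gop_markupdate,
 *	};
 *	...
 *	genfs_node_init(vp, &xxx_genfsops);
 */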
   1642   1.37       chs 
   1643   1.37       chs void
   1644  1.131  christos genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags __unused)
   1645   1.21       chs {
   1646   1.21       chs 	int bsize;
   1647   1.21       chs 
   1648   1.37       chs 	bsize = 1 << vp->v_mount->mnt_fs_bshift;
   1649   1.37       chs 	*eobp = (size + bsize - 1) & ~(bsize - 1);
   1650   1.43       chs }
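
/*
 * Worked example (editor's illustration): with 8 KB blocks
 * (mnt_fs_bshift == 13, bsize == 8192), a size of 10000 rounds up to
 * (10000 + 8191) & ~8191 == 16384, i.e. the end of the second block.
 */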
   1651   1.43       chs 
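/*
 * Editor's note summarizing the compat routines below: they let a
 * file system that implements only VOP_READ() and VOP_WRITE() take
 * part in the page cache.  genfs_compat_getpages() fills each PG_FAKE
 * page with VOP_READ() through a temporary pager mapping (zero-padding
 * short reads), and genfs_compat_gop_write() pushes a page cluster
 * back out with a single VOP_WRITE().
 */
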
   1652   1.43       chs int
   1653   1.43       chs genfs_compat_getpages(void *v)
   1654   1.43       chs {
   1655   1.43       chs 	struct vop_getpages_args /* {
   1656   1.43       chs 		struct vnode *a_vp;
   1657   1.43       chs 		voff_t a_offset;
   1658   1.43       chs 		struct vm_page **a_m;
   1659   1.43       chs 		int *a_count;
   1660   1.43       chs 		int a_centeridx;
   1661   1.43       chs 		vm_prot_t a_access_type;
   1662   1.43       chs 		int a_advice;
   1663   1.43       chs 		int a_flags;
   1664   1.43       chs 	} */ *ap = v;
   1665   1.43       chs 
   1666   1.43       chs 	off_t origoffset;
   1667   1.43       chs 	struct vnode *vp = ap->a_vp;
   1668   1.43       chs 	struct uvm_object *uobj = &vp->v_uobj;
   1669   1.43       chs 	struct vm_page *pg, **pgs;
   1670   1.43       chs 	vaddr_t kva;
   1671   1.43       chs 	int i, error, orignpages, npages;
   1672   1.43       chs 	struct iovec iov;
   1673   1.43       chs 	struct uio uio;
   1674  1.128        ad 	kauth_cred_t cred = curlwp->l_cred;
   1675   1.43       chs 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1676   1.43       chs 
   1677   1.43       chs 	error = 0;
   1678   1.43       chs 	origoffset = ap->a_offset;
   1679   1.43       chs 	orignpages = *ap->a_count;
   1680   1.43       chs 	pgs = ap->a_m;
   1681   1.43       chs 
   1682   1.43       chs 	if (write && (vp->v_flag & VONWORKLST) == 0) {
   1683   1.43       chs 		vn_syncer_add_to_worklist(vp, filedelay);
   1684   1.43       chs 	}
   1685   1.43       chs 	if (ap->a_flags & PGO_LOCKED) {
   1686   1.43       chs 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
    1687   1.54     enami 		    UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
   1688   1.43       chs 
   1689   1.53     enami 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
   1690   1.43       chs 	}
   1691   1.43       chs 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1692   1.43       chs 		simple_unlock(&uobj->vmobjlock);
   1693   1.53     enami 		return (EINVAL);
   1694   1.43       chs 	}
   1695  1.115      yamt 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1696  1.117      yamt 		simple_unlock(&uobj->vmobjlock);
   1697  1.115      yamt 		return 0;
   1698  1.115      yamt 	}
   1699   1.43       chs 	npages = orignpages;
   1700   1.43       chs 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1701   1.43       chs 	simple_unlock(&uobj->vmobjlock);
   1702   1.53     enami 	kva = uvm_pagermapin(pgs, npages,
   1703   1.53     enami 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1704   1.43       chs 	for (i = 0; i < npages; i++) {
   1705   1.43       chs 		pg = pgs[i];
   1706   1.43       chs 		if ((pg->flags & PG_FAKE) == 0) {
   1707   1.43       chs 			continue;
   1708   1.43       chs 		}
   1709   1.43       chs 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1710   1.43       chs 		iov.iov_len = PAGE_SIZE;
   1711   1.43       chs 		uio.uio_iov = &iov;
   1712   1.43       chs 		uio.uio_iovcnt = 1;
   1713   1.43       chs 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1714   1.43       chs 		uio.uio_rw = UIO_READ;
   1715   1.43       chs 		uio.uio_resid = PAGE_SIZE;
   1716  1.122      yamt 		UIO_SETUP_SYSSPACE(&uio);
   1717   1.87      yamt 		/* XXX vn_lock */
   1718   1.43       chs 		error = VOP_READ(vp, &uio, 0, cred);
   1719   1.43       chs 		if (error) {
   1720   1.43       chs 			break;
   1721   1.52       chs 		}
   1722   1.52       chs 		if (uio.uio_resid) {
   1723   1.52       chs 			memset(iov.iov_base, 0, uio.uio_resid);
   1724   1.43       chs 		}
   1725   1.43       chs 	}
   1726   1.43       chs 	uvm_pagermapout(kva, npages);
   1727   1.43       chs 	simple_lock(&uobj->vmobjlock);
   1728   1.43       chs 	uvm_lock_pageq();
   1729   1.43       chs 	for (i = 0; i < npages; i++) {
   1730   1.43       chs 		pg = pgs[i];
   1731   1.43       chs 		if (error && (pg->flags & PG_FAKE) != 0) {
   1732   1.43       chs 			pg->flags |= PG_RELEASED;
   1733   1.43       chs 		} else {
   1734   1.43       chs 			pmap_clear_modify(pg);
   1735   1.43       chs 			uvm_pageactivate(pg);
   1736   1.43       chs 		}
   1737   1.43       chs 	}
   1738   1.43       chs 	if (error) {
   1739   1.43       chs 		uvm_page_unbusy(pgs, npages);
   1740   1.43       chs 	}
   1741   1.43       chs 	uvm_unlock_pageq();
   1742   1.43       chs 	simple_unlock(&uobj->vmobjlock);
   1743   1.53     enami 	return (error);
   1744   1.43       chs }
   1745   1.43       chs 
   1746   1.43       chs int
   1747   1.43       chs genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1748  1.131  christos     int flags __unused)
   1749   1.43       chs {
   1750   1.43       chs 	off_t offset;
   1751   1.43       chs 	struct iovec iov;
   1752   1.43       chs 	struct uio uio;
   1753  1.128        ad 	kauth_cred_t cred = curlwp->l_cred;
   1754   1.43       chs 	struct buf *bp;
   1755   1.43       chs 	vaddr_t kva;
   1756   1.43       chs 	int s, error;
   1757   1.43       chs 
   1758   1.43       chs 	offset = pgs[0]->offset;
   1759   1.53     enami 	kva = uvm_pagermapin(pgs, npages,
   1760   1.53     enami 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1761   1.43       chs 
   1762   1.43       chs 	iov.iov_base = (void *)kva;
   1763   1.43       chs 	iov.iov_len = npages << PAGE_SHIFT;
   1764   1.43       chs 	uio.uio_iov = &iov;
   1765   1.68      yamt 	uio.uio_iovcnt = 1;
   1766   1.43       chs 	uio.uio_offset = offset;
   1767   1.43       chs 	uio.uio_rw = UIO_WRITE;
   1768   1.43       chs 	uio.uio_resid = npages << PAGE_SHIFT;
   1769  1.122      yamt 	UIO_SETUP_SYSSPACE(&uio);
   1770   1.87      yamt 	/* XXX vn_lock */
   1771   1.43       chs 	error = VOP_WRITE(vp, &uio, 0, cred);
   1772   1.43       chs 
   1773   1.43       chs 	s = splbio();
   1774   1.71        pk 	V_INCR_NUMOUTPUT(vp);
   1775   1.43       chs 	splx(s);
   1776   1.43       chs 
   1777  1.119      yamt 	bp = getiobuf();
   1778   1.43       chs 	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
   1779   1.43       chs 	bp->b_vp = vp;
   1780   1.43       chs 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1781   1.43       chs 	bp->b_data = (char *)kva;
   1782   1.43       chs 	bp->b_bcount = npages << PAGE_SHIFT;
   1783   1.43       chs 	bp->b_bufsize = npages << PAGE_SHIFT;
   1784   1.43       chs 	bp->b_resid = 0;
   1785   1.43       chs 	if (error) {
   1786   1.43       chs 		bp->b_flags |= B_ERROR;
   1787   1.43       chs 		bp->b_error = error;
   1788   1.43       chs 	}
   1789   1.43       chs 	uvm_aio_aiodone(bp);
   1790   1.53     enami 	return (error);
   1791   1.66  jdolecek }
   1792   1.66  jdolecek 
   1793  1.130       chs /*
    1794  1.130       chs  * Process a uio using direct I/O.  If we reach a part of the request
    1795  1.130       chs  * which cannot be processed in this fashion for some reason, just return;
    1796  1.130       chs  * the caller must then make progress on the remaining part of the request
    1797  1.130       chs  * using buffered I/O before trying direct I/O again.
   1798  1.130       chs  */
   1799  1.130       chs 
   1800  1.130       chs void
   1801  1.131  christos genfs_directio(struct vnode *vp, struct uio *uio, int ioflag __unused)
   1802  1.130       chs {
   1803  1.130       chs 	struct vmspace *vs;
   1804  1.130       chs 	struct iovec *iov;
   1805  1.130       chs 	vaddr_t va;
   1806  1.130       chs 	size_t len;
   1807  1.130       chs 	const int mask = DEV_BSIZE - 1;
   1808  1.130       chs 	int error;
   1809  1.130       chs 
   1810  1.130       chs 	/*
   1811  1.130       chs 	 * We only support direct I/O to user space for now.
   1812  1.130       chs 	 */
   1813  1.130       chs 
   1814  1.130       chs 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1815  1.130       chs 		return;
   1816  1.130       chs 	}
   1817  1.130       chs 
   1818  1.130       chs 	/*
   1819  1.130       chs 	 * If the vnode is mapped, we would need to get the getpages lock
    1820  1.130       chs 	 * to stabilize the bmap, but then we would get into trouble while
   1821  1.130       chs 	 * locking the pages if the pages belong to this same vnode (or a
   1822  1.130       chs 	 * multi-vnode cascade to the same effect).  Just fall back to
   1823  1.130       chs 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1824  1.130       chs 	 */
   1825  1.130       chs 
   1826  1.130       chs 	if (vp->v_flag & VMAPPED) {
   1827  1.130       chs 		return;
   1828  1.130       chs 	}
   1829  1.130       chs 
   1830  1.130       chs 	/*
   1831  1.130       chs 	 * Do as much of the uio as possible with direct I/O.
   1832  1.130       chs 	 */
   1833  1.130       chs 
   1834  1.130       chs 	vs = uio->uio_vmspace;
   1835  1.130       chs 	while (uio->uio_resid) {
   1836  1.130       chs 		iov = uio->uio_iov;
   1837  1.130       chs 		if (iov->iov_len == 0) {
   1838  1.130       chs 			uio->uio_iov++;
   1839  1.130       chs 			uio->uio_iovcnt--;
   1840  1.130       chs 			continue;
   1841  1.130       chs 		}
   1842  1.130       chs 		va = (vaddr_t)iov->iov_base;
   1843  1.130       chs 		len = MIN(iov->iov_len, genfs_maxdio);
   1844  1.130       chs 		len &= ~mask;
   1845  1.130       chs 
   1846  1.130       chs 		/*
   1847  1.130       chs 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1848  1.130       chs 		 * the current EOF, then fall back to buffered I/O.
   1849  1.130       chs 		 */
   1850  1.130       chs 
   1851  1.130       chs 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1852  1.130       chs 			return;
   1853  1.130       chs 		}
   1854  1.130       chs 
   1855  1.130       chs 		/*
   1856  1.130       chs 		 * Check alignment.  The file offset must be at least
   1857  1.130       chs 		 * sector-aligned.  The exact constraint on memory alignment
   1858  1.130       chs 		 * is very hardware-dependent, but requiring sector-aligned
   1859  1.130       chs 		 * addresses there too is safe.
   1860  1.130       chs 		 */
   1861  1.130       chs 
   1862  1.130       chs 		if (uio->uio_offset & mask || va & mask) {
   1863  1.130       chs 			return;
   1864  1.130       chs 		}
   1865  1.130       chs 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1866  1.130       chs 					  uio->uio_rw);
   1867  1.130       chs 		if (error) {
   1868  1.130       chs 			break;
   1869  1.130       chs 		}
   1870  1.130       chs 		iov->iov_base = (caddr_t)iov->iov_base + len;
   1871  1.130       chs 		iov->iov_len -= len;
   1872  1.130       chs 		uio->uio_offset += len;
   1873  1.130       chs 		uio->uio_resid -= len;
   1874  1.130       chs 	}
   1875  1.130       chs }
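
/*
 * Alignment example (editor's illustration): with DEV_BSIZE == 512,
 * mask == 511, so a chunk is attempted as direct I/O only when the
 * file offset and the user address are both multiples of 512 and at
 * least 512 bytes remain below EOF; e.g. offset 1536 into a
 * sector-aligned buffer qualifies, while offset 1000 falls back to
 * buffered I/O.
 */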
   1876  1.130       chs 
   1877  1.130       chs /*
   1878  1.130       chs  * Iodone routine for direct I/O.  We don't do much here since the request is
   1879  1.130       chs  * always synchronous, so the caller will do most of the work after biowait().
   1880  1.130       chs  */
   1881  1.130       chs 
   1882  1.130       chs static void
   1883  1.130       chs genfs_dio_iodone(struct buf *bp)
   1884  1.130       chs {
   1885  1.130       chs 	int s;
   1886  1.130       chs 
   1887  1.130       chs 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1888  1.130       chs 	s = splbio();
   1889  1.130       chs 	if ((bp->b_flags & (B_READ | B_AGE)) == B_AGE) {
   1890  1.130       chs 		vwakeup(bp);
   1891  1.130       chs 	}
   1892  1.130       chs 	putiobuf(bp);
   1893  1.130       chs 	splx(s);
   1894  1.130       chs }
   1895  1.130       chs 
   1896  1.130       chs /*
   1897  1.130       chs  * Process one chunk of a direct I/O request.
   1898  1.130       chs  */
   1899  1.130       chs 
   1900  1.130       chs static int
   1901  1.130       chs genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1902  1.130       chs     off_t off, enum uio_rw rw)
   1903  1.130       chs {
   1904  1.130       chs 	struct vm_map *map;
   1905  1.130       chs 	struct pmap *upm, *kpm;
   1906  1.130       chs 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1907  1.130       chs 	off_t spoff, epoff;
   1908  1.130       chs 	vaddr_t kva, puva;
   1909  1.130       chs 	paddr_t pa;
   1910  1.130       chs 	vm_prot_t prot;
   1911  1.130       chs 	int error, rv, poff, koff;
   1912  1.130       chs 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO |
   1913  1.130       chs 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1914  1.130       chs 
   1915  1.130       chs 	/*
   1916  1.130       chs 	 * For writes, verify that this range of the file already has fully
   1917  1.130       chs 	 * allocated backing store.  If there are any holes, just punt and
   1918  1.130       chs 	 * make the caller take the buffered write path.
   1919  1.130       chs 	 */
   1920  1.130       chs 
   1921  1.130       chs 	if (rw == UIO_WRITE) {
   1922  1.130       chs 		daddr_t lbn, elbn, blkno;
   1923  1.130       chs 		int bsize, bshift, run;
   1924  1.130       chs 
   1925  1.130       chs 		bshift = vp->v_mount->mnt_fs_bshift;
   1926  1.130       chs 		bsize = 1 << bshift;
   1927  1.130       chs 		lbn = off >> bshift;
   1928  1.130       chs 		elbn = (off + len + bsize - 1) >> bshift;
   1929  1.130       chs 		while (lbn < elbn) {
   1930  1.130       chs 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1931  1.130       chs 			if (error) {
   1932  1.130       chs 				return error;
   1933  1.130       chs 			}
   1934  1.130       chs 			if (blkno == (daddr_t)-1) {
   1935  1.130       chs 				return ENOSPC;
   1936  1.130       chs 			}
   1937  1.130       chs 			lbn += 1 + run;
   1938  1.130       chs 		}
   1939  1.130       chs 	}
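
	/*
	 * Worked example for the check above (editor's illustration):
	 * with 8 KB blocks (bshift == 13), a write covering offsets
	 * [20000, 40000) checks lbn 2 through 4 (assuming run == 0);
	 * if VOP_BMAP() maps any of them to -1 (a hole), ENOSPC sends
	 * the caller back to the buffered write path.
	 */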
   1940  1.130       chs 
   1941  1.130       chs 	/*
   1942  1.130       chs 	 * Flush any cached pages for parts of the file that we're about to
   1943  1.130       chs 	 * access.  If we're writing, invalidate pages as well.
   1944  1.130       chs 	 */
   1945  1.130       chs 
   1946  1.130       chs 	spoff = trunc_page(off);
   1947  1.130       chs 	epoff = round_page(off + len);
   1948  1.130       chs 	simple_lock(&vp->v_interlock);
   1949  1.130       chs 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1950  1.130       chs 	if (error) {
   1951  1.130       chs 		return error;
   1952  1.130       chs 	}
   1953  1.130       chs 
   1954  1.130       chs 	/*
   1955  1.130       chs 	 * Wire the user pages and remap them into kernel memory.
   1956  1.130       chs 	 */
   1957  1.130       chs 
   1958  1.130       chs 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1959  1.130       chs 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1960  1.130       chs 	if (error) {
   1961  1.130       chs 		return error;
   1962  1.130       chs 	}
   1963  1.130       chs 
   1964  1.130       chs 	map = &vs->vm_map;
   1965  1.130       chs 	upm = vm_map_pmap(map);
   1966  1.130       chs 	kpm = vm_map_pmap(kernel_map);
   1967  1.130       chs 	kva = uvm_km_alloc(kernel_map, klen, 0,
   1968  1.130       chs 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   1969  1.130       chs 	puva = trunc_page(uva);
   1970  1.130       chs 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1971  1.130       chs 		rv = pmap_extract(upm, puva + poff, &pa);
   1972  1.130       chs 		KASSERT(rv);
   1973  1.130       chs 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   1974  1.130       chs 	}
   1975  1.130       chs 	pmap_update(kpm);
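
	/*
	 * Editor's note on the remapping above: the user pages are now
	 * wired, so their physical addresses are stable; entering the
	 * same physical pages into the kernel pmap at kva gives
	 * genfs_do_io() a kernel-visible alias of the user buffer
	 * without copying.
	 */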
   1976  1.130       chs 
   1977  1.130       chs 	/*
   1978  1.130       chs 	 * Do the I/O.
   1979  1.130       chs 	 */
   1980  1.130       chs 
   1981  1.130       chs 	koff = uva - trunc_page(uva);
   1982  1.130       chs 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1983  1.130       chs 			    genfs_dio_iodone);
   1984  1.130       chs 
   1985  1.130       chs 	/*
   1986  1.130       chs 	 * Tear down the kernel mapping.
   1987  1.130       chs 	 */
   1988  1.130       chs 
   1989  1.130       chs 	pmap_remove(kpm, kva, kva + klen);
   1990  1.130       chs 	pmap_update(kpm);
   1991  1.130       chs 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1992  1.130       chs 
   1993  1.130       chs 	/*
   1994  1.130       chs 	 * Unwire the user pages.
   1995  1.130       chs 	 */
   1996  1.130       chs 
   1997  1.130       chs 	uvm_vsunlock(vs, (void *)uva, len);
   1998  1.130       chs 	return error;
   1999  1.130       chs }
   2000  1.130       chs 
   2001  1.130       chs 
   2002   1.66  jdolecek static void
   2003   1.66  jdolecek filt_genfsdetach(struct knote *kn)
   2004   1.66  jdolecek {
   2005   1.66  jdolecek 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   2006   1.66  jdolecek 
   2007   1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   2008   1.66  jdolecek 	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
   2009   1.66  jdolecek }
   2010   1.66  jdolecek 
   2011   1.66  jdolecek static int
   2012   1.66  jdolecek filt_genfsread(struct knote *kn, long hint)
   2013   1.66  jdolecek {
   2014   1.66  jdolecek 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   2015   1.66  jdolecek 
   2016   1.66  jdolecek 	/*
   2017   1.66  jdolecek 	 * filesystem is gone, so set the EOF flag and schedule
   2018   1.66  jdolecek 	 * the knote for deletion.
   2019   1.66  jdolecek 	 */
   2020   1.66  jdolecek 	if (hint == NOTE_REVOKE) {
   2021   1.66  jdolecek 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
   2022   1.66  jdolecek 		return (1);
   2023   1.66  jdolecek 	}
   2024   1.66  jdolecek 
   2025   1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   2026   1.66  jdolecek 	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
    2027   1.66  jdolecek 	return (kn->kn_data != 0);
   2028   1.66  jdolecek }
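
/*
 * Example (editor's illustration): for a 4096-byte file with the
 * descriptor's f_offset at 1024, kn_data becomes 3072 and the knote
 * fires; once f_offset reaches v_size, kn_data is 0 and EVFILT_READ
 * stops reporting the vnode as readable.
 */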
   2029   1.66  jdolecek 
   2030   1.66  jdolecek static int
   2031   1.66  jdolecek filt_genfsvnode(struct knote *kn, long hint)
   2032   1.66  jdolecek {
   2033   1.66  jdolecek 
   2034   1.66  jdolecek 	if (kn->kn_sfflags & hint)
   2035   1.66  jdolecek 		kn->kn_fflags |= hint;
   2036   1.66  jdolecek 	if (hint == NOTE_REVOKE) {
   2037   1.66  jdolecek 		kn->kn_flags |= EV_EOF;
   2038   1.66  jdolecek 		return (1);
   2039   1.66  jdolecek 	}
   2040   1.66  jdolecek 	return (kn->kn_fflags != 0);
   2041   1.66  jdolecek }
   2042   1.66  jdolecek 
   2043   1.96     perry static const struct filterops genfsread_filtops =
   2044   1.66  jdolecek 	{ 1, NULL, filt_genfsdetach, filt_genfsread };
   2045   1.96     perry static const struct filterops genfsvnode_filtops =
   2046   1.66  jdolecek 	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };
   2047   1.66  jdolecek 
   2048   1.66  jdolecek int
   2049   1.66  jdolecek genfs_kqfilter(void *v)
   2050   1.66  jdolecek {
   2051   1.66  jdolecek 	struct vop_kqfilter_args /* {
   2052   1.66  jdolecek 		struct vnode	*a_vp;
   2053   1.66  jdolecek 		struct knote	*a_kn;
   2054   1.66  jdolecek 	} */ *ap = v;
   2055   1.66  jdolecek 	struct vnode *vp;
   2056   1.66  jdolecek 	struct knote *kn;
   2057   1.66  jdolecek 
   2058   1.66  jdolecek 	vp = ap->a_vp;
   2059   1.66  jdolecek 	kn = ap->a_kn;
   2060   1.66  jdolecek 	switch (kn->kn_filter) {
   2061   1.66  jdolecek 	case EVFILT_READ:
   2062   1.66  jdolecek 		kn->kn_fop = &genfsread_filtops;
   2063   1.66  jdolecek 		break;
   2064   1.66  jdolecek 	case EVFILT_VNODE:
   2065   1.66  jdolecek 		kn->kn_fop = &genfsvnode_filtops;
   2066   1.66  jdolecek 		break;
   2067   1.66  jdolecek 	default:
   2068   1.66  jdolecek 		return (1);
   2069   1.66  jdolecek 	}
   2070   1.66  jdolecek 
   2071   1.66  jdolecek 	kn->kn_hook = vp;
   2072   1.66  jdolecek 
   2073   1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   2074   1.66  jdolecek 	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
   2075   1.66  jdolecek 
   2076   1.66  jdolecek 	return (0);
   2077    1.1   mycroft }
   2078