genfs_vnops.c revision 1.128.4.3
      1  1.128.4.3        ad /*	$NetBSD: genfs_vnops.c,v 1.128.4.3 2007/01/30 13:51:42 ad Exp $	*/
      2        1.6      fvdl 
      3        1.6      fvdl /*
      4        1.6      fvdl  * Copyright (c) 1982, 1986, 1989, 1993
      5        1.6      fvdl  *	The Regents of the University of California.  All rights reserved.
      6        1.6      fvdl  *
      7        1.6      fvdl  * Redistribution and use in source and binary forms, with or without
      8        1.6      fvdl  * modification, are permitted provided that the following conditions
      9        1.6      fvdl  * are met:
     10        1.6      fvdl  * 1. Redistributions of source code must retain the above copyright
     11        1.6      fvdl  *    notice, this list of conditions and the following disclaimer.
     12        1.6      fvdl  * 2. Redistributions in binary form must reproduce the above copyright
     13        1.6      fvdl  *    notice, this list of conditions and the following disclaimer in the
     14        1.6      fvdl  *    documentation and/or other materials provided with the distribution.
     15       1.81       agc  * 3. Neither the name of the University nor the names of its contributors
     16        1.6      fvdl  *    may be used to endorse or promote products derived from this software
     17        1.6      fvdl  *    without specific prior written permission.
     18        1.6      fvdl  *
     19        1.6      fvdl  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20        1.6      fvdl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21        1.6      fvdl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22        1.6      fvdl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23        1.6      fvdl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24        1.6      fvdl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25        1.6      fvdl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26        1.6      fvdl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27        1.6      fvdl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28        1.6      fvdl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29        1.6      fvdl  * SUCH DAMAGE.
     30        1.6      fvdl  *
     31        1.6      fvdl  */
     32       1.40     lukem 
     33       1.40     lukem #include <sys/cdefs.h>
     34  1.128.4.3        ad __KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.128.4.3 2007/01/30 13:51:42 ad Exp $");
     35        1.8   thorpej 
     36        1.1   mycroft #include <sys/param.h>
     37        1.1   mycroft #include <sys/systm.h>
     38        1.6      fvdl #include <sys/proc.h>
     39        1.1   mycroft #include <sys/kernel.h>
     40        1.1   mycroft #include <sys/mount.h>
     41        1.1   mycroft #include <sys/namei.h>
     42        1.1   mycroft #include <sys/vnode.h>
     43       1.13  wrstuden #include <sys/fcntl.h>
     44  1.128.4.1        ad #include <sys/kmem.h>
     45        1.3   mycroft #include <sys/poll.h>
     46       1.37       chs #include <sys/mman.h>
     47       1.66  jdolecek #include <sys/file.h>
     48      1.125      elad #include <sys/kauth.h>
     49        1.1   mycroft 
     50        1.1   mycroft #include <miscfs/genfs/genfs.h>
     51       1.37       chs #include <miscfs/genfs/genfs_node.h>
     52        1.6      fvdl #include <miscfs/specfs/specdev.h>
     53        1.1   mycroft 
     54       1.21       chs #include <uvm/uvm.h>
     55       1.21       chs #include <uvm/uvm_pager.h>
     56       1.21       chs 
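/*
 * Helpers for the direct-I/O path, defined later in this file:
 * genfs_do_directio() performs a direct transfer on a user address
 * range and genfs_do_io() issues the underlying buffer I/O, taking an
 * iodone callback such as genfs_dio_iodone().
 */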
     57  1.128.4.1        ad static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     58  1.128.4.1        ad     off_t, enum uio_rw);
     59  1.128.4.1        ad static void genfs_dio_iodone(struct buf *);
     60  1.128.4.1        ad 
     61  1.128.4.1        ad static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     62  1.128.4.1        ad     void (*)(struct buf *));
     63      1.118     perry static inline void genfs_rel_pages(struct vm_page **, int);
     64       1.70  christos static void filt_genfsdetach(struct knote *);
     65       1.70  christos static int filt_genfsread(struct knote *, long);
     66       1.70  christos static int filt_genfsvnode(struct knote *, long);
     67       1.70  christos 
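/*
 * MAX_READ_PAGES caps how many pages a single genfs_getpages() request
 * may ask for (see the XXXUBC check there) and sizes that function's
 * on-stack page array (pgs_onstack).
 */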
     68      1.110      yamt #define MAX_READ_PAGES	16 	/* XXXUBC 16 */
     69       1.41  christos 
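/*
 * Upper bound on the size of a single transfer issued by the
 * direct-I/O helpers above; defaults to the platform MAXPHYS.
 */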
     70  1.128.4.1        ad int genfs_maxdio = MAXPHYS;
     71  1.128.4.1        ad 
     72        1.1   mycroft int
     73       1.53     enami genfs_poll(void *v)
     74        1.1   mycroft {
     75        1.3   mycroft 	struct vop_poll_args /* {
     76        1.1   mycroft 		struct vnode *a_vp;
     77        1.3   mycroft 		int a_events;
     78      1.116  christos 		struct lwp *a_l;
     79        1.1   mycroft 	} */ *ap = v;
     80        1.1   mycroft 
     81        1.3   mycroft 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
     82        1.1   mycroft }
     83        1.1   mycroft 
     84        1.1   mycroft int
     85       1.53     enami genfs_seek(void *v)
     86        1.4    kleink {
     87        1.4    kleink 	struct vop_seek_args /* {
     88        1.4    kleink 		struct vnode *a_vp;
     89        1.4    kleink 		off_t a_oldoff;
     90        1.4    kleink 		off_t a_newoff;
     91      1.125      elad 		kauth_cred_t cred;
     92        1.4    kleink 	} */ *ap = v;
     93        1.4    kleink 
     94        1.4    kleink 	if (ap->a_newoff < 0)
     95        1.4    kleink 		return (EINVAL);
     96        1.4    kleink 
     97        1.4    kleink 	return (0);
     98        1.4    kleink }
     99        1.4    kleink 
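/*
 * Generic abort-op: free the pathname buffer that namei() left behind
 * when the caller owns it (HASBUF set) and does not intend to reuse it
 * (SAVESTART clear).
 */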
    100        1.4    kleink int
    101       1.53     enami genfs_abortop(void *v)
    102        1.1   mycroft {
    103        1.1   mycroft 	struct vop_abortop_args /* {
    104        1.1   mycroft 		struct vnode *a_dvp;
    105        1.1   mycroft 		struct componentname *a_cnp;
    106        1.1   mycroft 	} */ *ap = v;
    107       1.53     enami 
    108        1.1   mycroft 	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
    109       1.19   thorpej 		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
    110        1.1   mycroft 	return (0);
    111       1.13  wrstuden }
    112       1.13  wrstuden 
    113       1.13  wrstuden int
    114       1.53     enami genfs_fcntl(void *v)
    115       1.13  wrstuden {
    116       1.13  wrstuden 	struct vop_fcntl_args /* {
    117       1.13  wrstuden 		struct vnode *a_vp;
    118       1.13  wrstuden 		u_int a_command;
    119       1.13  wrstuden 		caddr_t a_data;
    120       1.13  wrstuden 		int a_fflag;
    121      1.125      elad 		kauth_cred_t a_cred;
    122      1.116  christos 		struct lwp *a_l;
    123       1.13  wrstuden 	} */ *ap = v;
    124       1.13  wrstuden 
    125       1.13  wrstuden 	if (ap->a_command == F_SETFL)
    126       1.13  wrstuden 		return (0);
    127       1.13  wrstuden 	else
    128       1.13  wrstuden 		return (EOPNOTSUPP);
    129        1.1   mycroft }
    130        1.1   mycroft 
    131        1.1   mycroft /*ARGSUSED*/
    132        1.1   mycroft int
    133       1.53     enami genfs_badop(void *v)
    134        1.1   mycroft {
    135        1.1   mycroft 
    136        1.1   mycroft 	panic("genfs: bad op");
    137        1.1   mycroft }
    138        1.1   mycroft 
    139        1.1   mycroft /*ARGSUSED*/
    140        1.1   mycroft int
    141       1.53     enami genfs_nullop(void *v)
    142        1.1   mycroft {
    143        1.1   mycroft 
    144        1.1   mycroft 	return (0);
    145       1.10    kleink }
    146       1.10    kleink 
    147       1.10    kleink /*ARGSUSED*/
    148       1.10    kleink int
    149       1.53     enami genfs_einval(void *v)
    150       1.10    kleink {
    151       1.10    kleink 
    152       1.10    kleink 	return (EINVAL);
    153        1.1   mycroft }
    154        1.1   mycroft 
    155       1.12  wrstuden /*
    156       1.74  jdolecek  * Called when an fs doesn't support a particular vop.
     157       1.74  jdolecek  * It takes care to vrele, vput, or vunlock any passed-in vnodes.
    158       1.12  wrstuden  */
    159       1.12  wrstuden int
    160       1.75  jdolecek genfs_eopnotsupp(void *v)
    161       1.12  wrstuden {
    162       1.12  wrstuden 	struct vop_generic_args /*
    163       1.12  wrstuden 		struct vnodeop_desc *a_desc;
    164       1.53     enami 		/ * other random data follows, presumably * /
    165       1.12  wrstuden 	} */ *ap = v;
    166       1.12  wrstuden 	struct vnodeop_desc *desc = ap->a_desc;
    167       1.74  jdolecek 	struct vnode *vp, *vp_last = NULL;
    168       1.12  wrstuden 	int flags, i, j, offset;
    169       1.12  wrstuden 
    170       1.12  wrstuden 	flags = desc->vdesc_flags;
    171       1.12  wrstuden 	for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
    172       1.12  wrstuden 		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
    173       1.12  wrstuden 			break;	/* stop at end of list */
    174       1.12  wrstuden 		if ((j = flags & VDESC_VP0_WILLPUT)) {
    175       1.53     enami 			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
    176       1.74  jdolecek 
    177       1.74  jdolecek 			/* Skip if NULL */
    178       1.74  jdolecek 			if (!vp)
    179       1.74  jdolecek 				continue;
    180       1.74  jdolecek 
    181       1.12  wrstuden 			switch (j) {
    182       1.12  wrstuden 			case VDESC_VP0_WILLPUT:
    183       1.74  jdolecek 				/* Check for dvp == vp cases */
    184       1.74  jdolecek 				if (vp == vp_last)
    185       1.74  jdolecek 					vrele(vp);
    186       1.74  jdolecek 				else {
    187       1.74  jdolecek 					vput(vp);
    188       1.74  jdolecek 					vp_last = vp;
    189       1.74  jdolecek 				}
    190       1.12  wrstuden 				break;
    191       1.12  wrstuden 			case VDESC_VP0_WILLUNLOCK:
    192       1.12  wrstuden 				VOP_UNLOCK(vp, 0);
    193       1.12  wrstuden 				break;
    194       1.12  wrstuden 			case VDESC_VP0_WILLRELE:
    195       1.12  wrstuden 				vrele(vp);
    196       1.12  wrstuden 				break;
    197       1.12  wrstuden 			}
    198       1.12  wrstuden 		}
    199       1.12  wrstuden 	}
    200       1.12  wrstuden 
    201       1.12  wrstuden 	return (EOPNOTSUPP);
    202       1.12  wrstuden }
    203       1.12  wrstuden 
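/*
 * Hedged illustration (not part of this file): a file system that does
 * not implement an operation can point the corresponding entry of its
 * vnodeop table at genfs_eopnotsupp; the WILLPUT/WILLRELE/WILLUNLOCK
 * handling above then releases the vnode arguments on its behalf.
 * "examplefs" below is a hypothetical name.
 */
#if 0
const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_poll_desc, genfs_poll },			/* poll */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_rename_desc, genfs_eopnotsupp },		/* rename: unsupported */
	{ NULL, NULL }
};
#endif
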
    204        1.1   mycroft /*ARGSUSED*/
    205        1.1   mycroft int
    206       1.53     enami genfs_ebadf(void *v)
    207        1.1   mycroft {
    208        1.1   mycroft 
    209        1.1   mycroft 	return (EBADF);
    210        1.9  matthias }
    211        1.9  matthias 
    212        1.9  matthias /* ARGSUSED */
    213        1.9  matthias int
    214       1.53     enami genfs_enoioctl(void *v)
    215        1.9  matthias {
    216        1.9  matthias 
    217       1.51    atatat 	return (EPASSTHROUGH);
    218        1.6      fvdl }
    219        1.6      fvdl 
    220        1.6      fvdl 
    221        1.6      fvdl /*
    222       1.15      fvdl  * Eliminate all activity associated with the requested vnode
    223        1.6      fvdl  * and with all vnodes aliased to the requested vnode.
    224        1.6      fvdl  */
    225        1.6      fvdl int
    226       1.53     enami genfs_revoke(void *v)
    227        1.6      fvdl {
    228        1.6      fvdl 	struct vop_revoke_args /* {
    229        1.6      fvdl 		struct vnode *a_vp;
    230        1.6      fvdl 		int a_flags;
    231        1.6      fvdl 	} */ *ap = v;
    232        1.6      fvdl 	struct vnode *vp, *vq;
    233      1.116  christos 	struct lwp *l = curlwp;		/* XXX */
    234        1.6      fvdl 
    235        1.6      fvdl #ifdef DIAGNOSTIC
    236        1.6      fvdl 	if ((ap->a_flags & REVOKEALL) == 0)
    237        1.6      fvdl 		panic("genfs_revoke: not revokeall");
    238        1.6      fvdl #endif
    239        1.6      fvdl 
    240        1.6      fvdl 	vp = ap->a_vp;
    241        1.6      fvdl 	simple_lock(&vp->v_interlock);
    242        1.6      fvdl 
    243        1.6      fvdl 	if (vp->v_flag & VALIASED) {
    244        1.6      fvdl 		/*
    245        1.6      fvdl 		 * If a vgone (or vclean) is already in progress,
    246        1.6      fvdl 		 * wait until it is done and return.
    247        1.6      fvdl 		 */
    248        1.6      fvdl 		if (vp->v_flag & VXLOCK) {
    249        1.6      fvdl 			vp->v_flag |= VXWANT;
    250       1.83        pk 			ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
    251       1.83        pk 				&vp->v_interlock);
    252        1.6      fvdl 			return (0);
    253        1.6      fvdl 		}
    254        1.6      fvdl 		/*
    255        1.6      fvdl 		 * Ensure that vp will not be vgone'd while we
    256        1.6      fvdl 		 * are eliminating its aliases.
    257        1.6      fvdl 		 */
    258        1.6      fvdl 		vp->v_flag |= VXLOCK;
    259        1.6      fvdl 		simple_unlock(&vp->v_interlock);
    260        1.6      fvdl 		while (vp->v_flag & VALIASED) {
    261        1.6      fvdl 			simple_lock(&spechash_slock);
    262        1.6      fvdl 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
    263        1.6      fvdl 				if (vq->v_rdev != vp->v_rdev ||
    264        1.6      fvdl 				    vq->v_type != vp->v_type || vp == vq)
    265        1.6      fvdl 					continue;
    266        1.6      fvdl 				simple_unlock(&spechash_slock);
    267        1.6      fvdl 				vgone(vq);
    268        1.6      fvdl 				break;
    269        1.6      fvdl 			}
    270        1.6      fvdl 			if (vq == NULLVP)
    271        1.6      fvdl 				simple_unlock(&spechash_slock);
    272        1.6      fvdl 		}
    273        1.6      fvdl 		/*
    274        1.6      fvdl 		 * Remove the lock so that vgone below will
    275        1.6      fvdl 		 * really eliminate the vnode after which time
    276        1.6      fvdl 		 * vgone will awaken any sleepers.
    277        1.6      fvdl 		 */
    278        1.6      fvdl 		simple_lock(&vp->v_interlock);
    279        1.6      fvdl 		vp->v_flag &= ~VXLOCK;
    280        1.6      fvdl 	}
    281      1.116  christos 	vgonel(vp, l);
    282        1.6      fvdl 	return (0);
    283        1.6      fvdl }
    284        1.6      fvdl 
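/*
 * Hedged usage note: callers such as the revoke(2) path invoke this as
 * VOP_REVOKE(vp, REVOKEALL); with REVOKEALL set (the only mode handled
 * here), every alias of a device vnode is vgone'd along with vp itself.
 */
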
    285        1.6      fvdl /*
    286       1.12  wrstuden  * Lock the node.
    287        1.6      fvdl  */
    288        1.6      fvdl int
    289       1.53     enami genfs_lock(void *v)
    290        1.6      fvdl {
    291        1.6      fvdl 	struct vop_lock_args /* {
    292        1.6      fvdl 		struct vnode *a_vp;
    293        1.6      fvdl 		int a_flags;
    294        1.6      fvdl 	} */ *ap = v;
    295        1.6      fvdl 	struct vnode *vp = ap->a_vp;
    296        1.6      fvdl 
    297       1.86   hannken 	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
    298        1.6      fvdl }
    299        1.6      fvdl 
    300        1.6      fvdl /*
    301       1.12  wrstuden  * Unlock the node.
    302        1.6      fvdl  */
    303        1.6      fvdl int
    304       1.53     enami genfs_unlock(void *v)
    305        1.6      fvdl {
    306        1.6      fvdl 	struct vop_unlock_args /* {
    307        1.6      fvdl 		struct vnode *a_vp;
    308        1.6      fvdl 		int a_flags;
    309        1.6      fvdl 	} */ *ap = v;
    310        1.6      fvdl 	struct vnode *vp = ap->a_vp;
    311        1.6      fvdl 
    312       1.86   hannken 	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
    313       1.53     enami 	    &vp->v_interlock));
    314        1.6      fvdl }
    315        1.6      fvdl 
    316        1.6      fvdl /*
    317       1.12  wrstuden  * Return whether or not the node is locked.
    318        1.6      fvdl  */
    319        1.6      fvdl int
    320       1.53     enami genfs_islocked(void *v)
    321        1.6      fvdl {
    322        1.6      fvdl 	struct vop_islocked_args /* {
    323        1.6      fvdl 		struct vnode *a_vp;
    324        1.6      fvdl 	} */ *ap = v;
    325        1.6      fvdl 	struct vnode *vp = ap->a_vp;
    326        1.6      fvdl 
    327       1.86   hannken 	return (lockstatus(vp->v_vnlock));
    328       1.12  wrstuden }
    329       1.12  wrstuden 
    330       1.12  wrstuden /*
    331       1.12  wrstuden  * Stubs to use when there is no locking to be done on the underlying object.
    332       1.12  wrstuden  */
    333       1.12  wrstuden int
    334       1.53     enami genfs_nolock(void *v)
    335       1.12  wrstuden {
    336       1.12  wrstuden 	struct vop_lock_args /* {
    337       1.12  wrstuden 		struct vnode *a_vp;
    338       1.12  wrstuden 		int a_flags;
    339      1.116  christos 		struct lwp *a_l;
    340       1.12  wrstuden 	} */ *ap = v;
    341       1.12  wrstuden 
    342       1.12  wrstuden 	/*
    343       1.12  wrstuden 	 * Since we are not using the lock manager, we must clear
    344       1.12  wrstuden 	 * the interlock here.
    345       1.12  wrstuden 	 */
    346       1.12  wrstuden 	if (ap->a_flags & LK_INTERLOCK)
    347       1.12  wrstuden 		simple_unlock(&ap->a_vp->v_interlock);
    348       1.12  wrstuden 	return (0);
    349       1.12  wrstuden }
    350       1.12  wrstuden 
    351       1.12  wrstuden int
    352       1.53     enami genfs_nounlock(void *v)
    353       1.12  wrstuden {
    354       1.53     enami 
    355       1.12  wrstuden 	return (0);
    356       1.12  wrstuden }
    357       1.12  wrstuden 
    358       1.12  wrstuden int
    359       1.53     enami genfs_noislocked(void *v)
    360       1.12  wrstuden {
    361       1.53     enami 
    362       1.12  wrstuden 	return (0);
    363        1.8   thorpej }
    364        1.8   thorpej 
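/*
 * Hedged usage note: a file system whose nodes need no per-node locking
 * can wire the stubs above into its (hypothetical) vnodeop table:
 *
 *	{ &vop_lock_desc, genfs_nolock },
 *	{ &vop_unlock_desc, genfs_nounlock },
 *	{ &vop_islocked_desc, genfs_noislocked },
 *
 * whereas a conventional fs uses genfs_lock/genfs_unlock/genfs_islocked,
 * which defer to lockmgr(9) on vp->v_vnlock.
 */
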
    365        1.8   thorpej /*
    366  1.128.4.2        ad  * Local lease check.
    367        1.8   thorpej  */
    368        1.8   thorpej int
    369       1.53     enami genfs_lease_check(void *v)
    370        1.8   thorpej {
    371        1.8   thorpej 
    372        1.8   thorpej 	return (0);
    373       1.34       chs }
    374       1.34       chs 
    375       1.34       chs int
    376       1.53     enami genfs_mmap(void *v)
    377       1.34       chs {
    378       1.53     enami 
    379       1.53     enami 	return (0);
    380       1.21       chs }
    381       1.21       chs 
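/*
 * Unbusy a set of pages obtained from uvn_findpages(), freeing any that
 * were freshly allocated (PG_FAKE) by marking them PG_RELEASED first.
 * Used to back out when the pages cannot be filled, e.g. on a
 * would-block condition.  NULL and PGO_DONTCARE slots are skipped.
 */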
    382      1.118     perry static inline void
    383       1.63     enami genfs_rel_pages(struct vm_page **pgs, int npages)
    384       1.63     enami {
    385       1.63     enami 	int i;
    386       1.63     enami 
    387       1.63     enami 	for (i = 0; i < npages; i++) {
    388       1.63     enami 		struct vm_page *pg = pgs[i];
    389       1.63     enami 
    390      1.127      yamt 		if (pg == NULL || pg == PGO_DONTCARE)
    391       1.63     enami 			continue;
    392       1.63     enami 		if (pg->flags & PG_FAKE) {
    393       1.63     enami 			pg->flags |= PG_RELEASED;
    394       1.63     enami 		}
    395       1.63     enami 	}
    396       1.64     enami 	uvm_lock_pageq();
    397       1.63     enami 	uvm_page_unbusy(pgs, npages);
    398       1.64     enami 	uvm_unlock_pageq();
    399       1.63     enami }
    400       1.63     enami 
    401       1.21       chs /*
    402       1.21       chs  * generic VM getpages routine.
    403       1.21       chs  * Return PG_BUSY pages for the given range,
    404       1.21       chs  * reading from backing store if necessary.
    405       1.21       chs  */
    406       1.21       chs 
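/*
 * Hedged calling sketch (illustration only, not part of this file):
 * a synchronous, single-page read through the VOP.  The helper name
 * example_read_one_page is hypothetical.  The object lock
 * (vp->v_uobj.vmobjlock) must be held on entry; on return it has been
 * dropped and, on success, the page comes back PG_BUSY and must
 * eventually be unbusied by the caller.
 */
#if 0
static int
example_read_one_page(struct vnode *vp, off_t off, struct vm_page **pgp)
{
	int npages = 1;
	int error;

	*pgp = NULL;
	simple_lock(&vp->v_uobj.vmobjlock);
	error = VOP_GETPAGES(vp, trunc_page(off), pgp, &npages,
	    0 /* centeridx */, VM_PROT_READ, 0 /* advice */, PGO_SYNCIO);
	return (error);
}
#endif
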
    407       1.21       chs int
    408       1.53     enami genfs_getpages(void *v)
    409       1.21       chs {
    410       1.21       chs 	struct vop_getpages_args /* {
    411       1.21       chs 		struct vnode *a_vp;
    412       1.21       chs 		voff_t a_offset;
    413       1.33       chs 		struct vm_page **a_m;
    414       1.21       chs 		int *a_count;
    415       1.21       chs 		int a_centeridx;
    416       1.21       chs 		vm_prot_t a_access_type;
    417       1.21       chs 		int a_advice;
    418       1.21       chs 		int a_flags;
    419       1.21       chs 	} */ *ap = v;
    420       1.21       chs 
    421       1.30       chs 	off_t newsize, diskeof, memeof;
    422      1.124      yamt 	off_t offset, origoffset, startoffset, endoffset;
    423       1.21       chs 	daddr_t lbn, blkno;
    424      1.120      yamt 	int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
    425       1.37       chs 	int fs_bshift, fs_bsize, dev_bshift;
    426       1.21       chs 	int flags = ap->a_flags;
    427       1.21       chs 	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
    428       1.21       chs 	vaddr_t kva;
    429       1.21       chs 	struct buf *bp, *mbp;
    430       1.21       chs 	struct vnode *vp = ap->a_vp;
    431       1.36       chs 	struct vnode *devvp;
    432       1.37       chs 	struct genfs_node *gp = VTOG(vp);
    433       1.37       chs 	struct uvm_object *uobj = &vp->v_uobj;
    434      1.110      yamt 	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
    435       1.77      yamt 	int pgs_size;
    436      1.128        ad 	kauth_cred_t cred = curlwp->l_cred;		/* XXXUBC curlwp */
    437       1.21       chs 	boolean_t async = (flags & PGO_SYNCIO) == 0;
    438       1.21       chs 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
    439       1.21       chs 	boolean_t sawhole = FALSE;
    440       1.37       chs 	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
    441      1.100      yamt 	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
    442      1.126      yamt 	voff_t origvsize;
    443       1.21       chs 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    444       1.21       chs 
    445       1.30       chs 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    446       1.53     enami 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    447       1.30       chs 
    448      1.121   reinoud 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    449      1.121   reinoud 	    vp->v_type == VLNK || vp->v_type == VBLK);
    450      1.109      yamt 
    451       1.21       chs 	/* XXXUBC temp limit */
    452      1.110      yamt 	if (*ap->a_count > MAX_READ_PAGES) {
    453       1.37       chs 		panic("genfs_getpages: too many pages");
    454       1.21       chs 	}
    455       1.21       chs 
    456      1.126      yamt startover:
    457       1.26       chs 	error = 0;
    458      1.126      yamt 	origvsize = vp->v_size;
    459       1.26       chs 	origoffset = ap->a_offset;
    460       1.26       chs 	orignpages = *ap->a_count;
    461      1.123      yamt 	GOP_SIZE(vp, vp->v_size, &diskeof, 0);
    462       1.26       chs 	if (flags & PGO_PASTEOF) {
    463       1.37       chs 		newsize = MAX(vp->v_size,
    464       1.53     enami 		    origoffset + (orignpages << PAGE_SHIFT));
    465      1.123      yamt 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    466       1.26       chs 	} else {
    467      1.123      yamt 		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_MEM);
    468       1.21       chs 	}
     469       1.30       chs 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    470       1.30       chs 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    471       1.30       chs 	KASSERT(orignpages > 0);
    472       1.95       chs 
    473       1.95       chs 	/*
    474       1.95       chs 	 * Bounds-check the request.
    475       1.95       chs 	 */
    476       1.95       chs 
    477       1.95       chs 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    478       1.95       chs 		if ((flags & PGO_LOCKED) == 0) {
    479       1.95       chs 			simple_unlock(&uobj->vmobjlock);
    480       1.95       chs 		}
    481       1.95       chs 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    482       1.95       chs 		    origoffset, *ap->a_count, memeof,0);
    483       1.95       chs 		return (EINVAL);
    484       1.95       chs 	}
    485       1.21       chs 
    486       1.99      yamt 	/* uobj is locked */
    487       1.99      yamt 
    488      1.103      yamt 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    489      1.121   reinoud 	    (vp->v_type != VBLK ||
    490      1.103      yamt 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    491      1.103      yamt 		int updflags = 0;
    492      1.103      yamt 
    493      1.103      yamt 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    494      1.103      yamt 			updflags = GOP_UPDATE_ACCESSED;
    495      1.103      yamt 		}
    496      1.103      yamt 		if (write) {
    497      1.103      yamt 			updflags |= GOP_UPDATE_MODIFIED;
    498      1.103      yamt 		}
    499      1.103      yamt 		if (updflags != 0) {
    500      1.103      yamt 			GOP_MARKUPDATE(vp, updflags);
    501      1.103      yamt 		}
    502      1.103      yamt 	}
    503      1.103      yamt 
    504      1.101      yamt 	if (write) {
    505      1.101      yamt 		gp->g_dirtygen++;
    506      1.101      yamt 		if ((vp->v_flag & VONWORKLST) == 0) {
    507      1.101      yamt 			vn_syncer_add_to_worklist(vp, filedelay);
    508      1.101      yamt 		}
    509      1.103      yamt 		if ((vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP) {
    510      1.103      yamt 			vp->v_flag |= VWRITEMAPDIRTY;
    511      1.103      yamt 		}
    512       1.99      yamt 	}
    513       1.99      yamt 
    514       1.21       chs 	/*
    515       1.21       chs 	 * For PGO_LOCKED requests, just return whatever's in memory.
    516       1.21       chs 	 */
    517       1.21       chs 
    518       1.21       chs 	if (flags & PGO_LOCKED) {
    519      1.127      yamt 		int nfound;
    520      1.127      yamt 
    521      1.127      yamt 		npages = *ap->a_count;
    522      1.127      yamt #if defined(DEBUG)
    523      1.127      yamt 		for (i = 0; i < npages; i++) {
    524      1.127      yamt 			pg = ap->a_m[i];
    525      1.127      yamt 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    526      1.127      yamt 		}
    527      1.127      yamt #endif /* defined(DEBUG) */
    528      1.127      yamt 		nfound = uvn_findpages(uobj, origoffset, &npages,
    529      1.127      yamt 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
    530      1.127      yamt 		KASSERT(npages == *ap->a_count);
    531      1.127      yamt 		if (nfound == 0) {
    532      1.127      yamt 			return EBUSY;
    533      1.127      yamt 		}
    534      1.127      yamt 		if (lockmgr(&gp->g_glock, LK_SHARED | LK_NOWAIT, NULL)) {
    535      1.127      yamt 			genfs_rel_pages(ap->a_m, npages);
    536      1.127      yamt 
    537      1.127      yamt 			/*
    538      1.127      yamt 			 * restore the array.
    539      1.127      yamt 			 */
    540      1.127      yamt 
    541      1.127      yamt 			for (i = 0; i < npages; i++) {
    542      1.127      yamt 				pg = ap->a_m[i];
    543       1.21       chs 
     544      1.127      yamt 				if (pg != NULL && pg != PGO_DONTCARE) {
    545      1.127      yamt 					ap->a_m[i] = NULL;
    546      1.127      yamt 				}
    547      1.127      yamt 			}
    548      1.127      yamt 		} else {
    549      1.127      yamt 			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    550      1.127      yamt 		}
    551       1.53     enami 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    552       1.21       chs 	}
    553      1.126      yamt 	simple_unlock(&uobj->vmobjlock);
    554       1.21       chs 
    555       1.21       chs 	/*
    556       1.21       chs 	 * find the requested pages and make some simple checks.
    557       1.21       chs 	 * leave space in the page array for a whole block.
    558       1.21       chs 	 */
    559       1.21       chs 
    560      1.121   reinoud 	if (vp->v_type != VBLK) {
    561       1.36       chs 		fs_bshift = vp->v_mount->mnt_fs_bshift;
    562       1.36       chs 		dev_bshift = vp->v_mount->mnt_dev_bshift;
    563       1.36       chs 	} else {
    564       1.36       chs 		fs_bshift = DEV_BSHIFT;
    565       1.36       chs 		dev_bshift = DEV_BSHIFT;
    566       1.36       chs 	}
    567       1.21       chs 	fs_bsize = 1 << fs_bshift;
    568       1.21       chs 
    569       1.30       chs 	orignpages = MIN(orignpages,
    570       1.30       chs 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    571       1.21       chs 	npages = orignpages;
    572       1.21       chs 	startoffset = origoffset & ~(fs_bsize - 1);
    573       1.53     enami 	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
    574       1.53     enami 	    fs_bsize - 1) & ~(fs_bsize - 1));
    575       1.30       chs 	endoffset = MIN(endoffset, round_page(memeof));
    576       1.21       chs 	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    577       1.21       chs 
    578       1.77      yamt 	pgs_size = sizeof(struct vm_page *) *
    579       1.77      yamt 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    580       1.77      yamt 	if (pgs_size > sizeof(pgs_onstack)) {
    581  1.128.4.1        ad 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    582       1.78    simonb 		if (pgs == NULL) {
    583       1.78    simonb 			return (ENOMEM);
    584       1.78    simonb 		}
    585       1.77      yamt 	} else {
    586       1.77      yamt 		pgs = pgs_onstack;
    587       1.77      yamt 		memset(pgs, 0, pgs_size);
    588       1.77      yamt 	}
    589       1.63     enami 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    590       1.63     enami 	    ridx, npages, startoffset, endoffset);
    591      1.126      yamt 
    592      1.126      yamt 	/*
    593      1.126      yamt 	 * hold g_glock to prevent a race with truncate.
    594      1.126      yamt 	 *
    595      1.126      yamt 	 * check if our idea of v_size is still valid.
    596      1.126      yamt 	 */
    597      1.126      yamt 
    598      1.126      yamt 	if (blockalloc) {
    599      1.126      yamt 		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
    600      1.126      yamt 	} else {
    601      1.126      yamt 		lockmgr(&gp->g_glock, LK_SHARED, NULL);
    602      1.126      yamt 	}
    603      1.126      yamt 	simple_lock(&uobj->vmobjlock);
    604      1.126      yamt 	if (vp->v_size < origvsize) {
    605      1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    606      1.126      yamt 		if (pgs != pgs_onstack)
    607  1.128.4.1        ad 			kmem_free(pgs, pgs_size);
    608      1.126      yamt 		goto startover;
    609      1.126      yamt 	}
    610      1.126      yamt 
    611       1.63     enami 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    612       1.63     enami 	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
    613      1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    614       1.63     enami 		KASSERT(async != 0);
    615       1.63     enami 		genfs_rel_pages(&pgs[ridx], orignpages);
    616       1.63     enami 		simple_unlock(&uobj->vmobjlock);
    617       1.77      yamt 		if (pgs != pgs_onstack)
    618  1.128.4.1        ad 			kmem_free(pgs, pgs_size);
    619       1.63     enami 		return (EBUSY);
    620       1.63     enami 	}
    621       1.21       chs 
    622       1.21       chs 	/*
    623       1.21       chs 	 * if the pages are already resident, just return them.
    624       1.21       chs 	 */
    625       1.21       chs 
    626       1.21       chs 	for (i = 0; i < npages; i++) {
    627       1.97  christos 		struct vm_page *pg1 = pgs[ridx + i];
    628       1.21       chs 
    629       1.97  christos 		if ((pg1->flags & PG_FAKE) ||
    630      1.100      yamt 		    (blockalloc && (pg1->flags & PG_RDONLY))) {
    631       1.21       chs 			break;
    632       1.21       chs 		}
    633       1.21       chs 	}
    634       1.21       chs 	if (i == npages) {
    635      1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    636       1.21       chs 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    637       1.26       chs 		npages += ridx;
    638      1.110      yamt 		goto out;
    639       1.21       chs 	}
    640       1.21       chs 
    641       1.21       chs 	/*
    642       1.37       chs 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    643       1.37       chs 	 */
    644       1.37       chs 
    645      1.124      yamt 	if (overwrite) {
    646      1.126      yamt 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    647       1.37       chs 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    648       1.37       chs 
    649       1.37       chs 		for (i = 0; i < npages; i++) {
    650       1.97  christos 			struct vm_page *pg1 = pgs[ridx + i];
    651       1.37       chs 
    652       1.97  christos 			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
    653       1.37       chs 		}
    654       1.37       chs 		npages += ridx;
    655       1.37       chs 		goto out;
    656       1.37       chs 	}
    657       1.37       chs 
    658       1.37       chs 	/*
    659       1.21       chs 	 * the page wasn't resident and we're not overwriting,
    660       1.21       chs 	 * so we're going to have to do some i/o.
    661       1.21       chs 	 * find any additional pages needed to cover the expanded range.
    662       1.21       chs 	 */
    663       1.21       chs 
    664       1.35       chs 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    665       1.35       chs 	if (startoffset != origoffset || npages != orignpages) {
    666       1.21       chs 
    667       1.21       chs 		/*
    668       1.37       chs 		 * we need to avoid deadlocks caused by locking
    669       1.21       chs 		 * additional pages at lower offsets than pages we
    670       1.37       chs 		 * already have locked.  unlock them all and start over.
    671       1.21       chs 		 */
    672       1.21       chs 
    673       1.63     enami 		genfs_rel_pages(&pgs[ridx], orignpages);
    674       1.77      yamt 		memset(pgs, 0, pgs_size);
    675       1.21       chs 
    676       1.21       chs 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    677       1.53     enami 		    startoffset, endoffset, 0,0);
    678       1.21       chs 		npgs = npages;
    679       1.63     enami 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    680       1.63     enami 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    681      1.126      yamt 			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    682       1.63     enami 			KASSERT(async != 0);
    683       1.63     enami 			genfs_rel_pages(pgs, npages);
    684       1.63     enami 			simple_unlock(&uobj->vmobjlock);
    685       1.77      yamt 			if (pgs != pgs_onstack)
    686  1.128.4.1        ad 				kmem_free(pgs, pgs_size);
    687       1.63     enami 			return (EBUSY);
    688       1.63     enami 		}
    689       1.21       chs 	}
    690       1.21       chs 	simple_unlock(&uobj->vmobjlock);
    691       1.21       chs 
    692       1.21       chs 	/*
    693       1.21       chs 	 * read the desired page(s).
    694       1.21       chs 	 */
    695       1.21       chs 
    696       1.21       chs 	totalbytes = npages << PAGE_SHIFT;
    697       1.30       chs 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    698       1.21       chs 	tailbytes = totalbytes - bytes;
    699       1.21       chs 	skipbytes = 0;
    700       1.21       chs 
    701       1.53     enami 	kva = uvm_pagermapin(pgs, npages,
    702       1.53     enami 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    703       1.21       chs 
    704      1.119      yamt 	mbp = getiobuf();
    705       1.21       chs 	mbp->b_bufsize = totalbytes;
    706       1.21       chs 	mbp->b_data = (void *)kva;
    707       1.21       chs 	mbp->b_resid = mbp->b_bcount = bytes;
    708       1.65      fvdl 	mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL|B_ASYNC : 0);
    709       1.37       chs 	mbp->b_iodone = (async ? uvm_aio_biodone : 0);
    710       1.21       chs 	mbp->b_vp = vp;
    711      1.120      yamt 	if (async)
    712      1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    713      1.120      yamt 	else
    714      1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    715       1.21       chs 
    716       1.21       chs 	/*
    717       1.31       chs 	 * if EOF is in the middle of the range, zero the part past EOF.
    718       1.38       chs 	 * if the page including EOF is not PG_FAKE, skip over it since
    719       1.38       chs 	 * in that case it has valid data that we need to preserve.
    720       1.21       chs 	 */
    721       1.21       chs 
    722       1.31       chs 	if (tailbytes > 0) {
    723       1.38       chs 		size_t tailstart = bytes;
    724       1.38       chs 
    725       1.38       chs 		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
    726       1.38       chs 			tailstart = round_page(tailstart);
    727       1.38       chs 			tailbytes -= tailstart - bytes;
    728       1.38       chs 		}
    729       1.37       chs 		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    730       1.53     enami 		    kva, tailstart, tailbytes,0);
    731       1.38       chs 		memset((void *)(kva + tailstart), 0, tailbytes);
    732       1.21       chs 	}
    733       1.21       chs 
    734       1.21       chs 	/*
    735       1.21       chs 	 * now loop over the pages, reading as needed.
    736       1.21       chs 	 */
    737       1.21       chs 
    738       1.21       chs 	bp = NULL;
    739       1.21       chs 	for (offset = startoffset;
    740       1.53     enami 	    bytes > 0;
    741       1.53     enami 	    offset += iobytes, bytes -= iobytes) {
    742       1.21       chs 
    743       1.21       chs 		/*
    744       1.21       chs 		 * skip pages which don't need to be read.
    745       1.21       chs 		 */
    746       1.21       chs 
    747       1.21       chs 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    748      1.100      yamt 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    749       1.21       chs 			size_t b;
    750       1.21       chs 
    751       1.24       chs 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    752      1.100      yamt 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    753      1.100      yamt 				sawhole = TRUE;
    754      1.100      yamt 			}
    755       1.26       chs 			b = MIN(PAGE_SIZE, bytes);
    756       1.21       chs 			offset += b;
    757       1.21       chs 			bytes -= b;
    758       1.21       chs 			skipbytes += b;
    759       1.21       chs 			pidx++;
    760       1.21       chs 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    761       1.53     enami 			    offset, 0,0,0);
    762       1.21       chs 			if (bytes == 0) {
    763       1.21       chs 				goto loopdone;
    764       1.21       chs 			}
    765       1.21       chs 		}
    766       1.21       chs 
    767       1.21       chs 		/*
    768       1.21       chs 		 * bmap the file to find out the blkno to read from and
    769       1.21       chs 		 * how much we can read in one i/o.  if bmap returns an error,
    770       1.21       chs 		 * skip the rest of the top-level i/o.
    771       1.21       chs 		 */
    772       1.21       chs 
    773       1.21       chs 		lbn = offset >> fs_bshift;
    774       1.36       chs 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    775       1.21       chs 		if (error) {
    776       1.21       chs 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    777       1.53     enami 			    lbn, error,0,0);
    778       1.21       chs 			skipbytes += bytes;
    779       1.21       chs 			goto loopdone;
    780       1.21       chs 		}
    781       1.21       chs 
    782       1.21       chs 		/*
    783       1.21       chs 		 * see how many pages can be read with this i/o.
    784       1.21       chs 		 * reduce the i/o size if necessary to avoid
    785       1.21       chs 		 * overwriting pages with valid data.
    786       1.21       chs 		 */
    787       1.21       chs 
    788       1.26       chs 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    789       1.26       chs 		    bytes);
    790       1.21       chs 		if (offset + iobytes > round_page(offset)) {
    791       1.21       chs 			pcount = 1;
    792       1.21       chs 			while (pidx + pcount < npages &&
    793       1.53     enami 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    794       1.21       chs 				pcount++;
    795       1.21       chs 			}
    796       1.26       chs 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    797       1.53     enami 			    (offset - trunc_page(offset)));
    798       1.21       chs 		}
    799       1.21       chs 
    800       1.21       chs 		/*
    801       1.53     enami 		 * if this block isn't allocated, zero it instead of
    802      1.100      yamt 		 * reading it.  unless we are going to allocate blocks,
    803      1.100      yamt 		 * mark the pages we zeroed PG_RDONLY.
    804       1.21       chs 		 */
    805       1.21       chs 
    806       1.21       chs 		if (blkno < 0) {
    807       1.53     enami 			int holepages = (round_page(offset + iobytes) -
    808       1.53     enami 			    trunc_page(offset)) >> PAGE_SHIFT;
    809       1.21       chs 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    810       1.21       chs 
    811       1.21       chs 			sawhole = TRUE;
    812       1.21       chs 			memset((char *)kva + (offset - startoffset), 0,
    813       1.53     enami 			    iobytes);
    814       1.21       chs 			skipbytes += iobytes;
    815       1.21       chs 
    816       1.35       chs 			for (i = 0; i < holepages; i++) {
    817       1.35       chs 				if (write) {
    818       1.35       chs 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    819      1.100      yamt 				}
    820      1.100      yamt 				if (!blockalloc) {
    821       1.21       chs 					pgs[pidx + i]->flags |= PG_RDONLY;
    822       1.21       chs 				}
    823       1.21       chs 			}
    824       1.21       chs 			continue;
    825       1.21       chs 		}
    826       1.21       chs 
    827       1.21       chs 		/*
    828       1.21       chs 		 * allocate a sub-buf for this piece of the i/o
    829       1.21       chs 		 * (or just use mbp if there's only 1 piece),
    830       1.21       chs 		 * and start it going.
    831       1.21       chs 		 */
    832       1.21       chs 
    833       1.21       chs 		if (offset == startoffset && iobytes == bytes) {
    834       1.21       chs 			bp = mbp;
    835       1.21       chs 		} else {
    836      1.119      yamt 			bp = getiobuf();
    837      1.120      yamt 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    838       1.21       chs 		}
    839      1.112      yamt 		bp->b_lblkno = 0;
    840       1.21       chs 
    841       1.21       chs 		/* adjust physical blkno for partial blocks */
    842       1.25      fvdl 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    843       1.53     enami 		    dev_bshift);
    844       1.21       chs 
    845       1.53     enami 		UVMHIST_LOG(ubchist,
    846       1.53     enami 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    847       1.53     enami 		    bp, offset, iobytes, bp->b_blkno);
    848       1.21       chs 
    849      1.109      yamt 		VOP_STRATEGY(devvp, bp);
    850       1.21       chs 	}
    851       1.21       chs 
    852       1.21       chs loopdone:
    853      1.120      yamt 	nestiobuf_done(mbp, skipbytes, error);
    854       1.21       chs 	if (async) {
    855       1.32       chs 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    856       1.37       chs 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    857       1.77      yamt 		if (pgs != pgs_onstack)
    858  1.128.4.1        ad 			kmem_free(pgs, pgs_size);
    859       1.53     enami 		return (0);
    860       1.21       chs 	}
    861       1.21       chs 	if (bp != NULL) {
    862       1.21       chs 		error = biowait(mbp);
    863       1.21       chs 	}
    864      1.119      yamt 	putiobuf(mbp);
    865       1.21       chs 	uvm_pagermapout(kva, npages);
    866       1.21       chs 
    867       1.21       chs 	/*
     868       1.21       chs 	 * if we encountered a hole then we have to do a little more work.
    869       1.21       chs 	 * for read faults, we marked the page PG_RDONLY so that future
    870       1.21       chs 	 * write accesses to the page will fault again.
    871       1.21       chs 	 * for write faults, we must make sure that the backing store for
    872       1.21       chs 	 * the page is completely allocated while the pages are locked.
    873       1.21       chs 	 */
    874       1.21       chs 
    875      1.100      yamt 	if (!error && sawhole && blockalloc) {
    876       1.37       chs 		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
    877       1.53     enami 		    cred);
    878       1.37       chs 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    879       1.37       chs 		    startoffset, npages << PAGE_SHIFT, error,0);
    880      1.100      yamt 		if (!error) {
    881      1.100      yamt 			for (i = 0; i < npages; i++) {
    882      1.100      yamt 				if (pgs[i] == NULL) {
    883      1.100      yamt 					continue;
    884      1.100      yamt 				}
    885      1.100      yamt 				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
    886      1.100      yamt 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    887      1.100      yamt 				    pgs[i],0,0,0);
    888      1.100      yamt 			}
    889      1.100      yamt 		}
    890       1.21       chs 	}
    891       1.37       chs 	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    892       1.21       chs 	simple_lock(&uobj->vmobjlock);
    893       1.21       chs 
    894       1.21       chs 	/*
    895       1.21       chs 	 * we're almost done!  release the pages...
    896       1.21       chs 	 * for errors, we free the pages.
    897       1.21       chs 	 * otherwise we activate them and mark them as valid and clean.
    898       1.21       chs 	 * also, unbusy pages that were not actually requested.
    899       1.21       chs 	 */
    900       1.21       chs 
    901       1.21       chs 	if (error) {
    902       1.21       chs 		for (i = 0; i < npages; i++) {
    903       1.21       chs 			if (pgs[i] == NULL) {
    904       1.21       chs 				continue;
    905       1.21       chs 			}
    906       1.21       chs 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    907       1.53     enami 			    pgs[i], pgs[i]->flags, 0,0);
    908       1.26       chs 			if (pgs[i]->flags & PG_FAKE) {
    909       1.37       chs 				pgs[i]->flags |= PG_RELEASED;
    910       1.21       chs 			}
    911       1.21       chs 		}
    912       1.37       chs 		uvm_lock_pageq();
    913       1.37       chs 		uvm_page_unbusy(pgs, npages);
    914       1.21       chs 		uvm_unlock_pageq();
    915       1.21       chs 		simple_unlock(&uobj->vmobjlock);
    916       1.21       chs 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    917       1.77      yamt 		if (pgs != pgs_onstack)
    918  1.128.4.1        ad 			kmem_free(pgs, pgs_size);
    919       1.53     enami 		return (error);
    920       1.21       chs 	}
    921       1.21       chs 
    922       1.37       chs out:
    923       1.21       chs 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    924       1.26       chs 	uvm_lock_pageq();
    925       1.21       chs 	for (i = 0; i < npages; i++) {
    926       1.37       chs 		pg = pgs[i];
    927       1.37       chs 		if (pg == NULL) {
    928       1.21       chs 			continue;
    929       1.21       chs 		}
    930       1.21       chs 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    931       1.53     enami 		    pg, pg->flags, 0,0);
    932       1.37       chs 		if (pg->flags & PG_FAKE && !overwrite) {
    933       1.37       chs 			pg->flags &= ~(PG_FAKE);
    934       1.21       chs 			pmap_clear_modify(pgs[i]);
    935       1.21       chs 		}
    936      1.100      yamt 		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    937       1.21       chs 		if (i < ridx || i >= ridx + orignpages || async) {
    938       1.21       chs 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    939       1.53     enami 			    pg, pg->offset,0,0);
    940       1.37       chs 			if (pg->flags & PG_WANTED) {
    941       1.37       chs 				wakeup(pg);
    942       1.37       chs 			}
    943       1.37       chs 			if (pg->flags & PG_FAKE) {
    944       1.37       chs 				KASSERT(overwrite);
    945       1.37       chs 				uvm_pagezero(pg);
    946       1.37       chs 			}
    947       1.37       chs 			if (pg->flags & PG_RELEASED) {
    948       1.37       chs 				uvm_pagefree(pg);
    949       1.26       chs 				continue;
    950       1.21       chs 			}
    951  1.128.4.1        ad 			uvm_pageenqueue(pg);
    952       1.37       chs 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    953       1.37       chs 			UVM_PAGE_OWN(pg, NULL);
    954       1.21       chs 		}
    955       1.21       chs 	}
    956       1.26       chs 	uvm_unlock_pageq();
    957       1.21       chs 	simple_unlock(&uobj->vmobjlock);
    958       1.21       chs 	if (ap->a_m != NULL) {
    959       1.21       chs 		memcpy(ap->a_m, &pgs[ridx],
    960       1.53     enami 		    orignpages * sizeof(struct vm_page *));
    961       1.21       chs 	}
    962       1.77      yamt 	if (pgs != pgs_onstack)
    963  1.128.4.1        ad 		kmem_free(pgs, pgs_size);
    964       1.53     enami 	return (0);
    965       1.21       chs }
    966       1.21       chs 
    967       1.21       chs /*
    968       1.21       chs  * generic VM putpages routine.
    969       1.21       chs  * Write the given range of pages to backing store.
    970       1.37       chs  *
    971       1.37       chs  * => "offhi == 0" means flush all pages at or after "offlo".
    972  1.128.4.2        ad  * => object should be locked by caller.  we return with the
    973  1.128.4.2        ad  *      object unlocked.
    974       1.37       chs  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    975       1.37       chs  *	thus, a caller might want to unlock higher level resources
    976       1.37       chs  *	(e.g. vm_map) before calling flush.
    977  1.128.4.2        ad  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
    978       1.37       chs  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
    979       1.37       chs  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
    980       1.37       chs  *	that new pages are inserted on the tail end of the list.   thus,
    981       1.37       chs  *	we can make a complete pass through the object in one go by starting
    982       1.37       chs  *	at the head and working towards the tail (new pages are put in
    983       1.37       chs  *	front of us).
    984       1.37       chs  * => NOTE: we are allowed to lock the page queues, so the caller
    985       1.37       chs  *	must not be holding the page queue lock.
    986       1.37       chs  *
    987       1.37       chs  * note on "cleaning" object and PG_BUSY pages:
    988       1.37       chs  *	this routine is holding the lock on the object.   the only time
    989       1.37       chs  *	that it can run into a PG_BUSY page that it does not own is if
    990       1.37       chs  *	some other process has started I/O on the page (e.g. either
    991       1.37       chs  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
    992       1.37       chs  *	in, then it can not be dirty (!PG_CLEAN) because no one has
    993       1.37       chs  *	had a chance to modify it yet.    if the PG_BUSY page is being
    994       1.37       chs  *	paged out then it means that someone else has already started
    995       1.53     enami  *	cleaning the page for us (how nice!).    in this case, if we
    996       1.37       chs  *	have syncio specified, then after we make our pass through the
    997       1.53     enami  *	object we need to wait for the other PG_BUSY pages to clear
    998       1.37       chs  *	off (i.e. we need to do an iosync).   also note that once a
    999       1.37       chs  *	page is PG_BUSY it must stay in its object until it is un-busyed.
   1000       1.37       chs  *
   1001       1.37       chs  * note on page traversal:
   1002       1.37       chs  *	we can traverse the pages in an object either by going down the
   1003       1.37       chs  *	linked list in "uobj->memq", or we can go over the address range
   1004       1.37       chs  *	by page doing hash table lookups for each address.    depending
   1005       1.53     enami  *	on how many pages are in the object it may be cheaper to do one
   1006       1.37       chs  *	or the other.   we set "by_list" to true if we are using memq.
   1007       1.37       chs  *	if the cost of a hash lookup was equal to the cost of the list
   1008       1.37       chs  *	traversal we could compare the number of pages in the start->stop
   1009       1.37       chs  *	range to the total number of pages in the object.   however, it
   1010       1.37       chs  *	seems that a hash table lookup is more expensive than the linked
   1011       1.53     enami  *	list traversal, so we multiply the number of pages in the
   1012       1.37       chs  *	range by an estimate of the relatively higher cost of the hash lookup.
   1013       1.21       chs  */
   1014       1.21       chs 
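/*
 * Hedged calling sketch (illustration only, not part of this file):
 * flush every page of a vnode and wait for the writes, as a fsync-like
 * caller might.  The helper name example_flush_whole_vnode is
 * hypothetical.  The object must be locked on entry; genfs_putpages
 * returns with it unlocked.
 */
#if 0
static int
example_flush_whole_vnode(struct vnode *vp)
{
	int error;

	simple_lock(&vp->v_uobj.vmobjlock);
	error = VOP_PUTPAGES(vp, 0, 0,
	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
	return (error);
}
#endif
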
   1015       1.21       chs int
   1016       1.53     enami genfs_putpages(void *v)
   1017       1.21       chs {
   1018       1.21       chs 	struct vop_putpages_args /* {
   1019       1.21       chs 		struct vnode *a_vp;
   1020       1.37       chs 		voff_t a_offlo;
   1021       1.37       chs 		voff_t a_offhi;
   1022       1.21       chs 		int a_flags;
   1023       1.21       chs 	} */ *ap = v;
   1024       1.37       chs 	struct vnode *vp = ap->a_vp;
   1025       1.37       chs 	struct uvm_object *uobj = &vp->v_uobj;
   1026       1.46       chs 	struct simplelock *slock = &uobj->vmobjlock;
   1027       1.37       chs 	off_t startoff = ap->a_offlo;
   1028       1.37       chs 	off_t endoff = ap->a_offhi;
   1029       1.37       chs 	off_t off;
   1030       1.37       chs 	int flags = ap->a_flags;
   1031       1.76       tls 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1032  1.128.4.2        ad #define maxpages (MAXPHYS >> PAGE_SHIFT)
   1033       1.37       chs 	int i, s, error, npages, nback;
   1034       1.37       chs 	int freeflag;
   1035       1.60     enami 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1036       1.97  christos 	boolean_t wasclean, by_list, needs_clean, yld;
   1037       1.37       chs 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1038       1.56     enami 	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
   1039       1.70  christos 	struct lwp *l = curlwp ? curlwp : &lwp0;
   1040      1.101      yamt 	struct genfs_node *gp = VTOG(vp);
   1041      1.101      yamt 	int dirtygen;
   1042      1.103      yamt 	boolean_t modified = FALSE;
   1043      1.104      yamt 	boolean_t cleanall;
   1044       1.70  christos 
   1045       1.37       chs 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1046       1.37       chs 
   1047       1.37       chs 	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1048       1.37       chs 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1049       1.37       chs 	KASSERT(startoff < endoff || endoff == 0);
   1050       1.37       chs 
   1051       1.37       chs 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1052       1.37       chs 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1053      1.103      yamt 
   1054      1.103      yamt 	KASSERT((vp->v_flag & VONWORKLST) != 0 ||
   1055      1.103      yamt 	    (vp->v_flag & VWRITEMAPDIRTY) == 0);
   1056       1.37       chs 	if (uobj->uo_npages == 0) {
   1057       1.62  perseant 		s = splbio();
   1058      1.103      yamt 		if (vp->v_flag & VONWORKLST) {
   1059      1.103      yamt 			vp->v_flag &= ~VWRITEMAPDIRTY;
   1060  1.128.4.1        ad 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1061  1.128.4.1        ad 				vn_syncer_remove_from_worklist(vp);
   1062       1.37       chs 		}
   1063       1.62  perseant 		splx(s);
   1064       1.46       chs 		simple_unlock(slock);
   1065       1.53     enami 		return (0);
   1066       1.37       chs 	}
   1067       1.37       chs 
   1068       1.37       chs 	/*
   1069       1.37       chs 	 * the vnode has pages, set up to process the request.
   1070       1.37       chs 	 */
   1071       1.37       chs 
   1072       1.37       chs 	error = 0;
   1073       1.44       chs 	s = splbio();
   1074       1.71        pk 	simple_lock(&global_v_numoutput_slock);
   1075       1.44       chs 	wasclean = (vp->v_numoutput == 0);
   1076       1.71        pk 	simple_unlock(&global_v_numoutput_slock);
   1077       1.44       chs 	splx(s);
   1078       1.37       chs 	off = startoff;
   1079       1.37       chs 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1080       1.37       chs 		endoff = trunc_page(LLONG_MAX);
   1081       1.37       chs 	}
   1082       1.37       chs 	by_list = (uobj->uo_npages <=
   1083       1.37       chs 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
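                              	/*
                              	 * a worked example of the heuristic above (assuming a
                              	 * hypothetical UVM_PAGE_HASH_PENALTY of 4 and 4KB pages):
                              	 * a 64KB range covers 16 pages, so list traversal is chosen
                              	 * whenever the object holds at most 16 * 4 = 64 pages;
                              	 * larger objects are scanned by per-offset lookups instead.
                              	 */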
   1084       1.37       chs 
   1085      1.102      yamt #if !defined(DEBUG)
   1086      1.102      yamt 	/*
   1087      1.102      yamt 	 * if this vnode is known not to have dirty pages,
   1088      1.102      yamt 	 * don't bother to clean it out.
   1089      1.102      yamt 	 */
   1090      1.102      yamt 
   1091      1.102      yamt 	if ((vp->v_flag & VONWORKLST) == 0) {
   1092      1.102      yamt 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1093      1.102      yamt 			goto skip_scan;
   1094      1.102      yamt 		}
   1095      1.102      yamt 		flags &= ~PGO_CLEANIT;
   1096      1.102      yamt 	}
   1097      1.102      yamt #endif /* !defined(DEBUG) */
   1098      1.102      yamt 
   1099       1.37       chs 	/*
   1100       1.37       chs 	 * start the loop.  when scanning by list, hold the last page
   1101       1.37       chs 	 * in the list before we start.  pages allocated after we start
   1102       1.37       chs 	 * will be added to the end of the list, so we can stop at the
   1103       1.37       chs 	 * current last page.
   1104       1.37       chs 	 */
   1105       1.37       chs 
   1106      1.104      yamt 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1107      1.104      yamt 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1108      1.104      yamt 	    (vp->v_flag & VONWORKLST) != 0;
   1109      1.101      yamt 	dirtygen = gp->g_dirtygen;
   1110       1.56     enami 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1111       1.37       chs 	if (by_list) {
   1112      1.113      yamt 		curmp.uobject = uobj;
   1113      1.113      yamt 		curmp.offset = (voff_t)-1;
   1114      1.113      yamt 		curmp.flags = PG_BUSY;
   1115      1.113      yamt 		endmp.uobject = uobj;
   1116      1.113      yamt 		endmp.offset = (voff_t)-1;
   1117      1.113      yamt 		endmp.flags = PG_BUSY;
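                              		/*
                              		 * curmp and endmp are dummy marker pages on the stack:
                              		 * endmp is queued at the tail of memq below so the scan
                              		 * has a fixed stopping point even if new pages are
                              		 * appended, and curmp is temporarily inserted to hold
                              		 * our place whenever the object lock must be dropped
                              		 * later in the loop.
                              		 */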
   1118       1.37       chs 		pg = TAILQ_FIRST(&uobj->memq);
   1119       1.37       chs 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
   1120       1.70  christos 		PHOLD(l);
   1121       1.37       chs 	} else {
   1122       1.37       chs 		pg = uvm_pagelookup(uobj, off);
   1123       1.37       chs 	}
   1124       1.37       chs 	nextpg = NULL;
   1125       1.37       chs 	while (by_list || off < endoff) {
   1126       1.37       chs 
   1127       1.37       chs 		/*
   1128       1.37       chs 		 * if the current page is not interesting, move on to the next.
   1129       1.37       chs 		 */
   1130       1.37       chs 
   1131       1.37       chs 		KASSERT(pg == NULL || pg->uobject == uobj);
   1132       1.37       chs 		KASSERT(pg == NULL ||
   1133       1.53     enami 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1134       1.53     enami 		    (pg->flags & PG_BUSY) != 0);
   1135       1.37       chs 		if (by_list) {
   1136       1.37       chs 			if (pg == &endmp) {
   1137       1.37       chs 				break;
   1138       1.37       chs 			}
   1139       1.37       chs 			if (pg->offset < startoff || pg->offset >= endoff ||
   1140       1.37       chs 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1141      1.101      yamt 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1142      1.101      yamt 					wasclean = FALSE;
   1143      1.101      yamt 				}
   1144       1.37       chs 				pg = TAILQ_NEXT(pg, listq);
   1145       1.37       chs 				continue;
   1146       1.37       chs 			}
   1147       1.37       chs 			off = pg->offset;
   1148      1.101      yamt 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1149      1.101      yamt 			if (pg != NULL) {
   1150      1.101      yamt 				wasclean = FALSE;
   1151      1.101      yamt 			}
   1152       1.37       chs 			off += PAGE_SIZE;
   1153       1.37       chs 			if (off < endoff) {
   1154       1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1155       1.37       chs 			}
   1156       1.37       chs 			continue;
   1157       1.37       chs 		}
   1158       1.21       chs 
   1159       1.37       chs 		/*
   1160       1.37       chs 		 * if the current page needs to be cleaned and it's busy,
   1161       1.37       chs 		 * wait for it to become unbusy.
   1162       1.37       chs 		 */
   1163       1.37       chs 
   1164       1.97  christos 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1165       1.56     enami 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1166       1.97  christos 		if (pg->flags & PG_BUSY || yld) {
   1167       1.72  perseant 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1168       1.72  perseant 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1169       1.72  perseant 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1170       1.72  perseant 				error = EDEADLK;
   1171       1.72  perseant 				break;
   1172       1.72  perseant 			}
   1173       1.56     enami 			KASSERT(!pagedaemon);
   1174       1.37       chs 			if (by_list) {
   1175       1.37       chs 				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
   1176       1.37       chs 				UVMHIST_LOG(ubchist, "curmp next %p",
   1177       1.53     enami 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1178       1.37       chs 			}
   1179       1.97  christos 			if (yld) {
   1180       1.49       chs 				simple_unlock(slock);
   1181  1.128.4.3        ad 				preempt();
   1182       1.49       chs 				simple_lock(slock);
   1183       1.49       chs 			} else {
   1184       1.49       chs 				pg->flags |= PG_WANTED;
   1185       1.49       chs 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1186       1.49       chs 				simple_lock(slock);
   1187       1.49       chs 			}
   1188       1.37       chs 			if (by_list) {
   1189       1.37       chs 				UVMHIST_LOG(ubchist, "after next %p",
   1190       1.53     enami 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1191       1.37       chs 				pg = TAILQ_NEXT(&curmp, listq);
   1192       1.37       chs 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1193       1.37       chs 			} else {
   1194       1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1195       1.37       chs 			}
   1196       1.37       chs 			continue;
   1197       1.49       chs 		}
   1198       1.49       chs 
   1199       1.49       chs 		/*
   1200       1.49       chs 		 * if we're freeing, remove all mappings of the page now.
    1201       1.49       chs 		 * if we're cleaning, check if the page needs to be cleaned.
   1202       1.49       chs 		 */
   1203       1.49       chs 
   1204       1.49       chs 		if (flags & PGO_FREE) {
   1205       1.49       chs 			pmap_page_protect(pg, VM_PROT_NONE);
   1206      1.101      yamt 		} else if (flags & PGO_CLEANIT) {
   1207      1.101      yamt 
   1208      1.101      yamt 			/*
   1209      1.101      yamt 			 * if we still have some hope to pull this vnode off
   1210      1.101      yamt 			 * from the syncer queue, write-protect the page.
   1211      1.101      yamt 			 */
   1212      1.101      yamt 
   1213      1.104      yamt 			if (cleanall && wasclean &&
   1214      1.104      yamt 			    gp->g_dirtygen == dirtygen) {
   1215      1.104      yamt 
   1216      1.104      yamt 				/*
   1217      1.104      yamt 				 * uobj pages get wired only by uvm_fault
   1218      1.104      yamt 				 * where uobj is locked.
   1219      1.104      yamt 				 */
   1220      1.104      yamt 
   1221      1.104      yamt 				if (pg->wire_count == 0) {
   1222      1.104      yamt 					pmap_page_protect(pg,
   1223      1.104      yamt 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1224      1.104      yamt 				} else {
   1225      1.104      yamt 					cleanall = FALSE;
   1226      1.104      yamt 				}
   1227      1.101      yamt 			}
   1228       1.49       chs 		}
   1229      1.101      yamt 
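                              		/*
                              		 * decide whether the page needs cleaning: either the
                              		 * pmap-level modified bit is set (pmap_clear_modify()
                              		 * clears it and returns its old state) or PG_CLEAN is
                              		 * already clear.  PG_CLEAN is set now in anticipation
                              		 * of the write-back below.
                              		 */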
   1230       1.49       chs 		if (flags & PGO_CLEANIT) {
   1231       1.49       chs 			needs_clean = pmap_clear_modify(pg) ||
   1232       1.53     enami 			    (pg->flags & PG_CLEAN) == 0;
   1233       1.49       chs 			pg->flags |= PG_CLEAN;
   1234       1.49       chs 		} else {
   1235       1.49       chs 			needs_clean = FALSE;
   1236       1.37       chs 		}
   1237       1.37       chs 
   1238       1.37       chs 		/*
   1239       1.37       chs 		 * if we're cleaning, build a cluster.
   1240       1.37       chs 		 * the cluster will consist of pages which are currently dirty,
   1241       1.37       chs 		 * but they will be returned to us marked clean.
   1242       1.37       chs 		 * if not cleaning, just operate on the one page.
   1243       1.37       chs 		 */
   1244       1.37       chs 
   1245       1.37       chs 		if (needs_clean) {
   1246      1.101      yamt 			KDASSERT((vp->v_flag & VONWORKLST));
   1247       1.37       chs 			wasclean = FALSE;
   1248       1.37       chs 			memset(pgs, 0, sizeof(pgs));
   1249       1.37       chs 			pg->flags |= PG_BUSY;
   1250       1.37       chs 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1251       1.37       chs 
   1252       1.37       chs 			/*
   1253       1.37       chs 			 * first look backward.
   1254       1.37       chs 			 */
   1255       1.37       chs 
   1256       1.60     enami 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1257       1.37       chs 			nback = npages;
   1258       1.37       chs 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1259       1.37       chs 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1260       1.37       chs 			if (nback) {
   1261       1.37       chs 				memmove(&pgs[0], &pgs[npages - nback],
   1262       1.37       chs 				    nback * sizeof(pgs[0]));
   1263       1.47     enami 				if (npages - nback < nback)
   1264       1.47     enami 					memset(&pgs[nback], 0,
   1265       1.47     enami 					    (npages - nback) * sizeof(pgs[0]));
   1266       1.47     enami 				else
   1267       1.47     enami 					memset(&pgs[npages - nback], 0,
   1268       1.47     enami 					    nback * sizeof(pgs[0]));
   1269       1.37       chs 			}
   1270       1.37       chs 
   1271       1.37       chs 			/*
   1272       1.37       chs 			 * then plug in our page of interest.
   1273       1.37       chs 			 */
   1274       1.37       chs 
   1275       1.37       chs 			pgs[nback] = pg;
   1276       1.37       chs 
   1277       1.37       chs 			/*
   1278       1.37       chs 			 * then look forward to fill in the remaining space in
   1279       1.37       chs 			 * the array of pages.
   1280       1.37       chs 			 */
   1281       1.37       chs 
   1282       1.60     enami 			npages = maxpages - nback - 1;
   1283       1.37       chs 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1284       1.37       chs 			    &pgs[nback + 1],
   1285       1.37       chs 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1286       1.37       chs 			npages += nback + 1;
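                              			/*
                              			 * the cluster array now looks like this:
                              			 * pgs[0..nback-1] are the dirty pages found by the
                              			 * backward scan, pgs[nback] is the page of interest,
                              			 * and the remaining entries up to npages-1 came from
                              			 * the forward scan.
                              			 */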
   1287       1.37       chs 		} else {
   1288       1.37       chs 			pgs[0] = pg;
   1289       1.37       chs 			npages = 1;
   1290       1.61     enami 			nback = 0;
   1291       1.37       chs 		}
   1292       1.37       chs 
   1293       1.37       chs 		/*
   1294       1.37       chs 		 * apply FREE or DEACTIVATE options if requested.
   1295       1.37       chs 		 */
   1296       1.37       chs 
   1297       1.37       chs 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1298       1.37       chs 			uvm_lock_pageq();
   1299       1.37       chs 		}
   1300       1.37       chs 		for (i = 0; i < npages; i++) {
   1301       1.37       chs 			tpg = pgs[i];
   1302       1.37       chs 			KASSERT(tpg->uobject == uobj);
   1303       1.59     enami 			if (by_list && tpg == TAILQ_NEXT(pg, listq))
   1304       1.59     enami 				pg = tpg;
   1305       1.91     enami 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1306       1.91     enami 				continue;
   1307  1.128.4.2        ad 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1308       1.37       chs 				(void) pmap_clear_reference(tpg);
   1309       1.37       chs 				uvm_pagedeactivate(tpg);
   1310       1.37       chs 			} else if (flags & PGO_FREE) {
   1311       1.37       chs 				pmap_page_protect(tpg, VM_PROT_NONE);
   1312       1.37       chs 				if (tpg->flags & PG_BUSY) {
   1313       1.37       chs 					tpg->flags |= freeflag;
   1314       1.56     enami 					if (pagedaemon) {
   1315       1.37       chs 						uvmexp.paging++;
   1316       1.37       chs 						uvm_pagedequeue(tpg);
   1317       1.37       chs 					}
   1318       1.37       chs 				} else {
   1319       1.59     enami 
   1320       1.59     enami 					/*
   1321       1.59     enami 					 * ``page is not busy''
   1322       1.59     enami 					 * implies that npages is 1
   1323       1.59     enami 					 * and needs_clean is false.
   1324       1.59     enami 					 */
   1325       1.59     enami 
   1326       1.37       chs 					nextpg = TAILQ_NEXT(tpg, listq);
   1327       1.37       chs 					uvm_pagefree(tpg);
   1328       1.89     enami 					if (pagedaemon)
   1329       1.89     enami 						uvmexp.pdfreed++;
   1330       1.37       chs 				}
   1331       1.37       chs 			}
   1332       1.37       chs 		}
   1333       1.37       chs 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1334       1.37       chs 			uvm_unlock_pageq();
   1335       1.37       chs 		}
   1336       1.37       chs 		if (needs_clean) {
   1337      1.103      yamt 			modified = TRUE;
   1338       1.37       chs 
   1339       1.37       chs 			/*
   1340       1.37       chs 			 * start the i/o.  if we're traversing by list,
   1341       1.37       chs 			 * keep our place in the list with a marker page.
   1342       1.37       chs 			 */
   1343       1.37       chs 
   1344       1.37       chs 			if (by_list) {
   1345       1.37       chs 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1346       1.37       chs 				    listq);
   1347       1.37       chs 			}
   1348       1.46       chs 			simple_unlock(slock);
   1349       1.37       chs 			error = GOP_WRITE(vp, pgs, npages, flags);
   1350       1.46       chs 			simple_lock(slock);
   1351       1.37       chs 			if (by_list) {
   1352       1.37       chs 				pg = TAILQ_NEXT(&curmp, listq);
   1353       1.37       chs 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1354       1.37       chs 			}
   1355       1.37       chs 			if (error) {
   1356       1.37       chs 				break;
   1357       1.37       chs 			}
   1358       1.37       chs 			if (by_list) {
   1359       1.37       chs 				continue;
   1360       1.37       chs 			}
   1361       1.37       chs 		}
   1362       1.37       chs 
   1363       1.37       chs 		/*
   1364       1.37       chs 		 * find the next page and continue if there was no error.
   1365       1.37       chs 		 */
   1366       1.37       chs 
   1367       1.37       chs 		if (by_list) {
   1368       1.37       chs 			if (nextpg) {
   1369       1.37       chs 				pg = nextpg;
   1370       1.37       chs 				nextpg = NULL;
   1371       1.37       chs 			} else {
   1372       1.37       chs 				pg = TAILQ_NEXT(pg, listq);
   1373       1.37       chs 			}
   1374       1.37       chs 		} else {
   1375       1.61     enami 			off += (npages - nback) << PAGE_SHIFT;
   1376       1.37       chs 			if (off < endoff) {
   1377       1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1378       1.37       chs 			}
   1379       1.37       chs 		}
   1380       1.37       chs 	}
   1381       1.37       chs 	if (by_list) {
   1382       1.37       chs 		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
   1383       1.70  christos 		PRELE(l);
   1384       1.37       chs 	}
   1385       1.37       chs 
   1386      1.103      yamt 	if (modified && (vp->v_flag & VWRITEMAPDIRTY) != 0 &&
   1387      1.121   reinoud 	    (vp->v_type != VBLK ||
   1388      1.103      yamt 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1389      1.103      yamt 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1390      1.103      yamt 	}
   1391      1.103      yamt 
   1392       1.37       chs 	/*
   1393       1.37       chs 	 * if we're cleaning and there was nothing to clean,
   1394       1.37       chs 	 * take us off the syncer list.  if we started any i/o
   1395       1.37       chs 	 * and we're doing sync i/o, wait for all writes to finish.
   1396       1.37       chs 	 */
   1397       1.37       chs 
   1398       1.62  perseant 	s = splbio();
   1399      1.104      yamt 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1400      1.104      yamt 	    (vp->v_flag & VONWORKLST) != 0) {
   1401      1.103      yamt 		vp->v_flag &= ~VWRITEMAPDIRTY;
   1402  1.128.4.1        ad 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1403  1.128.4.1        ad 			vn_syncer_remove_from_worklist(vp);
   1404       1.37       chs 	}
   1405       1.62  perseant 	splx(s);
   1406      1.102      yamt 
   1407      1.102      yamt #if !defined(DEBUG)
   1408      1.102      yamt skip_scan:
   1409      1.102      yamt #endif /* !defined(DEBUG) */
   1410       1.37       chs 	if (!wasclean && !async) {
   1411       1.37       chs 		s = splbio();
   1412       1.71        pk 		/*
   1413       1.71        pk 		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
   1414       1.71        pk 		 *	 but the slot in ltsleep() is taken!
   1415       1.71        pk 		 * XXX - try to recover from missed wakeups with a timeout..
   1416       1.71        pk 		 *	 must think of something better.
   1417       1.71        pk 		 */
   1418       1.37       chs 		while (vp->v_numoutput != 0) {
   1419       1.37       chs 			vp->v_flag |= VBWAIT;
   1420       1.46       chs 			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
   1421       1.71        pk 			    "genput2", hz);
   1422       1.46       chs 			simple_lock(slock);
   1423       1.37       chs 		}
   1424       1.37       chs 		splx(s);
   1425       1.37       chs 	}
   1426  1.128.4.2        ad 	simple_unlock(slock);
   1427       1.53     enami 	return (error);
   1428       1.37       chs }
   1429       1.37       chs 
   1430       1.37       chs int
   1431       1.37       chs genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1432       1.37       chs {
   1433  1.128.4.1        ad 	off_t off;
   1434  1.128.4.1        ad 	vaddr_t kva;
   1435  1.128.4.1        ad 	size_t len;
   1436  1.128.4.1        ad 	int error;
   1437  1.128.4.1        ad 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1438  1.128.4.1        ad 
   1439  1.128.4.1        ad 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1440  1.128.4.1        ad 	    vp, pgs, npages, flags);
   1441  1.128.4.1        ad 
   1442  1.128.4.1        ad 	off = pgs[0]->offset;
   1443  1.128.4.1        ad 	kva = uvm_pagermapin(pgs, npages,
   1444  1.128.4.1        ad 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1445  1.128.4.1        ad 	len = npages << PAGE_SHIFT;
   1446  1.128.4.1        ad 
   1447  1.128.4.1        ad 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1448  1.128.4.1        ad 			    uvm_aio_biodone);
   1449  1.128.4.1        ad 
   1450  1.128.4.1        ad 	return error;
   1451  1.128.4.1        ad }
   1452  1.128.4.1        ad 
   1453  1.128.4.1        ad /*
   1454  1.128.4.1        ad  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1455  1.128.4.1        ad  * and mapped into kernel memory.  Here we just look up the underlying
   1456  1.128.4.1        ad  * device block addresses and call the strategy routine.
   1457  1.128.4.1        ad  */
   1458  1.128.4.1        ad 
   1459  1.128.4.1        ad static int
   1460  1.128.4.1        ad genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1461  1.128.4.1        ad     enum uio_rw rw, void (*iodone)(struct buf *))
   1462  1.128.4.1        ad {
   1463       1.37       chs 	int s, error, run;
   1464       1.37       chs 	int fs_bshift, dev_bshift;
   1465       1.21       chs 	off_t eof, offset, startoffset;
   1466       1.21       chs 	size_t bytes, iobytes, skipbytes;
   1467       1.21       chs 	daddr_t lbn, blkno;
   1468       1.21       chs 	struct buf *mbp, *bp;
   1469       1.36       chs 	struct vnode *devvp;
   1470       1.37       chs 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1471  1.128.4.1        ad 	boolean_t write = rw == UIO_WRITE;
   1472  1.128.4.1        ad 	int brw = write ? B_WRITE : B_READ;
   1473  1.128.4.1        ad 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1474       1.21       chs 
   1475  1.128.4.1        ad 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1476  1.128.4.1        ad 	    vp, kva, len, flags);
   1477       1.21       chs 
   1478      1.123      yamt 	GOP_SIZE(vp, vp->v_size, &eof, 0);
   1479      1.121   reinoud 	if (vp->v_type != VBLK) {
   1480       1.36       chs 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1481       1.36       chs 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1482       1.36       chs 	} else {
   1483       1.36       chs 		fs_bshift = DEV_BSHIFT;
   1484       1.36       chs 		dev_bshift = DEV_BSHIFT;
   1485       1.36       chs 	}
   1486       1.37       chs 	error = 0;
   1487  1.128.4.1        ad 	startoffset = off;
   1488  1.128.4.1        ad 	bytes = MIN(len, eof - startoffset);
   1489       1.21       chs 	skipbytes = 0;
   1490       1.21       chs 	KASSERT(bytes != 0);
   1491       1.21       chs 
   1492  1.128.4.1        ad 	if (write) {
   1493  1.128.4.1        ad 		s = splbio();
   1494  1.128.4.1        ad 		simple_lock(&global_v_numoutput_slock);
   1495  1.128.4.1        ad 		vp->v_numoutput += 2;
   1496  1.128.4.1        ad 		simple_unlock(&global_v_numoutput_slock);
   1497  1.128.4.1        ad 		splx(s);
   1498  1.128.4.1        ad 	}
   1499      1.119      yamt 	mbp = getiobuf();
   1500       1.21       chs 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1501       1.53     enami 	    vp, mbp, vp->v_numoutput, bytes);
   1502  1.128.4.1        ad 	mbp->b_bufsize = len;
   1503       1.21       chs 	mbp->b_data = (void *)kva;
   1504       1.21       chs 	mbp->b_resid = mbp->b_bcount = bytes;
   1505  1.128.4.1        ad 	mbp->b_flags = B_BUSY | brw | B_AGE | (async ? (B_CALL | B_ASYNC) : 0);
   1506  1.128.4.1        ad 	mbp->b_iodone = iodone;
   1507       1.21       chs 	mbp->b_vp = vp;
   1508      1.120      yamt 	if (curproc == uvm.pagedaemon_proc)
   1509      1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1510      1.120      yamt 	else if (async)
   1511      1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1512      1.120      yamt 	else
   1513      1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1514       1.21       chs 
   1515       1.21       chs 	bp = NULL;
   1516       1.21       chs 	for (offset = startoffset;
   1517       1.53     enami 	    bytes > 0;
   1518       1.53     enami 	    offset += iobytes, bytes -= iobytes) {
   1519       1.21       chs 		lbn = offset >> fs_bshift;
   1520       1.36       chs 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1521       1.21       chs 		if (error) {
   1522       1.21       chs 			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
   1523       1.21       chs 			skipbytes += bytes;
   1524       1.21       chs 			bytes = 0;
   1525       1.21       chs 			break;
   1526       1.21       chs 		}
   1527       1.21       chs 
   1528       1.26       chs 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1529       1.26       chs 		    bytes);
   1530       1.21       chs 		if (blkno == (daddr_t)-1) {
   1531  1.128.4.1        ad 			if (!write) {
   1532  1.128.4.1        ad 				memset((char *)kva + (offset - startoffset), 0,
   1533  1.128.4.1        ad 				   iobytes);
   1534  1.128.4.1        ad 			}
   1535       1.21       chs 			skipbytes += iobytes;
   1536       1.21       chs 			continue;
   1537       1.21       chs 		}
   1538       1.21       chs 
   1539       1.21       chs 		/* if it's really one i/o, don't make a second buf */
   1540       1.21       chs 		if (offset == startoffset && iobytes == bytes) {
   1541       1.21       chs 			bp = mbp;
   1542       1.21       chs 		} else {
   1543       1.21       chs 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1544       1.53     enami 			    vp, bp, vp->v_numoutput, 0);
   1545      1.120      yamt 			bp = getiobuf();
   1546  1.128.4.1        ad 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1547       1.21       chs 		}
   1548       1.21       chs 		bp->b_lblkno = 0;
   1549       1.21       chs 
   1550       1.21       chs 		/* adjust physical blkno for partial blocks */
   1551       1.25      fvdl 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1552       1.53     enami 		    dev_bshift);
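                              		/*
                              		 * e.g. with hypothetical 8KB filesystem blocks
                              		 * (fs_bshift 13) and 512-byte device sectors
                              		 * (dev_bshift 9), an offset 4KB into the block starts
                              		 * 4096 >> 9 = 8 sectors past blkno.
                              		 */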
   1553       1.53     enami 		UVMHIST_LOG(ubchist,
   1554       1.53     enami 		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1555       1.53     enami 		    vp, offset, bp->b_bcount, bp->b_blkno);
   1556      1.114      yamt 
   1557      1.114      yamt 		VOP_STRATEGY(devvp, bp);
   1558       1.21       chs 	}
   1559       1.21       chs 	if (skipbytes) {
   1560       1.29       chs 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1561       1.21       chs 	}
   1562      1.120      yamt 	nestiobuf_done(mbp, skipbytes, error);
   1563       1.21       chs 	if (async) {
   1564       1.32       chs 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1565       1.53     enami 		return (0);
   1566       1.21       chs 	}
   1567       1.37       chs 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1568       1.37       chs 	error = biowait(mbp);
   1569  1.128.4.1        ad 	s = splbio();
   1570  1.128.4.1        ad 	(*iodone)(mbp);
   1571  1.128.4.1        ad 	splx(s);
   1572       1.21       chs 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1573       1.53     enami 	return (error);
   1574       1.42       chs }
   1575       1.42       chs 
   1576       1.42       chs /*
   1577       1.42       chs  * VOP_PUTPAGES() for vnodes which never have pages.
   1578       1.42       chs  */
   1579       1.42       chs 
   1580       1.42       chs int
   1581       1.42       chs genfs_null_putpages(void *v)
   1582       1.42       chs {
   1583       1.42       chs 	struct vop_putpages_args /* {
   1584       1.42       chs 		struct vnode *a_vp;
   1585       1.42       chs 		voff_t a_offlo;
   1586       1.42       chs 		voff_t a_offhi;
   1587       1.42       chs 		int a_flags;
   1588       1.42       chs 	} */ *ap = v;
   1589       1.42       chs 	struct vnode *vp = ap->a_vp;
   1590       1.42       chs 
   1591       1.42       chs 	KASSERT(vp->v_uobj.uo_npages == 0);
   1592       1.42       chs 	simple_unlock(&vp->v_interlock);
   1593       1.42       chs 	return (0);
   1594       1.21       chs }
   1595       1.21       chs 
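                              /*
                               * A minimal usage sketch for genfs_node_init() (the "myfs" names are
                               * hypothetical): a filesystem embeds a struct genfs_node at the start
                               * of its per-vnode data, so that VTOG() works, and installs its ops
                               * table when it sets up a vnode:
                               *
                               *	static const struct genfs_ops myfs_genfsops = {
                               *		.gop_size = genfs_size,
                               *		.gop_write = genfs_gop_write,
                               *	};
                               *	...
                               *	genfs_node_init(vp, &myfs_genfsops);
                               *
                               * The GOP_*() macros used earlier in this file dispatch through this
                               * table; a real filesystem typically also provides gop_alloc and
                               * gop_markupdate.
                               */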
   1596       1.37       chs void
   1597       1.98      yamt genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
   1598       1.37       chs {
   1599       1.37       chs 	struct genfs_node *gp = VTOG(vp);
   1600       1.37       chs 
   1601       1.37       chs 	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
   1602       1.37       chs 	gp->g_op = ops;
   1603       1.37       chs }
   1604       1.37       chs 
   1605       1.37       chs void
   1606       1.72  perseant genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
   1607       1.21       chs {
   1608       1.21       chs 	int bsize;
   1609       1.21       chs 
   1610       1.37       chs 	bsize = 1 << vp->v_mount->mnt_fs_bshift;
   1611       1.37       chs 	*eobp = (size + bsize - 1) & ~(bsize - 1);
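                              	/*
                              	 * e.g. with a hypothetical 8KB block size, a 5000-byte file
                              	 * yields an 8192-byte end of block.
                              	 */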
   1612       1.43       chs }
   1613       1.43       chs 
   1614       1.43       chs int
   1615       1.43       chs genfs_compat_getpages(void *v)
   1616       1.43       chs {
   1617       1.43       chs 	struct vop_getpages_args /* {
   1618       1.43       chs 		struct vnode *a_vp;
   1619       1.43       chs 		voff_t a_offset;
   1620       1.43       chs 		struct vm_page **a_m;
   1621       1.43       chs 		int *a_count;
   1622       1.43       chs 		int a_centeridx;
   1623       1.43       chs 		vm_prot_t a_access_type;
   1624       1.43       chs 		int a_advice;
   1625       1.43       chs 		int a_flags;
   1626       1.43       chs 	} */ *ap = v;
   1627       1.43       chs 
   1628       1.43       chs 	off_t origoffset;
   1629       1.43       chs 	struct vnode *vp = ap->a_vp;
   1630       1.43       chs 	struct uvm_object *uobj = &vp->v_uobj;
   1631       1.43       chs 	struct vm_page *pg, **pgs;
   1632       1.43       chs 	vaddr_t kva;
   1633       1.43       chs 	int i, error, orignpages, npages;
   1634       1.43       chs 	struct iovec iov;
   1635       1.43       chs 	struct uio uio;
   1636      1.128        ad 	kauth_cred_t cred = curlwp->l_cred;
   1637       1.43       chs 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1638       1.43       chs 
   1639       1.43       chs 	error = 0;
   1640       1.43       chs 	origoffset = ap->a_offset;
   1641       1.43       chs 	orignpages = *ap->a_count;
   1642       1.43       chs 	pgs = ap->a_m;
   1643       1.43       chs 
   1644       1.43       chs 	if (write && (vp->v_flag & VONWORKLST) == 0) {
   1645       1.43       chs 		vn_syncer_add_to_worklist(vp, filedelay);
   1646       1.43       chs 	}
   1647       1.43       chs 	if (ap->a_flags & PGO_LOCKED) {
   1648       1.43       chs 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1649       1.54     enami 		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));
   1650       1.43       chs 
   1651       1.53     enami 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
   1652       1.43       chs 	}
   1653       1.43       chs 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1654       1.43       chs 		simple_unlock(&uobj->vmobjlock);
   1655       1.53     enami 		return (EINVAL);
   1656       1.43       chs 	}
   1657      1.115      yamt 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1658      1.117      yamt 		simple_unlock(&uobj->vmobjlock);
   1659      1.115      yamt 		return 0;
   1660      1.115      yamt 	}
   1661       1.43       chs 	npages = orignpages;
   1662       1.43       chs 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1663       1.43       chs 	simple_unlock(&uobj->vmobjlock);
   1664       1.53     enami 	kva = uvm_pagermapin(pgs, npages,
   1665       1.53     enami 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1666       1.43       chs 	for (i = 0; i < npages; i++) {
   1667       1.43       chs 		pg = pgs[i];
   1668       1.43       chs 		if ((pg->flags & PG_FAKE) == 0) {
   1669       1.43       chs 			continue;
   1670       1.43       chs 		}
   1671       1.43       chs 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1672       1.43       chs 		iov.iov_len = PAGE_SIZE;
   1673       1.43       chs 		uio.uio_iov = &iov;
   1674       1.43       chs 		uio.uio_iovcnt = 1;
   1675       1.43       chs 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1676       1.43       chs 		uio.uio_rw = UIO_READ;
   1677       1.43       chs 		uio.uio_resid = PAGE_SIZE;
   1678      1.122      yamt 		UIO_SETUP_SYSSPACE(&uio);
   1679       1.87      yamt 		/* XXX vn_lock */
   1680       1.43       chs 		error = VOP_READ(vp, &uio, 0, cred);
   1681       1.43       chs 		if (error) {
   1682       1.43       chs 			break;
   1683       1.52       chs 		}
   1684       1.52       chs 		if (uio.uio_resid) {
   1685       1.52       chs 			memset(iov.iov_base, 0, uio.uio_resid);
   1686       1.43       chs 		}
   1687       1.43       chs 	}
   1688       1.43       chs 	uvm_pagermapout(kva, npages);
   1689       1.43       chs 	simple_lock(&uobj->vmobjlock);
   1690       1.43       chs 	uvm_lock_pageq();
   1691       1.43       chs 	for (i = 0; i < npages; i++) {
   1692       1.43       chs 		pg = pgs[i];
   1693       1.43       chs 		if (error && (pg->flags & PG_FAKE) != 0) {
   1694       1.43       chs 			pg->flags |= PG_RELEASED;
   1695       1.43       chs 		} else {
   1696       1.43       chs 			pmap_clear_modify(pg);
   1697       1.43       chs 			uvm_pageactivate(pg);
   1698       1.43       chs 		}
   1699       1.43       chs 	}
   1700       1.43       chs 	if (error) {
   1701       1.43       chs 		uvm_page_unbusy(pgs, npages);
   1702       1.43       chs 	}
   1703       1.43       chs 	uvm_unlock_pageq();
   1704       1.43       chs 	simple_unlock(&uobj->vmobjlock);
   1705       1.53     enami 	return (error);
   1706       1.43       chs }
   1707       1.43       chs 
   1708       1.43       chs int
   1709       1.43       chs genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1710       1.43       chs     int flags)
   1711       1.43       chs {
   1712       1.43       chs 	off_t offset;
   1713       1.43       chs 	struct iovec iov;
   1714       1.43       chs 	struct uio uio;
   1715      1.128        ad 	kauth_cred_t cred = curlwp->l_cred;
   1716       1.43       chs 	struct buf *bp;
   1717       1.43       chs 	vaddr_t kva;
   1718       1.43       chs 	int s, error;
   1719       1.43       chs 
   1720       1.43       chs 	offset = pgs[0]->offset;
   1721       1.53     enami 	kva = uvm_pagermapin(pgs, npages,
   1722       1.53     enami 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1723       1.43       chs 
   1724       1.43       chs 	iov.iov_base = (void *)kva;
   1725       1.43       chs 	iov.iov_len = npages << PAGE_SHIFT;
   1726       1.43       chs 	uio.uio_iov = &iov;
   1727       1.68      yamt 	uio.uio_iovcnt = 1;
   1728       1.43       chs 	uio.uio_offset = offset;
   1729       1.43       chs 	uio.uio_rw = UIO_WRITE;
   1730       1.43       chs 	uio.uio_resid = npages << PAGE_SHIFT;
   1731      1.122      yamt 	UIO_SETUP_SYSSPACE(&uio);
   1732       1.87      yamt 	/* XXX vn_lock */
   1733       1.43       chs 	error = VOP_WRITE(vp, &uio, 0, cred);
   1734       1.43       chs 
   1735       1.43       chs 	s = splbio();
   1736       1.71        pk 	V_INCR_NUMOUTPUT(vp);
   1737       1.43       chs 	splx(s);
   1738       1.43       chs 
   1739      1.119      yamt 	bp = getiobuf();
   1740       1.43       chs 	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
   1741       1.43       chs 	bp->b_vp = vp;
   1742       1.43       chs 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1743       1.43       chs 	bp->b_data = (char *)kva;
   1744       1.43       chs 	bp->b_bcount = npages << PAGE_SHIFT;
   1745       1.43       chs 	bp->b_bufsize = npages << PAGE_SHIFT;
   1746       1.43       chs 	bp->b_resid = 0;
   1747       1.43       chs 	if (error) {
   1748       1.43       chs 		bp->b_flags |= B_ERROR;
   1749       1.43       chs 		bp->b_error = error;
   1750       1.43       chs 	}
   1751       1.43       chs 	uvm_aio_aiodone(bp);
   1752       1.53     enami 	return (error);
   1753       1.66  jdolecek }
   1754       1.66  jdolecek 
   1755  1.128.4.1        ad /*
   1756  1.128.4.1        ad  * Process a uio using direct I/O.  If we reach a part of the request
   1757  1.128.4.1        ad  * which cannot be processed in this fashion for some reason, just return.
   1758  1.128.4.1        ad  * The caller must handle some additional part of the request using
   1759  1.128.4.1        ad  * buffered I/O before trying direct I/O again.
   1760  1.128.4.1        ad  */
   1761  1.128.4.1        ad 
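                              /*
                               * A rough usage sketch (not taken from any particular filesystem):
                               * a read or write vnode op could call this when direct I/O has been
                               * requested and then fall back to its normal buffered path for
                               * whatever part of the uio is left, e.g.
                               *
                               *	if (ioflag & IO_DIRECT)
                               *		genfs_directio(vp, uio, ioflag);
                               *	if (uio->uio_resid == 0)
                               *		return (0);
                               *	... continue with buffered I/O ...
                               */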
   1762  1.128.4.1        ad void
   1763  1.128.4.1        ad genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1764  1.128.4.1        ad {
   1765  1.128.4.1        ad 	struct vmspace *vs;
   1766  1.128.4.1        ad 	struct iovec *iov;
   1767  1.128.4.1        ad 	vaddr_t va;
   1768  1.128.4.1        ad 	size_t len;
   1769  1.128.4.1        ad 	const int mask = DEV_BSIZE - 1;
   1770  1.128.4.1        ad 	int error;
   1771  1.128.4.1        ad 
   1772  1.128.4.1        ad 	/*
   1773  1.128.4.1        ad 	 * We only support direct I/O to user space for now.
   1774  1.128.4.1        ad 	 */
   1775  1.128.4.1        ad 
   1776  1.128.4.1        ad 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1777  1.128.4.1        ad 		return;
   1778  1.128.4.1        ad 	}
   1779  1.128.4.1        ad 
   1780  1.128.4.1        ad 	/*
   1781  1.128.4.1        ad 	 * If the vnode is mapped, we would need to get the getpages lock
    1782  1.128.4.1        ad 	 * to stabilize the bmap, but then we would get into trouble while
   1783  1.128.4.1        ad 	 * locking the pages if the pages belong to this same vnode (or a
   1784  1.128.4.1        ad 	 * multi-vnode cascade to the same effect).  Just fall back to
   1785  1.128.4.1        ad 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1786  1.128.4.1        ad 	 */
   1787  1.128.4.1        ad 
   1788  1.128.4.1        ad 	if (vp->v_flag & VMAPPED) {
   1789  1.128.4.1        ad 		return;
   1790  1.128.4.1        ad 	}
   1791  1.128.4.1        ad 
   1792  1.128.4.1        ad 	/*
   1793  1.128.4.1        ad 	 * Do as much of the uio as possible with direct I/O.
   1794  1.128.4.1        ad 	 */
   1795  1.128.4.1        ad 
   1796  1.128.4.1        ad 	vs = uio->uio_vmspace;
   1797  1.128.4.1        ad 	while (uio->uio_resid) {
   1798  1.128.4.1        ad 		iov = uio->uio_iov;
   1799  1.128.4.1        ad 		if (iov->iov_len == 0) {
   1800  1.128.4.1        ad 			uio->uio_iov++;
   1801  1.128.4.1        ad 			uio->uio_iovcnt--;
   1802  1.128.4.1        ad 			continue;
   1803  1.128.4.1        ad 		}
   1804  1.128.4.1        ad 		va = (vaddr_t)iov->iov_base;
   1805  1.128.4.1        ad 		len = MIN(iov->iov_len, genfs_maxdio);
   1806  1.128.4.1        ad 		len &= ~mask;
   1807  1.128.4.1        ad 
   1808  1.128.4.1        ad 		/*
   1809  1.128.4.1        ad 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1810  1.128.4.1        ad 		 * the current EOF, then fall back to buffered I/O.
   1811  1.128.4.1        ad 		 */
   1812  1.128.4.1        ad 
   1813  1.128.4.1        ad 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1814  1.128.4.1        ad 			return;
   1815  1.128.4.1        ad 		}
   1816  1.128.4.1        ad 
   1817  1.128.4.1        ad 		/*
   1818  1.128.4.1        ad 		 * Check alignment.  The file offset must be at least
   1819  1.128.4.1        ad 		 * sector-aligned.  The exact constraint on memory alignment
   1820  1.128.4.1        ad 		 * is very hardware-dependent, but requiring sector-aligned
   1821  1.128.4.1        ad 		 * addresses there too is safe.
   1822  1.128.4.1        ad 		 */
   1823  1.128.4.1        ad 
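                              		/*
                              		 * e.g. with the usual 512-byte DEV_BSIZE (mask 511),
                              		 * a request at file offset 100, or from a user buffer
                              		 * whose address has any of the low 9 bits set, falls
                              		 * back to buffered I/O here.
                              		 */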
   1824  1.128.4.1        ad 		if (uio->uio_offset & mask || va & mask) {
   1825  1.128.4.1        ad 			return;
   1826  1.128.4.1        ad 		}
   1827  1.128.4.1        ad 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1828  1.128.4.1        ad 					  uio->uio_rw);
   1829  1.128.4.1        ad 		if (error) {
   1830  1.128.4.1        ad 			break;
   1831  1.128.4.1        ad 		}
   1832  1.128.4.1        ad 		iov->iov_base = (caddr_t)iov->iov_base + len;
   1833  1.128.4.1        ad 		iov->iov_len -= len;
   1834  1.128.4.1        ad 		uio->uio_offset += len;
   1835  1.128.4.1        ad 		uio->uio_resid -= len;
   1836  1.128.4.1        ad 	}
   1837  1.128.4.1        ad }
   1838  1.128.4.1        ad 
   1839  1.128.4.1        ad /*
   1840  1.128.4.1        ad  * Iodone routine for direct I/O.  We don't do much here since the request is
   1841  1.128.4.1        ad  * always synchronous, so the caller will do most of the work after biowait().
   1842  1.128.4.1        ad  */
   1843  1.128.4.1        ad 
   1844  1.128.4.1        ad static void
   1845  1.128.4.1        ad genfs_dio_iodone(struct buf *bp)
   1846  1.128.4.1        ad {
   1847  1.128.4.1        ad 	int s;
   1848  1.128.4.1        ad 
   1849  1.128.4.1        ad 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1850  1.128.4.1        ad 	s = splbio();
   1851  1.128.4.1        ad 	if ((bp->b_flags & (B_READ | B_AGE)) == B_AGE) {
   1852  1.128.4.1        ad 		vwakeup(bp);
   1853  1.128.4.1        ad 	}
   1854  1.128.4.1        ad 	putiobuf(bp);
   1855  1.128.4.1        ad 	splx(s);
   1856  1.128.4.1        ad }
   1857  1.128.4.1        ad 
   1858  1.128.4.1        ad /*
   1859  1.128.4.1        ad  * Process one chunk of a direct I/O request.
   1860  1.128.4.1        ad  */
   1861  1.128.4.1        ad 
   1862  1.128.4.1        ad static int
   1863  1.128.4.1        ad genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1864  1.128.4.1        ad     off_t off, enum uio_rw rw)
   1865  1.128.4.1        ad {
   1866  1.128.4.1        ad 	struct vm_map *map;
   1867  1.128.4.1        ad 	struct pmap *upm, *kpm;
   1868  1.128.4.1        ad 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1869  1.128.4.1        ad 	off_t spoff, epoff;
   1870  1.128.4.1        ad 	vaddr_t kva, puva;
   1871  1.128.4.1        ad 	paddr_t pa;
   1872  1.128.4.1        ad 	vm_prot_t prot;
   1873  1.128.4.1        ad 	int error, rv, poff, koff;
   1874  1.128.4.1        ad 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO |
   1875  1.128.4.1        ad 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1876  1.128.4.1        ad 
   1877  1.128.4.1        ad 	/*
   1878  1.128.4.1        ad 	 * For writes, verify that this range of the file already has fully
   1879  1.128.4.1        ad 	 * allocated backing store.  If there are any holes, just punt and
   1880  1.128.4.1        ad 	 * make the caller take the buffered write path.
   1881  1.128.4.1        ad 	 */
   1882  1.128.4.1        ad 
   1883  1.128.4.1        ad 	if (rw == UIO_WRITE) {
   1884  1.128.4.1        ad 		daddr_t lbn, elbn, blkno;
   1885  1.128.4.1        ad 		int bsize, bshift, run;
   1886  1.128.4.1        ad 
   1887  1.128.4.1        ad 		bshift = vp->v_mount->mnt_fs_bshift;
   1888  1.128.4.1        ad 		bsize = 1 << bshift;
   1889  1.128.4.1        ad 		lbn = off >> bshift;
   1890  1.128.4.1        ad 		elbn = (off + len + bsize - 1) >> bshift;
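                              		/*
                              		 * e.g. a 16KB write at offset 4KB with hypothetical
                              		 * 8KB blocks gives lbn 0 and elbn 3, so file blocks
                              		 * 0 through 2 must all have backing store.
                              		 */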
   1891  1.128.4.1        ad 		while (lbn < elbn) {
   1892  1.128.4.1        ad 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1893  1.128.4.1        ad 			if (error) {
   1894  1.128.4.1        ad 				return error;
   1895  1.128.4.1        ad 			}
   1896  1.128.4.1        ad 			if (blkno == (daddr_t)-1) {
   1897  1.128.4.1        ad 				return ENOSPC;
   1898  1.128.4.1        ad 			}
   1899  1.128.4.1        ad 			lbn += 1 + run;
   1900  1.128.4.1        ad 		}
   1901  1.128.4.1        ad 	}
   1902  1.128.4.1        ad 
   1903  1.128.4.1        ad 	/*
   1904  1.128.4.1        ad 	 * Flush any cached pages for parts of the file that we're about to
   1905  1.128.4.1        ad 	 * access.  If we're writing, invalidate pages as well.
   1906  1.128.4.1        ad 	 */
   1907  1.128.4.1        ad 
   1908  1.128.4.1        ad 	spoff = trunc_page(off);
   1909  1.128.4.1        ad 	epoff = round_page(off + len);
   1910  1.128.4.1        ad 	simple_lock(&vp->v_interlock);
   1911  1.128.4.1        ad 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1912  1.128.4.1        ad 	if (error) {
   1913  1.128.4.1        ad 		return error;
   1914  1.128.4.1        ad 	}
   1915  1.128.4.1        ad 
   1916  1.128.4.1        ad 	/*
   1917  1.128.4.1        ad 	 * Wire the user pages and remap them into kernel memory.
   1918  1.128.4.1        ad 	 */
   1919  1.128.4.1        ad 
   1920  1.128.4.1        ad 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1921  1.128.4.1        ad 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1922  1.128.4.1        ad 	if (error) {
   1923  1.128.4.1        ad 		return error;
   1924  1.128.4.1        ad 	}
   1925  1.128.4.1        ad 
   1926  1.128.4.1        ad 	map = &vs->vm_map;
   1927  1.128.4.1        ad 	upm = vm_map_pmap(map);
   1928  1.128.4.1        ad 	kpm = vm_map_pmap(kernel_map);
   1929  1.128.4.1        ad 	kva = uvm_km_alloc(kernel_map, klen, 0,
   1930  1.128.4.1        ad 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   1931  1.128.4.1        ad 	puva = trunc_page(uva);
   1932  1.128.4.1        ad 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1933  1.128.4.1        ad 		rv = pmap_extract(upm, puva + poff, &pa);
   1934  1.128.4.1        ad 		KASSERT(rv);
   1935  1.128.4.1        ad 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   1936  1.128.4.1        ad 	}
   1937  1.128.4.1        ad 	pmap_update(kpm);
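                              	/*
                              	 * klen was rounded out to whole pages above, so an unaligned
                              	 * user buffer can cost one extra page of kernel VA; e.g. with
                              	 * 4KB pages, a 4KB transfer starting 1KB into a page needs an
                              	 * 8KB mapping here.
                              	 */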
   1938  1.128.4.1        ad 
   1939  1.128.4.1        ad 	/*
   1940  1.128.4.1        ad 	 * Do the I/O.
   1941  1.128.4.1        ad 	 */
   1942  1.128.4.1        ad 
   1943  1.128.4.1        ad 	koff = uva - trunc_page(uva);
   1944  1.128.4.1        ad 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1945  1.128.4.1        ad 			    genfs_dio_iodone);
   1946  1.128.4.1        ad 
   1947  1.128.4.1        ad 	/*
   1948  1.128.4.1        ad 	 * Tear down the kernel mapping.
   1949  1.128.4.1        ad 	 */
   1950  1.128.4.1        ad 
   1951  1.128.4.1        ad 	pmap_remove(kpm, kva, kva + klen);
   1952  1.128.4.1        ad 	pmap_update(kpm);
   1953  1.128.4.1        ad 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1954  1.128.4.1        ad 
   1955  1.128.4.1        ad 	/*
   1956  1.128.4.1        ad 	 * Unwire the user pages.
   1957  1.128.4.1        ad 	 */
   1958  1.128.4.1        ad 
   1959  1.128.4.1        ad 	uvm_vsunlock(vs, (void *)uva, len);
   1960  1.128.4.1        ad 	return error;
   1961  1.128.4.1        ad }
   1962  1.128.4.1        ad 
   1963  1.128.4.1        ad 
   1964       1.66  jdolecek static void
   1965       1.66  jdolecek filt_genfsdetach(struct knote *kn)
   1966       1.66  jdolecek {
   1967       1.66  jdolecek 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   1968       1.66  jdolecek 
   1969       1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   1970       1.66  jdolecek 	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
   1971       1.66  jdolecek }
   1972       1.66  jdolecek 
   1973       1.66  jdolecek static int
   1974       1.66  jdolecek filt_genfsread(struct knote *kn, long hint)
   1975       1.66  jdolecek {
   1976       1.66  jdolecek 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   1977       1.66  jdolecek 
   1978       1.66  jdolecek 	/*
   1979       1.66  jdolecek 	 * filesystem is gone, so set the EOF flag and schedule
   1980       1.66  jdolecek 	 * the knote for deletion.
   1981       1.66  jdolecek 	 */
   1982       1.66  jdolecek 	if (hint == NOTE_REVOKE) {
   1983       1.66  jdolecek 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
   1984       1.66  jdolecek 		return (1);
   1985       1.66  jdolecek 	}
   1986       1.66  jdolecek 
   1987       1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   1988       1.66  jdolecek 	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
    1989       1.66  jdolecek 	return (kn->kn_data != 0);
   1990       1.66  jdolecek }
   1991       1.66  jdolecek 
   1992       1.66  jdolecek static int
   1993       1.66  jdolecek filt_genfsvnode(struct knote *kn, long hint)
   1994       1.66  jdolecek {
   1995       1.66  jdolecek 
   1996       1.66  jdolecek 	if (kn->kn_sfflags & hint)
   1997       1.66  jdolecek 		kn->kn_fflags |= hint;
   1998       1.66  jdolecek 	if (hint == NOTE_REVOKE) {
   1999       1.66  jdolecek 		kn->kn_flags |= EV_EOF;
   2000       1.66  jdolecek 		return (1);
   2001       1.66  jdolecek 	}
   2002       1.66  jdolecek 	return (kn->kn_fflags != 0);
   2003       1.66  jdolecek }
   2004       1.66  jdolecek 
   2005       1.96     perry static const struct filterops genfsread_filtops =
   2006       1.66  jdolecek 	{ 1, NULL, filt_genfsdetach, filt_genfsread };
   2007       1.96     perry static const struct filterops genfsvnode_filtops =
   2008       1.66  jdolecek 	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };
   2009       1.66  jdolecek 
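                              /*
                               * Filesystems do not normally call genfs_kqfilter() directly; the
                               * usual hookup (a sketch, not from any particular filesystem) is an
                               * entry in the filesystem's vnode operation table:
                               *
                               *	{ &vop_kqfilter_desc, genfs_kqfilter },
                               */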
   2010       1.66  jdolecek int
   2011       1.66  jdolecek genfs_kqfilter(void *v)
   2012       1.66  jdolecek {
   2013       1.66  jdolecek 	struct vop_kqfilter_args /* {
   2014       1.66  jdolecek 		struct vnode	*a_vp;
   2015       1.66  jdolecek 		struct knote	*a_kn;
   2016       1.66  jdolecek 	} */ *ap = v;
   2017       1.66  jdolecek 	struct vnode *vp;
   2018       1.66  jdolecek 	struct knote *kn;
   2019       1.66  jdolecek 
   2020       1.66  jdolecek 	vp = ap->a_vp;
   2021       1.66  jdolecek 	kn = ap->a_kn;
   2022       1.66  jdolecek 	switch (kn->kn_filter) {
   2023       1.66  jdolecek 	case EVFILT_READ:
   2024       1.66  jdolecek 		kn->kn_fop = &genfsread_filtops;
   2025       1.66  jdolecek 		break;
   2026       1.66  jdolecek 	case EVFILT_VNODE:
   2027       1.66  jdolecek 		kn->kn_fop = &genfsvnode_filtops;
   2028       1.66  jdolecek 		break;
   2029       1.66  jdolecek 	default:
   2030       1.66  jdolecek 		return (1);
   2031       1.66  jdolecek 	}
   2032       1.66  jdolecek 
   2033       1.66  jdolecek 	kn->kn_hook = vp;
   2034       1.66  jdolecek 
   2035       1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   2036       1.66  jdolecek 	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
   2037       1.66  jdolecek 
   2038       1.66  jdolecek 	return (0);
   2039        1.1   mycroft }
   2040  1.128.4.1        ad 
   2041  1.128.4.1        ad void
   2042  1.128.4.1        ad genfs_node_wrlock(struct vnode *vp)
   2043  1.128.4.1        ad {
   2044  1.128.4.1        ad 	struct genfs_node *gp = VTOG(vp);
   2045  1.128.4.1        ad 
   2046  1.128.4.1        ad 	lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
   2047  1.128.4.1        ad }
   2048  1.128.4.1        ad 
   2049  1.128.4.1        ad void
   2050  1.128.4.1        ad genfs_node_rdlock(struct vnode *vp)
   2051  1.128.4.1        ad {
   2052  1.128.4.1        ad 	struct genfs_node *gp = VTOG(vp);
   2053  1.128.4.1        ad 
   2054  1.128.4.1        ad 	lockmgr(&gp->g_glock, LK_SHARED, NULL);
   2055  1.128.4.1        ad }
   2056  1.128.4.1        ad 
   2057  1.128.4.1        ad void
   2058  1.128.4.1        ad genfs_node_unlock(struct vnode *vp)
   2059  1.128.4.1        ad {
   2060  1.128.4.1        ad 	struct genfs_node *gp = VTOG(vp);
   2061  1.128.4.1        ad 
   2062  1.128.4.1        ad 	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
   2063  1.128.4.1        ad }
   2064