genfs_vnops.c revision 1.150.2.14
      1  1.150.2.14        ad /*	$NetBSD: genfs_vnops.c,v 1.150.2.14 2007/09/16 19:04:34 ad Exp $	*/
      2         1.6      fvdl 
      3         1.6      fvdl /*
      4         1.6      fvdl  * Copyright (c) 1982, 1986, 1989, 1993
      5         1.6      fvdl  *	The Regents of the University of California.  All rights reserved.
      6         1.6      fvdl  *
      7         1.6      fvdl  * Redistribution and use in source and binary forms, with or without
      8         1.6      fvdl  * modification, are permitted provided that the following conditions
      9         1.6      fvdl  * are met:
     10         1.6      fvdl  * 1. Redistributions of source code must retain the above copyright
     11         1.6      fvdl  *    notice, this list of conditions and the following disclaimer.
     12         1.6      fvdl  * 2. Redistributions in binary form must reproduce the above copyright
     13         1.6      fvdl  *    notice, this list of conditions and the following disclaimer in the
     14         1.6      fvdl  *    documentation and/or other materials provided with the distribution.
     15        1.81       agc  * 3. Neither the name of the University nor the names of its contributors
     16         1.6      fvdl  *    may be used to endorse or promote products derived from this software
     17         1.6      fvdl  *    without specific prior written permission.
     18         1.6      fvdl  *
     19         1.6      fvdl  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20         1.6      fvdl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21         1.6      fvdl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22         1.6      fvdl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23         1.6      fvdl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24         1.6      fvdl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25         1.6      fvdl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26         1.6      fvdl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27         1.6      fvdl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28         1.6      fvdl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29         1.6      fvdl  * SUCH DAMAGE.
     30         1.6      fvdl  *
     31         1.6      fvdl  */
     32        1.40     lukem 
     33        1.40     lukem #include <sys/cdefs.h>
     34  1.150.2.14        ad __KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.150.2.14 2007/09/16 19:04:34 ad Exp $");
     35         1.8   thorpej 
     36         1.1   mycroft #include <sys/param.h>
     37         1.1   mycroft #include <sys/systm.h>
     38         1.6      fvdl #include <sys/proc.h>
     39         1.1   mycroft #include <sys/kernel.h>
     40         1.1   mycroft #include <sys/mount.h>
     41         1.1   mycroft #include <sys/namei.h>
     42         1.1   mycroft #include <sys/vnode.h>
     43        1.13  wrstuden #include <sys/fcntl.h>
     44       1.135      yamt #include <sys/kmem.h>
     45         1.3   mycroft #include <sys/poll.h>
     46        1.37       chs #include <sys/mman.h>
     47        1.66  jdolecek #include <sys/file.h>
     48       1.125      elad #include <sys/kauth.h>
     49       1.143   hannken #include <sys/fstrans.h>
     50         1.1   mycroft 
     51         1.1   mycroft #include <miscfs/genfs/genfs.h>
     52        1.37       chs #include <miscfs/genfs/genfs_node.h>
     53         1.6      fvdl #include <miscfs/specfs/specdev.h>
     54         1.1   mycroft 
     55        1.21       chs #include <uvm/uvm.h>
     56        1.21       chs #include <uvm/uvm_pager.h>
     57        1.21       chs 
     58       1.130       chs static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     59       1.130       chs     off_t, enum uio_rw);
     60       1.130       chs static void genfs_dio_iodone(struct buf *);
     61       1.130       chs 
     62       1.130       chs static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     63       1.130       chs     void (*)(struct buf *));
     64       1.118     perry static inline void genfs_rel_pages(struct vm_page **, int);
     65        1.70  christos static void filt_genfsdetach(struct knote *);
     66        1.70  christos static int filt_genfsread(struct knote *, long);
     67        1.70  christos static int filt_genfsvnode(struct knote *, long);
     68        1.70  christos 
     69       1.110      yamt #define MAX_READ_PAGES	16 	/* XXXUBC 16 */
     70        1.41  christos 
     71       1.130       chs int genfs_maxdio = MAXPHYS;
     72       1.130       chs 
     73         1.1   mycroft int
     74        1.53     enami genfs_poll(void *v)
     75         1.1   mycroft {
     76         1.3   mycroft 	struct vop_poll_args /* {
     77         1.1   mycroft 		struct vnode *a_vp;
     78         1.3   mycroft 		int a_events;
     79       1.116  christos 		struct lwp *a_l;
     80         1.1   mycroft 	} */ *ap = v;
     81         1.1   mycroft 
     82         1.3   mycroft 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
     83         1.1   mycroft }
     84         1.1   mycroft 
     85         1.1   mycroft int
     86        1.53     enami genfs_seek(void *v)
     87         1.4    kleink {
     88         1.4    kleink 	struct vop_seek_args /* {
     89         1.4    kleink 		struct vnode *a_vp;
     90         1.4    kleink 		off_t a_oldoff;
     91         1.4    kleink 		off_t a_newoff;
     92       1.125      elad 		kauth_cred_t cred;
     93         1.4    kleink 	} */ *ap = v;
     94         1.4    kleink 
     95         1.4    kleink 	if (ap->a_newoff < 0)
     96         1.4    kleink 		return (EINVAL);
     97         1.4    kleink 
     98         1.4    kleink 	return (0);
     99         1.4    kleink }
    100         1.4    kleink 
    101         1.4    kleink int
    102        1.53     enami genfs_abortop(void *v)
    103         1.1   mycroft {
    104         1.1   mycroft 	struct vop_abortop_args /* {
    105         1.1   mycroft 		struct vnode *a_dvp;
    106         1.1   mycroft 		struct componentname *a_cnp;
    107         1.1   mycroft 	} */ *ap = v;
    108        1.53     enami 
    109         1.1   mycroft 	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
    110        1.19   thorpej 		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
    111         1.1   mycroft 	return (0);
    112        1.13  wrstuden }
    113        1.13  wrstuden 
    114        1.13  wrstuden int
    115        1.53     enami genfs_fcntl(void *v)
    116        1.13  wrstuden {
    117        1.13  wrstuden 	struct vop_fcntl_args /* {
    118        1.13  wrstuden 		struct vnode *a_vp;
    119        1.13  wrstuden 		u_int a_command;
    120       1.150  christos 		void *a_data;
    121        1.13  wrstuden 		int a_fflag;
    122       1.125      elad 		kauth_cred_t a_cred;
    123       1.116  christos 		struct lwp *a_l;
    124        1.13  wrstuden 	} */ *ap = v;
    125        1.13  wrstuden 
    126        1.13  wrstuden 	if (ap->a_command == F_SETFL)
    127        1.13  wrstuden 		return (0);
    128        1.13  wrstuden 	else
    129        1.13  wrstuden 		return (EOPNOTSUPP);
    130         1.1   mycroft }
    131         1.1   mycroft 
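/*
 * Example (an illustrative sketch only; "somefs" and its entry list are
 * made-up names, not taken from any real file system): file systems
 * normally plug these defaults straight into their vnode operation
 * vectors rather than wrapping them, e.g.
 *
 *	const struct vnodeopv_entry_desc somefs_vnodeop_entries[] = {
 *		{ &vop_default_desc, vn_default_error },
 *		{ &vop_poll_desc, genfs_poll },
 *		{ &vop_seek_desc, genfs_seek },
 *		{ &vop_abortop_desc, genfs_abortop },
 *		{ &vop_fcntl_desc, genfs_fcntl },
 *		{ NULL, NULL }
 *	};
 */
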
    132         1.1   mycroft /*ARGSUSED*/
    133         1.1   mycroft int
    134       1.138  christos genfs_badop(void *v)
    135         1.1   mycroft {
    136         1.1   mycroft 
    137         1.1   mycroft 	panic("genfs: bad op");
    138         1.1   mycroft }
    139         1.1   mycroft 
    140         1.1   mycroft /*ARGSUSED*/
    141         1.1   mycroft int
    142       1.138  christos genfs_nullop(void *v)
    143         1.1   mycroft {
    144         1.1   mycroft 
    145         1.1   mycroft 	return (0);
    146        1.10    kleink }
    147        1.10    kleink 
    148        1.10    kleink /*ARGSUSED*/
    149        1.10    kleink int
    150       1.138  christos genfs_einval(void *v)
    151        1.10    kleink {
    152        1.10    kleink 
    153        1.10    kleink 	return (EINVAL);
    154         1.1   mycroft }
    155         1.1   mycroft 
    156        1.12  wrstuden /*
    157        1.74  jdolecek  * Called when an fs doesn't support a particular vop.
     158        1.74  jdolecek  * This takes care to vrele, vput, or vunlock any vnodes passed in.
    159        1.12  wrstuden  */
    160        1.12  wrstuden int
    161        1.75  jdolecek genfs_eopnotsupp(void *v)
    162        1.12  wrstuden {
    163        1.12  wrstuden 	struct vop_generic_args /*
    164        1.12  wrstuden 		struct vnodeop_desc *a_desc;
    165        1.53     enami 		/ * other random data follows, presumably * /
    166        1.12  wrstuden 	} */ *ap = v;
    167        1.12  wrstuden 	struct vnodeop_desc *desc = ap->a_desc;
    168        1.74  jdolecek 	struct vnode *vp, *vp_last = NULL;
    169        1.12  wrstuden 	int flags, i, j, offset;
    170        1.12  wrstuden 
    171        1.12  wrstuden 	flags = desc->vdesc_flags;
    172        1.12  wrstuden 	for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
    173        1.12  wrstuden 		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
    174        1.12  wrstuden 			break;	/* stop at end of list */
    175        1.12  wrstuden 		if ((j = flags & VDESC_VP0_WILLPUT)) {
    176        1.53     enami 			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
    177        1.74  jdolecek 
    178        1.74  jdolecek 			/* Skip if NULL */
    179        1.74  jdolecek 			if (!vp)
    180        1.74  jdolecek 				continue;
    181        1.74  jdolecek 
    182        1.12  wrstuden 			switch (j) {
    183        1.12  wrstuden 			case VDESC_VP0_WILLPUT:
    184        1.74  jdolecek 				/* Check for dvp == vp cases */
    185        1.74  jdolecek 				if (vp == vp_last)
    186        1.74  jdolecek 					vrele(vp);
    187        1.74  jdolecek 				else {
    188        1.74  jdolecek 					vput(vp);
    189        1.74  jdolecek 					vp_last = vp;
    190        1.74  jdolecek 				}
    191        1.12  wrstuden 				break;
    192        1.12  wrstuden 			case VDESC_VP0_WILLUNLOCK:
    193        1.12  wrstuden 				VOP_UNLOCK(vp, 0);
    194        1.12  wrstuden 				break;
    195        1.12  wrstuden 			case VDESC_VP0_WILLRELE:
    196        1.12  wrstuden 				vrele(vp);
    197        1.12  wrstuden 				break;
    198        1.12  wrstuden 			}
    199        1.12  wrstuden 		}
    200        1.12  wrstuden 	}
    201        1.12  wrstuden 
    202        1.12  wrstuden 	return (EOPNOTSUPP);
    203        1.12  wrstuden }
    204        1.12  wrstuden 
    205         1.1   mycroft /*ARGSUSED*/
    206         1.1   mycroft int
    207       1.138  christos genfs_ebadf(void *v)
    208         1.1   mycroft {
    209         1.1   mycroft 
    210         1.1   mycroft 	return (EBADF);
    211         1.9  matthias }
    212         1.9  matthias 
    213         1.9  matthias /* ARGSUSED */
    214         1.9  matthias int
    215       1.138  christos genfs_enoioctl(void *v)
    216         1.9  matthias {
    217         1.9  matthias 
    218        1.51    atatat 	return (EPASSTHROUGH);
    219         1.6      fvdl }
    220         1.6      fvdl 
    221         1.6      fvdl 
    222         1.6      fvdl /*
    223        1.15      fvdl  * Eliminate all activity associated with the requested vnode
    224         1.6      fvdl  * and with all vnodes aliased to the requested vnode.
    225         1.6      fvdl  */
    226         1.6      fvdl int
    227        1.53     enami genfs_revoke(void *v)
    228         1.6      fvdl {
    229         1.6      fvdl 	struct vop_revoke_args /* {
    230         1.6      fvdl 		struct vnode *a_vp;
    231         1.6      fvdl 		int a_flags;
    232         1.6      fvdl 	} */ *ap = v;
    233  1.150.2.14        ad 	struct vnode *vp, *vq, **vpp;
    234  1.150.2.14        ad 	enum vtype type;
    235  1.150.2.14        ad 	dev_t dev;
    236         1.6      fvdl 
    237         1.6      fvdl #ifdef DIAGNOSTIC
    238         1.6      fvdl 	if ((ap->a_flags & REVOKEALL) == 0)
    239         1.6      fvdl 		panic("genfs_revoke: not revokeall");
    240         1.6      fvdl #endif
    241         1.6      fvdl 	vp = ap->a_vp;
    242         1.6      fvdl 
    243  1.150.2.14        ad 	mutex_enter(&vp->v_interlock);
    244  1.150.2.14        ad 	if ((vp->v_iflag & VI_CLEAN) != 0) {
    245   1.150.2.1        ad 		mutex_exit(&vp->v_interlock);
    246  1.150.2.14        ad 		return (0);
    247  1.150.2.14        ad 	} else {
    248  1.150.2.14        ad 		dev = vp->v_rdev;
    249  1.150.2.14        ad 		type = vp->v_type;
    250  1.150.2.14        ad 		mutex_exit(&vp->v_interlock);
    251  1.150.2.14        ad 	}
    252  1.150.2.14        ad 
    253  1.150.2.14        ad 	if (type != VBLK && type != VCHR)
    254  1.150.2.14        ad 		return (0);
    255  1.150.2.14        ad 
    256  1.150.2.14        ad 	vpp = &speclisth[SPECHASH(dev)];
    257  1.150.2.14        ad 	mutex_enter(&spechash_lock);
    258  1.150.2.14        ad 	for (vq = *vpp; vq != NULL;) {
    259  1.150.2.14        ad 		if (vq->v_rdev != dev || vq->v_type != type) {
    260  1.150.2.14        ad 			vq = vq->v_specnext;
    261  1.150.2.14        ad 			continue;
    262         1.6      fvdl 		}
    263  1.150.2.14        ad 		mutex_enter(&vq->v_interlock);
    264  1.150.2.14        ad 		mutex_exit(&spechash_lock);
    265  1.150.2.14        ad 		vq->v_usecount++;
    266  1.150.2.14        ad 		vclean(vq, DOCLOSE);
    267  1.150.2.14        ad 		vrelel(vq, 1, 0);
    268  1.150.2.14        ad 		mutex_enter(&spechash_lock);
    269  1.150.2.14        ad 		vq = *vpp;
    270         1.6      fvdl 	}
    271  1.150.2.14        ad 	mutex_exit(&spechash_lock);
    272  1.150.2.14        ad 
    273         1.6      fvdl 	return (0);
    274         1.6      fvdl }
    275         1.6      fvdl 
    276         1.6      fvdl /*
    277        1.12  wrstuden  * Lock the node.
    278         1.6      fvdl  */
    279         1.6      fvdl int
    280        1.53     enami genfs_lock(void *v)
    281         1.6      fvdl {
    282         1.6      fvdl 	struct vop_lock_args /* {
    283         1.6      fvdl 		struct vnode *a_vp;
    284         1.6      fvdl 		int a_flags;
    285         1.6      fvdl 	} */ *ap = v;
    286         1.6      fvdl 	struct vnode *vp = ap->a_vp;
    287         1.6      fvdl 
    288        1.86   hannken 	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
    289         1.6      fvdl }
    290         1.6      fvdl 
    291         1.6      fvdl /*
    292        1.12  wrstuden  * Unlock the node.
    293         1.6      fvdl  */
    294         1.6      fvdl int
    295        1.53     enami genfs_unlock(void *v)
    296         1.6      fvdl {
    297         1.6      fvdl 	struct vop_unlock_args /* {
    298         1.6      fvdl 		struct vnode *a_vp;
    299         1.6      fvdl 		int a_flags;
    300         1.6      fvdl 	} */ *ap = v;
    301         1.6      fvdl 	struct vnode *vp = ap->a_vp;
    302         1.6      fvdl 
    303        1.86   hannken 	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
    304        1.53     enami 	    &vp->v_interlock));
    305         1.6      fvdl }
    306         1.6      fvdl 
    307         1.6      fvdl /*
    308        1.12  wrstuden  * Return whether or not the node is locked.
    309         1.6      fvdl  */
    310         1.6      fvdl int
    311        1.53     enami genfs_islocked(void *v)
    312         1.6      fvdl {
    313         1.6      fvdl 	struct vop_islocked_args /* {
    314         1.6      fvdl 		struct vnode *a_vp;
    315         1.6      fvdl 	} */ *ap = v;
    316         1.6      fvdl 	struct vnode *vp = ap->a_vp;
    317         1.6      fvdl 
    318        1.86   hannken 	return (lockstatus(vp->v_vnlock));
    319        1.12  wrstuden }
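
/*
 * These lock ops (genfs_lock, genfs_unlock, genfs_islocked) act on
 * vp->v_vnlock rather than directly on a lock embedded in the vnode:
 * for an ordinary file system v_vnlock points at the vnode's own lock,
 * while a layered file system can point v_vnlock at the underlying
 * vnode's lock so the whole stack shares one lock.  They are wired into
 * an op vector just like the stubs above, e.g. (illustrative):
 *
 *	{ &vop_lock_desc, genfs_lock },
 *	{ &vop_unlock_desc, genfs_unlock },
 *	{ &vop_islocked_desc, genfs_islocked },
 */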
    320        1.12  wrstuden 
    321        1.12  wrstuden /*
    322        1.12  wrstuden  * Stubs to use when there is no locking to be done on the underlying object.
    323        1.12  wrstuden  */
    324        1.12  wrstuden int
    325        1.53     enami genfs_nolock(void *v)
    326        1.12  wrstuden {
    327        1.12  wrstuden 	struct vop_lock_args /* {
    328        1.12  wrstuden 		struct vnode *a_vp;
    329        1.12  wrstuden 		int a_flags;
    330       1.116  christos 		struct lwp *a_l;
    331        1.12  wrstuden 	} */ *ap = v;
    332        1.12  wrstuden 
    333        1.12  wrstuden 	/*
    334        1.12  wrstuden 	 * Since we are not using the lock manager, we must clear
    335        1.12  wrstuden 	 * the interlock here.
    336        1.12  wrstuden 	 */
    337        1.12  wrstuden 	if (ap->a_flags & LK_INTERLOCK)
    338   1.150.2.1        ad 		mutex_exit(&ap->a_vp->v_interlock);
    339        1.12  wrstuden 	return (0);
    340        1.12  wrstuden }
    341        1.12  wrstuden 
    342        1.12  wrstuden int
    343       1.138  christos genfs_nounlock(void *v)
    344        1.12  wrstuden {
    345        1.53     enami 
    346        1.12  wrstuden 	return (0);
    347        1.12  wrstuden }
    348        1.12  wrstuden 
    349        1.12  wrstuden int
    350       1.138  christos genfs_noislocked(void *v)
    351        1.12  wrstuden {
    352        1.53     enami 
    353        1.12  wrstuden 	return (0);
    354         1.8   thorpej }
    355         1.8   thorpej 
    356         1.8   thorpej /*
    357       1.142      yamt  * Local lease check.
    358         1.8   thorpej  */
    359         1.8   thorpej int
    360        1.53     enami genfs_lease_check(void *v)
    361         1.8   thorpej {
    362         1.8   thorpej 
    363         1.8   thorpej 	return (0);
    364        1.34       chs }
    365        1.34       chs 
    366        1.34       chs int
    367       1.138  christos genfs_mmap(void *v)
    368        1.34       chs {
    369        1.53     enami 
    370        1.53     enami 	return (0);
    371        1.21       chs }
    372        1.21       chs 
    373       1.118     perry static inline void
    374        1.63     enami genfs_rel_pages(struct vm_page **pgs, int npages)
    375        1.63     enami {
    376        1.63     enami 	int i;
    377        1.63     enami 
    378        1.63     enami 	for (i = 0; i < npages; i++) {
    379        1.63     enami 		struct vm_page *pg = pgs[i];
    380        1.63     enami 
    381       1.127      yamt 		if (pg == NULL || pg == PGO_DONTCARE)
    382        1.63     enami 			continue;
    383        1.63     enami 		if (pg->flags & PG_FAKE) {
    384        1.63     enami 			pg->flags |= PG_RELEASED;
    385        1.63     enami 		}
    386        1.63     enami 	}
    387   1.150.2.1        ad 	mutex_enter(&uvm_pageqlock);
    388        1.63     enami 	uvm_page_unbusy(pgs, npages);
    389   1.150.2.1        ad 	mutex_exit(&uvm_pageqlock);
    390        1.63     enami }
    391        1.63     enami 
    392        1.21       chs /*
    393        1.21       chs  * generic VM getpages routine.
    394        1.21       chs  * Return PG_BUSY pages for the given range,
    395        1.21       chs  * reading from backing store if necessary.
    396        1.21       chs  */
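
/*
 * A minimal sketch of the calling convention (illustrative only; the
 * local names are assumptions, not part of this interface): the caller
 * locks the object, and unless PGO_LOCKED was specified the lock has
 * been released by the time this returns:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	mutex_enter(&vp->v_uobj.vmobjlock);
 *	error = VOP_GETPAGES(vp, trunc_page(offset), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 *
 * On success pg points at a PG_BUSY page containing valid data.
 */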
    397        1.21       chs 
    398        1.21       chs int
    399        1.53     enami genfs_getpages(void *v)
    400        1.21       chs {
    401        1.21       chs 	struct vop_getpages_args /* {
    402        1.21       chs 		struct vnode *a_vp;
    403        1.21       chs 		voff_t a_offset;
    404        1.33       chs 		struct vm_page **a_m;
    405        1.21       chs 		int *a_count;
    406        1.21       chs 		int a_centeridx;
    407        1.21       chs 		vm_prot_t a_access_type;
    408        1.21       chs 		int a_advice;
    409        1.21       chs 		int a_flags;
    410        1.21       chs 	} */ *ap = v;
    411        1.21       chs 
    412        1.30       chs 	off_t newsize, diskeof, memeof;
    413       1.124      yamt 	off_t offset, origoffset, startoffset, endoffset;
    414        1.21       chs 	daddr_t lbn, blkno;
    415       1.120      yamt 	int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
    416        1.37       chs 	int fs_bshift, fs_bsize, dev_bshift;
    417        1.21       chs 	int flags = ap->a_flags;
    418   1.150.2.7        ad 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    419        1.21       chs 	vaddr_t kva;
    420        1.21       chs 	struct buf *bp, *mbp;
    421        1.21       chs 	struct vnode *vp = ap->a_vp;
    422        1.36       chs 	struct vnode *devvp;
    423        1.37       chs 	struct genfs_node *gp = VTOG(vp);
    424        1.37       chs 	struct uvm_object *uobj = &vp->v_uobj;
    425       1.110      yamt 	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
    426        1.77      yamt 	int pgs_size;
    427       1.128        ad 	kauth_cred_t cred = curlwp->l_cred;		/* XXXUBC curlwp */
    428       1.148   thorpej 	bool async = (flags & PGO_SYNCIO) == 0;
    429       1.148   thorpej 	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
    430       1.149   thorpej 	bool sawhole = false;
    431       1.149   thorpej 	bool has_trans = false;
    432       1.148   thorpej 	bool overwrite = (flags & PGO_OVERWRITE) != 0;
    433       1.148   thorpej 	bool blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
    434       1.126      yamt 	voff_t origvsize;
    435        1.21       chs 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    436        1.21       chs 
    437        1.30       chs 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    438        1.53     enami 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    439        1.30       chs 
    440       1.121   reinoud 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    441       1.121   reinoud 	    vp->v_type == VLNK || vp->v_type == VBLK);
    442       1.109      yamt 
    443        1.21       chs 	/* XXXUBC temp limit */
    444       1.110      yamt 	if (*ap->a_count > MAX_READ_PAGES) {
    445        1.37       chs 		panic("genfs_getpages: too many pages");
    446        1.21       chs 	}
    447        1.21       chs 
    448       1.143   hannken 	pgs = pgs_onstack;
    449       1.143   hannken 	pgs_size = sizeof(pgs_onstack);
    450       1.143   hannken 
    451       1.126      yamt startover:
    452        1.26       chs 	error = 0;
    453       1.126      yamt 	origvsize = vp->v_size;
    454        1.26       chs 	origoffset = ap->a_offset;
    455        1.26       chs 	orignpages = *ap->a_count;
    456   1.150.2.6        ad 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    457        1.26       chs 	if (flags & PGO_PASTEOF) {
    458   1.150.2.7        ad #if defined(DIAGNOSTIC)
    459   1.150.2.7        ad 		off_t writeeof;
    460   1.150.2.7        ad #endif /* defined(DIAGNOSTIC) */
    461   1.150.2.7        ad 
    462   1.150.2.6        ad 		newsize = MAX(origvsize,
    463        1.53     enami 		    origoffset + (orignpages << PAGE_SHIFT));
    464       1.123      yamt 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    465   1.150.2.7        ad #if defined(DIAGNOSTIC)
    466   1.150.2.7        ad 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    467   1.150.2.7        ad 		if (newsize > round_page(writeeof)) {
    468   1.150.2.7        ad 			panic("%s: past eof", __func__);
    469   1.150.2.7        ad 		}
    470   1.150.2.7        ad #endif /* defined(DIAGNOSTIC) */
    471        1.26       chs 	} else {
    472   1.150.2.6        ad 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    473        1.21       chs 	}
     474        1.30       chs 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    475        1.30       chs 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    476        1.30       chs 	KASSERT(orignpages > 0);
    477        1.95       chs 
    478        1.95       chs 	/*
    479        1.95       chs 	 * Bounds-check the request.
    480        1.95       chs 	 */
    481        1.95       chs 
    482        1.95       chs 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    483        1.95       chs 		if ((flags & PGO_LOCKED) == 0) {
    484   1.150.2.1        ad 			mutex_exit(&uobj->vmobjlock);
    485        1.95       chs 		}
    486        1.95       chs 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    487        1.95       chs 		    origoffset, *ap->a_count, memeof,0);
    488       1.143   hannken 		error = EINVAL;
    489       1.143   hannken 		goto out_err;
    490        1.95       chs 	}
    491        1.21       chs 
    492        1.99      yamt 	/* uobj is locked */
    493        1.99      yamt 
    494       1.103      yamt 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    495       1.121   reinoud 	    (vp->v_type != VBLK ||
    496       1.103      yamt 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    497       1.103      yamt 		int updflags = 0;
    498       1.103      yamt 
    499       1.103      yamt 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    500       1.103      yamt 			updflags = GOP_UPDATE_ACCESSED;
    501       1.103      yamt 		}
    502       1.103      yamt 		if (write) {
    503       1.103      yamt 			updflags |= GOP_UPDATE_MODIFIED;
    504       1.103      yamt 		}
    505       1.103      yamt 		if (updflags != 0) {
    506       1.103      yamt 			GOP_MARKUPDATE(vp, updflags);
    507       1.103      yamt 		}
    508       1.103      yamt 	}
    509       1.103      yamt 
    510       1.101      yamt 	if (write) {
    511       1.101      yamt 		gp->g_dirtygen++;
    512   1.150.2.8        ad 		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    513       1.101      yamt 			vn_syncer_add_to_worklist(vp, filedelay);
    514       1.101      yamt 		}
    515   1.150.2.8        ad 		if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
    516   1.150.2.8        ad 			vp->v_iflag |= VI_WRMAPDIRTY;
    517       1.103      yamt 		}
    518        1.99      yamt 	}
    519        1.99      yamt 
    520        1.21       chs 	/*
    521        1.21       chs 	 * For PGO_LOCKED requests, just return whatever's in memory.
    522        1.21       chs 	 */
    523        1.21       chs 
    524        1.21       chs 	if (flags & PGO_LOCKED) {
    525       1.127      yamt 		int nfound;
    526       1.127      yamt 
    527       1.127      yamt 		npages = *ap->a_count;
    528       1.127      yamt #if defined(DEBUG)
    529       1.127      yamt 		for (i = 0; i < npages; i++) {
    530       1.127      yamt 			pg = ap->a_m[i];
    531       1.127      yamt 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    532       1.127      yamt 		}
    533       1.127      yamt #endif /* defined(DEBUG) */
    534       1.127      yamt 		nfound = uvn_findpages(uobj, origoffset, &npages,
    535       1.127      yamt 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
    536       1.127      yamt 		KASSERT(npages == *ap->a_count);
    537       1.127      yamt 		if (nfound == 0) {
    538       1.143   hannken 			error = EBUSY;
    539       1.143   hannken 			goto out_err;
    540       1.127      yamt 		}
    541       1.146        ad 		if (!rw_tryenter(&gp->g_glock, RW_READER)) {
    542       1.127      yamt 			genfs_rel_pages(ap->a_m, npages);
    543       1.127      yamt 
    544       1.127      yamt 			/*
    545       1.127      yamt 			 * restore the array.
    546       1.127      yamt 			 */
    547       1.127      yamt 
    548       1.127      yamt 			for (i = 0; i < npages; i++) {
    549       1.127      yamt 				pg = ap->a_m[i];
    550        1.21       chs 
     551       1.127      yamt 				if (pg != NULL && pg != PGO_DONTCARE) {
    552       1.127      yamt 					ap->a_m[i] = NULL;
    553       1.127      yamt 				}
    554       1.127      yamt 			}
    555       1.127      yamt 		} else {
    556       1.146        ad 			rw_exit(&gp->g_glock);
    557       1.127      yamt 		}
    558       1.143   hannken 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    559       1.143   hannken 		goto out_err;
    560        1.21       chs 	}
    561   1.150.2.1        ad 	mutex_exit(&uobj->vmobjlock);
    562        1.21       chs 
    563        1.21       chs 	/*
    564        1.21       chs 	 * find the requested pages and make some simple checks.
    565        1.21       chs 	 * leave space in the page array for a whole block.
    566        1.21       chs 	 */
    567        1.21       chs 
    568       1.121   reinoud 	if (vp->v_type != VBLK) {
    569        1.36       chs 		fs_bshift = vp->v_mount->mnt_fs_bshift;
    570        1.36       chs 		dev_bshift = vp->v_mount->mnt_dev_bshift;
    571        1.36       chs 	} else {
    572        1.36       chs 		fs_bshift = DEV_BSHIFT;
    573        1.36       chs 		dev_bshift = DEV_BSHIFT;
    574        1.36       chs 	}
    575        1.21       chs 	fs_bsize = 1 << fs_bshift;
    576        1.21       chs 
    577        1.30       chs 	orignpages = MIN(orignpages,
    578        1.30       chs 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    579        1.21       chs 	npages = orignpages;
    580        1.21       chs 	startoffset = origoffset & ~(fs_bsize - 1);
    581        1.53     enami 	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
    582        1.53     enami 	    fs_bsize - 1) & ~(fs_bsize - 1));
    583        1.30       chs 	endoffset = MIN(endoffset, round_page(memeof));
    584        1.21       chs 	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    585        1.21       chs 
    586        1.77      yamt 	pgs_size = sizeof(struct vm_page *) *
    587        1.77      yamt 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    588        1.77      yamt 	if (pgs_size > sizeof(pgs_onstack)) {
    589       1.135      yamt 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    590        1.78    simonb 		if (pgs == NULL) {
    591       1.143   hannken 			pgs = pgs_onstack;
    592       1.143   hannken 			error = ENOMEM;
    593       1.143   hannken 			goto out_err;
    594        1.78    simonb 		}
    595        1.77      yamt 	} else {
    596       1.143   hannken 		/* pgs == pgs_onstack */
    597        1.77      yamt 		memset(pgs, 0, pgs_size);
    598        1.77      yamt 	}
    599        1.63     enami 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    600        1.63     enami 	    ridx, npages, startoffset, endoffset);
    601       1.126      yamt 
    602   1.150.2.6        ad 	if (!has_trans) {
    603   1.150.2.6        ad 		fstrans_start(vp->v_mount, FSTRANS_SHARED);
    604   1.150.2.6        ad 		has_trans = true;
    605       1.143   hannken 	}
    606       1.143   hannken 
    607       1.126      yamt 	/*
    608       1.126      yamt 	 * hold g_glock to prevent a race with truncate.
    609       1.126      yamt 	 *
    610       1.126      yamt 	 * check if our idea of v_size is still valid.
    611       1.126      yamt 	 */
    612       1.126      yamt 
    613       1.126      yamt 	if (blockalloc) {
    614       1.146        ad 		rw_enter(&gp->g_glock, RW_WRITER);
    615       1.126      yamt 	} else {
    616       1.146        ad 		rw_enter(&gp->g_glock, RW_READER);
    617       1.126      yamt 	}
    618   1.150.2.1        ad 	mutex_enter(&uobj->vmobjlock);
    619       1.126      yamt 	if (vp->v_size < origvsize) {
    620       1.146        ad 		rw_exit(&gp->g_glock);
    621       1.126      yamt 		if (pgs != pgs_onstack)
    622       1.135      yamt 			kmem_free(pgs, pgs_size);
    623       1.126      yamt 		goto startover;
    624       1.126      yamt 	}
    625       1.126      yamt 
    626        1.63     enami 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    627        1.63     enami 	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
    628       1.146        ad 		rw_exit(&gp->g_glock);
    629        1.63     enami 		KASSERT(async != 0);
    630        1.63     enami 		genfs_rel_pages(&pgs[ridx], orignpages);
    631   1.150.2.1        ad 		mutex_exit(&uobj->vmobjlock);
    632       1.143   hannken 		error = EBUSY;
    633       1.143   hannken 		goto out_err;
    634        1.63     enami 	}
    635        1.21       chs 
    636        1.21       chs 	/*
    637        1.21       chs 	 * if the pages are already resident, just return them.
    638        1.21       chs 	 */
    639        1.21       chs 
    640        1.21       chs 	for (i = 0; i < npages; i++) {
    641        1.97  christos 		struct vm_page *pg1 = pgs[ridx + i];
    642        1.21       chs 
    643        1.97  christos 		if ((pg1->flags & PG_FAKE) ||
    644       1.100      yamt 		    (blockalloc && (pg1->flags & PG_RDONLY))) {
    645        1.21       chs 			break;
    646        1.21       chs 		}
    647        1.21       chs 	}
    648        1.21       chs 	if (i == npages) {
    649       1.146        ad 		rw_exit(&gp->g_glock);
    650        1.21       chs 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    651        1.26       chs 		npages += ridx;
    652       1.110      yamt 		goto out;
    653        1.21       chs 	}
    654        1.21       chs 
    655        1.21       chs 	/*
    656        1.37       chs 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    657        1.37       chs 	 */
    658        1.37       chs 
    659       1.124      yamt 	if (overwrite) {
    660       1.146        ad 		rw_exit(&gp->g_glock);
    661        1.37       chs 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    662        1.37       chs 
    663        1.37       chs 		for (i = 0; i < npages; i++) {
    664        1.97  christos 			struct vm_page *pg1 = pgs[ridx + i];
    665        1.37       chs 
    666        1.97  christos 			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
    667        1.37       chs 		}
    668        1.37       chs 		npages += ridx;
    669        1.37       chs 		goto out;
    670        1.37       chs 	}
    671        1.37       chs 
    672        1.37       chs 	/*
    673        1.21       chs 	 * the page wasn't resident and we're not overwriting,
    674        1.21       chs 	 * so we're going to have to do some i/o.
    675        1.21       chs 	 * find any additional pages needed to cover the expanded range.
    676        1.21       chs 	 */
    677        1.21       chs 
    678        1.35       chs 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    679        1.35       chs 	if (startoffset != origoffset || npages != orignpages) {
    680        1.21       chs 
    681        1.21       chs 		/*
    682        1.37       chs 		 * we need to avoid deadlocks caused by locking
    683        1.21       chs 		 * additional pages at lower offsets than pages we
    684        1.37       chs 		 * already have locked.  unlock them all and start over.
    685        1.21       chs 		 */
    686        1.21       chs 
    687        1.63     enami 		genfs_rel_pages(&pgs[ridx], orignpages);
    688        1.77      yamt 		memset(pgs, 0, pgs_size);
    689        1.21       chs 
    690        1.21       chs 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    691        1.53     enami 		    startoffset, endoffset, 0,0);
    692        1.21       chs 		npgs = npages;
    693        1.63     enami 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    694        1.63     enami 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    695       1.146        ad 			rw_exit(&gp->g_glock);
    696        1.63     enami 			KASSERT(async != 0);
    697        1.63     enami 			genfs_rel_pages(pgs, npages);
    698   1.150.2.1        ad 			mutex_exit(&uobj->vmobjlock);
    699       1.143   hannken 			error = EBUSY;
    700       1.143   hannken 			goto out_err;
    701        1.63     enami 		}
    702        1.21       chs 	}
    703   1.150.2.1        ad 	mutex_exit(&uobj->vmobjlock);
    704        1.21       chs 
    705        1.21       chs 	/*
    706        1.21       chs 	 * read the desired page(s).
    707        1.21       chs 	 */
    708        1.21       chs 
    709        1.21       chs 	totalbytes = npages << PAGE_SHIFT;
    710        1.30       chs 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    711        1.21       chs 	tailbytes = totalbytes - bytes;
    712        1.21       chs 	skipbytes = 0;
    713        1.21       chs 
    714        1.53     enami 	kva = uvm_pagermapin(pgs, npages,
    715        1.53     enami 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    716        1.21       chs 
    717  1.150.2.12        ad 	mbp = getiobuf(vp, true);
    718        1.21       chs 	mbp->b_bufsize = totalbytes;
    719        1.21       chs 	mbp->b_data = (void *)kva;
    720        1.21       chs 	mbp->b_resid = mbp->b_bcount = bytes;
    721  1.150.2.12        ad 	mbp->b_cflags = BC_BUSY;
    722  1.150.2.12        ad 	if (async) {
    723  1.150.2.12        ad 		mbp->b_flags = B_READ | B_ASYNC;
    724  1.150.2.12        ad 		mbp->b_iodone = uvm_aio_biodone;
    725  1.150.2.12        ad 	} else {
    726  1.150.2.12        ad 		mbp->b_flags = B_READ;
    727  1.150.2.12        ad 		mbp->b_iodone = NULL;
    728  1.150.2.12        ad 	}
    729       1.120      yamt 	if (async)
    730       1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    731       1.120      yamt 	else
    732       1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    733        1.21       chs 
    734        1.21       chs 	/*
    735        1.31       chs 	 * if EOF is in the middle of the range, zero the part past EOF.
    736   1.150.2.7        ad 	 * skip over pages which are not PG_FAKE since in that case they have
    737   1.150.2.7        ad 	 * valid data that we need to preserve.
    738        1.21       chs 	 */
    739        1.21       chs 
    740   1.150.2.7        ad 	tailstart = bytes;
    741   1.150.2.7        ad 	while (tailbytes > 0) {
    742   1.150.2.7        ad 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    743   1.150.2.7        ad 
    744   1.150.2.7        ad 		KASSERT(len <= tailbytes);
    745   1.150.2.7        ad 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    746   1.150.2.7        ad 			memset((void *)(kva + tailstart), 0, len);
    747   1.150.2.7        ad 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    748   1.150.2.7        ad 			    kva, tailstart, len, 0);
    749        1.38       chs 		}
    750   1.150.2.7        ad 		tailstart += len;
    751   1.150.2.7        ad 		tailbytes -= len;
    752        1.21       chs 	}
    753        1.21       chs 
    754        1.21       chs 	/*
    755        1.21       chs 	 * now loop over the pages, reading as needed.
    756        1.21       chs 	 */
    757        1.21       chs 
    758        1.21       chs 	bp = NULL;
    759        1.21       chs 	for (offset = startoffset;
    760        1.53     enami 	    bytes > 0;
    761        1.53     enami 	    offset += iobytes, bytes -= iobytes) {
    762        1.21       chs 
    763        1.21       chs 		/*
    764        1.21       chs 		 * skip pages which don't need to be read.
    765        1.21       chs 		 */
    766        1.21       chs 
    767        1.21       chs 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    768       1.100      yamt 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    769        1.21       chs 			size_t b;
    770        1.21       chs 
    771        1.24       chs 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    772       1.100      yamt 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    773       1.149   thorpej 				sawhole = true;
    774       1.100      yamt 			}
    775        1.26       chs 			b = MIN(PAGE_SIZE, bytes);
    776        1.21       chs 			offset += b;
    777        1.21       chs 			bytes -= b;
    778        1.21       chs 			skipbytes += b;
    779        1.21       chs 			pidx++;
    780        1.21       chs 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    781        1.53     enami 			    offset, 0,0,0);
    782        1.21       chs 			if (bytes == 0) {
    783        1.21       chs 				goto loopdone;
    784        1.21       chs 			}
    785        1.21       chs 		}
    786        1.21       chs 
    787        1.21       chs 		/*
    788        1.21       chs 		 * bmap the file to find out the blkno to read from and
    789        1.21       chs 		 * how much we can read in one i/o.  if bmap returns an error,
    790        1.21       chs 		 * skip the rest of the top-level i/o.
    791        1.21       chs 		 */
    792        1.21       chs 
    793        1.21       chs 		lbn = offset >> fs_bshift;
    794        1.36       chs 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    795        1.21       chs 		if (error) {
    796        1.21       chs 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    797        1.53     enami 			    lbn, error,0,0);
    798        1.21       chs 			skipbytes += bytes;
    799        1.21       chs 			goto loopdone;
    800        1.21       chs 		}
    801        1.21       chs 
    802        1.21       chs 		/*
    803        1.21       chs 		 * see how many pages can be read with this i/o.
    804        1.21       chs 		 * reduce the i/o size if necessary to avoid
    805        1.21       chs 		 * overwriting pages with valid data.
    806        1.21       chs 		 */
    807        1.21       chs 
    808        1.26       chs 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    809        1.26       chs 		    bytes);
    810        1.21       chs 		if (offset + iobytes > round_page(offset)) {
    811        1.21       chs 			pcount = 1;
    812        1.21       chs 			while (pidx + pcount < npages &&
    813        1.53     enami 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    814        1.21       chs 				pcount++;
    815        1.21       chs 			}
    816        1.26       chs 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    817        1.53     enami 			    (offset - trunc_page(offset)));
    818        1.21       chs 		}
    819        1.21       chs 
    820        1.21       chs 		/*
    821        1.53     enami 		 * if this block isn't allocated, zero it instead of
    822       1.100      yamt 		 * reading it.  unless we are going to allocate blocks,
    823       1.100      yamt 		 * mark the pages we zeroed PG_RDONLY.
    824        1.21       chs 		 */
    825        1.21       chs 
    826        1.21       chs 		if (blkno < 0) {
    827        1.53     enami 			int holepages = (round_page(offset + iobytes) -
    828        1.53     enami 			    trunc_page(offset)) >> PAGE_SHIFT;
    829        1.21       chs 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    830        1.21       chs 
    831       1.149   thorpej 			sawhole = true;
    832        1.21       chs 			memset((char *)kva + (offset - startoffset), 0,
    833        1.53     enami 			    iobytes);
    834        1.21       chs 			skipbytes += iobytes;
    835        1.21       chs 
    836        1.35       chs 			for (i = 0; i < holepages; i++) {
    837        1.35       chs 				if (write) {
    838        1.35       chs 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    839       1.100      yamt 				}
    840       1.100      yamt 				if (!blockalloc) {
    841        1.21       chs 					pgs[pidx + i]->flags |= PG_RDONLY;
    842        1.21       chs 				}
    843        1.21       chs 			}
    844        1.21       chs 			continue;
    845        1.21       chs 		}
    846        1.21       chs 
    847        1.21       chs 		/*
    848        1.21       chs 		 * allocate a sub-buf for this piece of the i/o
    849        1.21       chs 		 * (or just use mbp if there's only 1 piece),
    850        1.21       chs 		 * and start it going.
    851        1.21       chs 		 */
    852        1.21       chs 
    853        1.21       chs 		if (offset == startoffset && iobytes == bytes) {
    854        1.21       chs 			bp = mbp;
    855        1.21       chs 		} else {
    856  1.150.2.12        ad 			bp = getiobuf(vp, true);
    857       1.120      yamt 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    858        1.21       chs 		}
    859       1.112      yamt 		bp->b_lblkno = 0;
    860        1.21       chs 
    861        1.21       chs 		/* adjust physical blkno for partial blocks */
    862        1.25      fvdl 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    863        1.53     enami 		    dev_bshift);
    864        1.21       chs 
    865        1.53     enami 		UVMHIST_LOG(ubchist,
    866        1.53     enami 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    867        1.53     enami 		    bp, offset, iobytes, bp->b_blkno);
    868        1.21       chs 
    869       1.109      yamt 		VOP_STRATEGY(devvp, bp);
    870        1.21       chs 	}
    871        1.21       chs 
    872        1.21       chs loopdone:
    873       1.120      yamt 	nestiobuf_done(mbp, skipbytes, error);
    874        1.21       chs 	if (async) {
    875        1.32       chs 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    876       1.146        ad 		rw_exit(&gp->g_glock);
    877       1.143   hannken 		error = 0;
    878       1.143   hannken 		goto out_err;
    879        1.21       chs 	}
    880        1.21       chs 	if (bp != NULL) {
    881        1.21       chs 		error = biowait(mbp);
    882        1.21       chs 	}
    883       1.119      yamt 	putiobuf(mbp);
    884        1.21       chs 	uvm_pagermapout(kva, npages);
    885        1.21       chs 
    886        1.21       chs 	/*
     887        1.21       chs 	 * if we encountered a hole then we have to do a little more work.
    888        1.21       chs 	 * for read faults, we marked the page PG_RDONLY so that future
    889        1.21       chs 	 * write accesses to the page will fault again.
    890        1.21       chs 	 * for write faults, we must make sure that the backing store for
    891        1.21       chs 	 * the page is completely allocated while the pages are locked.
    892        1.21       chs 	 */
    893        1.21       chs 
    894       1.100      yamt 	if (!error && sawhole && blockalloc) {
    895        1.37       chs 		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
    896        1.53     enami 		    cred);
    897        1.37       chs 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    898        1.37       chs 		    startoffset, npages << PAGE_SHIFT, error,0);
    899       1.100      yamt 		if (!error) {
    900       1.100      yamt 			for (i = 0; i < npages; i++) {
    901       1.100      yamt 				if (pgs[i] == NULL) {
    902       1.100      yamt 					continue;
    903       1.100      yamt 				}
    904       1.100      yamt 				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
    905       1.100      yamt 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    906       1.100      yamt 				    pgs[i],0,0,0);
    907       1.100      yamt 			}
    908       1.100      yamt 		}
    909        1.21       chs 	}
    910       1.146        ad 	rw_exit(&gp->g_glock);
    911   1.150.2.1        ad 	mutex_enter(&uobj->vmobjlock);
    912        1.21       chs 
    913        1.21       chs 	/*
    914        1.21       chs 	 * we're almost done!  release the pages...
    915        1.21       chs 	 * for errors, we free the pages.
    916        1.21       chs 	 * otherwise we activate them and mark them as valid and clean.
    917        1.21       chs 	 * also, unbusy pages that were not actually requested.
    918        1.21       chs 	 */
    919        1.21       chs 
    920        1.21       chs 	if (error) {
    921        1.21       chs 		for (i = 0; i < npages; i++) {
    922        1.21       chs 			if (pgs[i] == NULL) {
    923        1.21       chs 				continue;
    924        1.21       chs 			}
    925        1.21       chs 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    926        1.53     enami 			    pgs[i], pgs[i]->flags, 0,0);
    927        1.26       chs 			if (pgs[i]->flags & PG_FAKE) {
    928        1.37       chs 				pgs[i]->flags |= PG_RELEASED;
    929        1.21       chs 			}
    930        1.21       chs 		}
    931   1.150.2.1        ad 		mutex_enter(&uvm_pageqlock);
    932        1.37       chs 		uvm_page_unbusy(pgs, npages);
    933   1.150.2.1        ad 		mutex_exit(&uvm_pageqlock);
    934   1.150.2.1        ad 		mutex_exit(&uobj->vmobjlock);
    935        1.21       chs 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    936       1.143   hannken 		goto out_err;
    937        1.21       chs 	}
    938        1.21       chs 
    939        1.37       chs out:
    940        1.21       chs 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    941       1.143   hannken 	error = 0;
    942   1.150.2.1        ad 	mutex_enter(&uvm_pageqlock);
    943        1.21       chs 	for (i = 0; i < npages; i++) {
    944        1.37       chs 		pg = pgs[i];
    945        1.37       chs 		if (pg == NULL) {
    946        1.21       chs 			continue;
    947        1.21       chs 		}
    948        1.21       chs 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    949        1.53     enami 		    pg, pg->flags, 0,0);
    950        1.37       chs 		if (pg->flags & PG_FAKE && !overwrite) {
    951        1.37       chs 			pg->flags &= ~(PG_FAKE);
    952        1.21       chs 			pmap_clear_modify(pgs[i]);
    953        1.21       chs 		}
    954       1.100      yamt 		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    955        1.21       chs 		if (i < ridx || i >= ridx + orignpages || async) {
    956        1.21       chs 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    957        1.53     enami 			    pg, pg->offset,0,0);
    958        1.37       chs 			if (pg->flags & PG_WANTED) {
    959        1.37       chs 				wakeup(pg);
    960        1.37       chs 			}
    961        1.37       chs 			if (pg->flags & PG_FAKE) {
    962        1.37       chs 				KASSERT(overwrite);
    963        1.37       chs 				uvm_pagezero(pg);
    964        1.37       chs 			}
    965        1.37       chs 			if (pg->flags & PG_RELEASED) {
    966        1.37       chs 				uvm_pagefree(pg);
    967        1.26       chs 				continue;
    968        1.21       chs 			}
    969       1.129      yamt 			uvm_pageenqueue(pg);
    970        1.37       chs 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    971        1.37       chs 			UVM_PAGE_OWN(pg, NULL);
    972        1.21       chs 		}
    973        1.21       chs 	}
    974   1.150.2.1        ad 	mutex_exit(&uvm_pageqlock);
    975   1.150.2.1        ad 	mutex_exit(&uobj->vmobjlock);
    976        1.21       chs 	if (ap->a_m != NULL) {
    977        1.21       chs 		memcpy(ap->a_m, &pgs[ridx],
    978        1.53     enami 		    orignpages * sizeof(struct vm_page *));
    979        1.21       chs 	}
    980       1.143   hannken 
    981       1.143   hannken out_err:
    982        1.77      yamt 	if (pgs != pgs_onstack)
    983       1.135      yamt 		kmem_free(pgs, pgs_size);
    984       1.143   hannken 	if (has_trans)
    985       1.143   hannken 		fstrans_done(vp->v_mount);
    986       1.143   hannken 	return (error);
    987        1.21       chs }
    988        1.21       chs 
    989        1.21       chs /*
    990        1.21       chs  * generic VM putpages routine.
    991        1.21       chs  * Write the given range of pages to backing store.
    992        1.37       chs  *
    993        1.37       chs  * => "offhi == 0" means flush all pages at or after "offlo".
    994       1.140     pooka  * => object should be locked by caller.  we return with the
    995       1.140     pooka  *      object unlocked.
    996        1.37       chs  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    997        1.37       chs  *	thus, a caller might want to unlock higher level resources
    998        1.37       chs  *	(e.g. vm_map) before calling flush.
    999       1.140     pooka  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
   1000        1.37       chs  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
   1001        1.37       chs  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
   1002        1.37       chs  *	that new pages are inserted on the tail end of the list.   thus,
   1003        1.37       chs  *	we can make a complete pass through the object in one go by starting
   1004        1.37       chs  *	at the head and working towards the tail (new pages are put in
   1005        1.37       chs  *	front of us).
   1006        1.37       chs  * => NOTE: we are allowed to lock the page queues, so the caller
   1007        1.37       chs  *	must not be holding the page queue lock.
   1008        1.37       chs  *
   1009        1.37       chs  * note on "cleaning" object and PG_BUSY pages:
   1010        1.37       chs  *	this routine is holding the lock on the object.   the only time
   1011        1.37       chs  *	that it can run into a PG_BUSY page that it does not own is if
   1012        1.37       chs  *	some other process has started I/O on the page (e.g. either
   1013        1.37       chs  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
   1014        1.37       chs  *	in, then it can not be dirty (!PG_CLEAN) because no one has
   1015        1.37       chs  *	had a chance to modify it yet.    if the PG_BUSY page is being
   1016        1.37       chs  *	paged out then it means that someone else has already started
   1017        1.53     enami  *	cleaning the page for us (how nice!).    in this case, if we
   1018        1.37       chs  *	have syncio specified, then after we make our pass through the
   1019        1.53     enami  *	object we need to wait for the other PG_BUSY pages to clear
   1020        1.37       chs  *	off (i.e. we need to do an iosync).   also note that once a
   1021        1.37       chs  *	page is PG_BUSY it must stay in its object until it is un-busyed.
   1022        1.37       chs  *
   1023        1.37       chs  * note on page traversal:
   1024        1.37       chs  *	we can traverse the pages in an object either by going down the
   1025        1.37       chs  *	linked list in "uobj->memq", or we can go over the address range
    1026        1.37       chs  *	page by page, doing hash table lookups for each address.    depending
   1027        1.53     enami  *	on how many pages are in the object it may be cheaper to do one
   1028        1.37       chs  *	or the other.   we set "by_list" to true if we are using memq.
   1029        1.37       chs  *	if the cost of a hash lookup was equal to the cost of the list
   1030        1.37       chs  *	traversal we could compare the number of pages in the start->stop
   1031        1.37       chs  *	range to the total number of pages in the object.   however, it
   1032        1.37       chs  *	seems that a hash table lookup is more expensive than the linked
   1033        1.53     enami  *	list traversal, so we multiply the number of pages in the
   1034        1.37       chs  *	range by an estimate of the relatively higher cost of the hash lookup.
   1035        1.21       chs  */
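
/*
 * A minimal usage sketch (illustrative, not a verbatim caller): flush
 * and free every page of a vnode and wait for the I/O to complete:
 *
 *	mutex_enter(&vp->v_uobj.vmobjlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 *
 * As noted above, the object is locked by the caller and has been
 * unlocked by the time genfs_putpages() returns.
 */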
   1036        1.21       chs 
   1037        1.21       chs int
   1038        1.53     enami genfs_putpages(void *v)
   1039        1.21       chs {
   1040        1.21       chs 	struct vop_putpages_args /* {
   1041        1.21       chs 		struct vnode *a_vp;
   1042        1.37       chs 		voff_t a_offlo;
   1043        1.37       chs 		voff_t a_offhi;
   1044        1.21       chs 		int a_flags;
   1045        1.21       chs 	} */ *ap = v;
   1046   1.150.2.6        ad 
   1047   1.150.2.6        ad 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
   1048   1.150.2.6        ad 	    ap->a_flags, NULL);
   1049   1.150.2.6        ad }
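/*
 * (Editorial usage sketch, hedged.)  Callers of VOP_PUTPAGES() are expected
 * to enter with the vnode's interlock (the uvm_object lock) held; the lock
 * is released before this routine returns.  This is the same
 * enter-interlock-then-call pattern genfs_do_directio() uses further down
 * in this file; a whole-vnode synchronous flush might look like:
 *
 *	mutex_enter(&vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
 */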
   1050   1.150.2.6        ad 
   1051   1.150.2.6        ad int
   1052   1.150.2.6        ad genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff, int flags,
   1053   1.150.2.6        ad 	struct vm_page **busypg)
   1054   1.150.2.6        ad {
   1055        1.37       chs 	struct uvm_object *uobj = &vp->v_uobj;
   1056   1.150.2.1        ad 	kmutex_t *slock = &uobj->vmobjlock;
   1057        1.37       chs 	off_t off;
   1058        1.76       tls 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1059       1.139  christos #define maxpages (MAXPHYS >> PAGE_SHIFT)
   1060   1.150.2.8        ad 	int i, error, npages, nback;
   1061        1.37       chs 	int freeflag;
   1062        1.60     enami 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1063       1.148   thorpej 	bool wasclean, by_list, needs_clean, yld;
   1064       1.148   thorpej 	bool async = (flags & PGO_SYNCIO) == 0;
   1065   1.150.2.4        ad 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
   1066        1.70  christos 	struct lwp *l = curlwp ? curlwp : &lwp0;
   1067       1.101      yamt 	struct genfs_node *gp = VTOG(vp);
   1068       1.101      yamt 	int dirtygen;
   1069       1.149   thorpej 	bool modified = false;
   1070       1.149   thorpej 	bool has_trans = false;
   1071       1.148   thorpej 	bool cleanall;
   1072        1.70  christos 
   1073        1.37       chs 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1074        1.37       chs 
   1075        1.37       chs 	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1076        1.37       chs 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1077        1.37       chs 	KASSERT(startoff < endoff || endoff == 0);
   1078        1.37       chs 
   1079        1.37       chs 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1080        1.37       chs 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1081       1.103      yamt 
   1082   1.150.2.8        ad 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
   1083   1.150.2.8        ad 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
   1084        1.37       chs 	if (uobj->uo_npages == 0) {
   1085   1.150.2.8        ad 		if (vp->v_iflag & VI_ONWORKLST) {
   1086   1.150.2.8        ad 			vp->v_iflag &= ~VI_WRMAPDIRTY;
   1087       1.137   reinoud 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1088       1.137   reinoud 				vn_syncer_remove_from_worklist(vp);
   1089        1.37       chs 		}
   1090   1.150.2.1        ad 		mutex_exit(slock);
   1091        1.53     enami 		return (0);
   1092        1.37       chs 	}
   1093        1.37       chs 
   1094        1.37       chs 	/*
   1095        1.37       chs 	 * the vnode has pages, set up to process the request.
   1096        1.37       chs 	 */
   1097        1.37       chs 
   1098       1.143   hannken 	if ((flags & PGO_CLEANIT) != 0) {
   1099   1.150.2.1        ad 		mutex_exit(slock);
   1100   1.150.2.6        ad 		if (pagedaemon) {
   1101       1.144   hannken 			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
   1102   1.150.2.6        ad 			if (error)
   1103   1.150.2.6        ad 				return error;
   1104   1.150.2.6        ad 		} else
   1105   1.150.2.6        ad 			fstrans_start(vp->v_mount, FSTRANS_LAZY);
   1106       1.149   thorpej 		has_trans = true;
   1107   1.150.2.1        ad 		mutex_enter(slock);
   1108       1.143   hannken 	}
   1109       1.143   hannken 
   1110        1.37       chs 	error = 0;
   1111        1.44       chs 	wasclean = (vp->v_numoutput == 0);
   1112        1.37       chs 	off = startoff;
   1113        1.37       chs 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1114        1.37       chs 		endoff = trunc_page(LLONG_MAX);
   1115        1.37       chs 	}
   1116        1.37       chs 	by_list = (uobj->uo_npages <=
   1117        1.37       chs 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
   1118        1.37       chs 
   1119       1.102      yamt #if !defined(DEBUG)
   1120       1.102      yamt 	/*
   1121       1.102      yamt 	 * if this vnode is known not to have dirty pages,
   1122       1.102      yamt 	 * don't bother to clean it out.
   1123       1.102      yamt 	 */
   1124       1.102      yamt 
   1125   1.150.2.8        ad 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
   1126       1.102      yamt 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1127       1.102      yamt 			goto skip_scan;
   1128       1.102      yamt 		}
   1129       1.102      yamt 		flags &= ~PGO_CLEANIT;
   1130       1.102      yamt 	}
   1131       1.102      yamt #endif /* !defined(DEBUG) */
   1132       1.102      yamt 
   1133        1.37       chs 	/*
   1134        1.37       chs 	 * start the loop.  when scanning by list, hold the last page
   1135        1.37       chs 	 * in the list before we start.  pages allocated after we start
   1136        1.37       chs 	 * will be added to the end of the list, so we can stop at the
   1137        1.37       chs 	 * current last page.
   1138        1.37       chs 	 */
   1139        1.37       chs 
   1140       1.104      yamt 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1141       1.104      yamt 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1142   1.150.2.8        ad 	    (vp->v_iflag & VI_ONWORKLST) != 0;
   1143       1.101      yamt 	dirtygen = gp->g_dirtygen;
   1144        1.56     enami 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1145        1.37       chs 	if (by_list) {
   1146       1.113      yamt 		curmp.uobject = uobj;
   1147       1.113      yamt 		curmp.offset = (voff_t)-1;
   1148       1.113      yamt 		curmp.flags = PG_BUSY;
   1149       1.113      yamt 		endmp.uobject = uobj;
   1150       1.113      yamt 		endmp.offset = (voff_t)-1;
   1151       1.113      yamt 		endmp.flags = PG_BUSY;
   1152        1.37       chs 		pg = TAILQ_FIRST(&uobj->memq);
   1153        1.37       chs 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
   1154   1.150.2.3        ad 		uvm_lwp_hold(l);
   1155        1.37       chs 	} else {
   1156        1.37       chs 		pg = uvm_pagelookup(uobj, off);
   1157        1.37       chs 	}
   1158        1.37       chs 	nextpg = NULL;
   1159        1.37       chs 	while (by_list || off < endoff) {
   1160        1.37       chs 
   1161        1.37       chs 		/*
   1162        1.37       chs 		 * if the current page is not interesting, move on to the next.
   1163        1.37       chs 		 */
   1164        1.37       chs 
   1165        1.37       chs 		KASSERT(pg == NULL || pg->uobject == uobj);
   1166        1.37       chs 		KASSERT(pg == NULL ||
   1167        1.53     enami 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1168        1.53     enami 		    (pg->flags & PG_BUSY) != 0);
   1169        1.37       chs 		if (by_list) {
   1170        1.37       chs 			if (pg == &endmp) {
   1171        1.37       chs 				break;
   1172        1.37       chs 			}
   1173        1.37       chs 			if (pg->offset < startoff || pg->offset >= endoff ||
   1174        1.37       chs 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1175       1.101      yamt 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1176       1.149   thorpej 					wasclean = false;
   1177       1.101      yamt 				}
   1178        1.37       chs 				pg = TAILQ_NEXT(pg, listq);
   1179        1.37       chs 				continue;
   1180        1.37       chs 			}
   1181        1.37       chs 			off = pg->offset;
   1182       1.101      yamt 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1183       1.101      yamt 			if (pg != NULL) {
   1184       1.149   thorpej 				wasclean = false;
   1185       1.101      yamt 			}
   1186        1.37       chs 			off += PAGE_SIZE;
   1187        1.37       chs 			if (off < endoff) {
   1188        1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1189        1.37       chs 			}
   1190        1.37       chs 			continue;
   1191        1.37       chs 		}
   1192        1.21       chs 
   1193        1.37       chs 		/*
   1194        1.37       chs 		 * if the current page needs to be cleaned and it's busy,
   1195        1.37       chs 		 * wait for it to become unbusy.
   1196        1.37       chs 		 */
   1197        1.37       chs 
   1198        1.97  christos 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1199        1.56     enami 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1200        1.97  christos 		if (pg->flags & PG_BUSY || yld) {
   1201        1.72  perseant 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1202        1.72  perseant 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1203        1.72  perseant 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1204        1.72  perseant 				error = EDEADLK;
   1205   1.150.2.6        ad 				if (busypg != NULL)
   1206   1.150.2.6        ad 					*busypg = pg;
   1207        1.72  perseant 				break;
   1208        1.72  perseant 			}
   1209  1.150.2.13      yamt 			if (pagedaemon) {
   1210  1.150.2.13      yamt 				/*
   1211  1.150.2.13      yamt 				 * someone has taken the page while we
   1212  1.150.2.13      yamt 				 * dropped the lock for fstrans_start.
   1213  1.150.2.13      yamt 				 */
   1214  1.150.2.13      yamt 				break;
   1215  1.150.2.13      yamt 			}
   1216        1.37       chs 			if (by_list) {
   1217        1.37       chs 				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
   1218        1.37       chs 				UVMHIST_LOG(ubchist, "curmp next %p",
   1219        1.53     enami 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1220        1.37       chs 			}
   1221        1.97  christos 			if (yld) {
   1222   1.150.2.1        ad 				mutex_exit(slock);
   1223       1.145        ad 				preempt();
   1224   1.150.2.1        ad 				mutex_enter(slock);
   1225        1.49       chs 			} else {
   1226        1.49       chs 				pg->flags |= PG_WANTED;
   1227        1.49       chs 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1228   1.150.2.1        ad 				mutex_enter(slock);
   1229        1.49       chs 			}
   1230        1.37       chs 			if (by_list) {
   1231        1.37       chs 				UVMHIST_LOG(ubchist, "after next %p",
   1232        1.53     enami 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1233        1.37       chs 				pg = TAILQ_NEXT(&curmp, listq);
   1234        1.37       chs 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1235        1.37       chs 			} else {
   1236        1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1237        1.37       chs 			}
   1238        1.37       chs 			continue;
   1239        1.49       chs 		}
   1240        1.49       chs 
   1241        1.49       chs 		/*
   1242        1.49       chs 		 * if we're freeing, remove all mappings of the page now.
    1243        1.49       chs 		 * if we're cleaning, check if the page needs to be cleaned.
   1244        1.49       chs 		 */
   1245        1.49       chs 
   1246        1.49       chs 		if (flags & PGO_FREE) {
   1247        1.49       chs 			pmap_page_protect(pg, VM_PROT_NONE);
   1248       1.101      yamt 		} else if (flags & PGO_CLEANIT) {
   1249       1.101      yamt 
   1250       1.101      yamt 			/*
   1251       1.101      yamt 			 * if we still have some hope to pull this vnode off
    1252       1.101      yamt 			 * the syncer queue, write-protect the page.
   1253       1.101      yamt 			 */
   1254       1.101      yamt 
   1255       1.104      yamt 			if (cleanall && wasclean &&
   1256       1.104      yamt 			    gp->g_dirtygen == dirtygen) {
   1257       1.104      yamt 
   1258       1.104      yamt 				/*
   1259       1.104      yamt 				 * uobj pages get wired only by uvm_fault
   1260       1.104      yamt 				 * where uobj is locked.
   1261       1.104      yamt 				 */
   1262       1.104      yamt 
   1263       1.104      yamt 				if (pg->wire_count == 0) {
   1264       1.104      yamt 					pmap_page_protect(pg,
   1265       1.104      yamt 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1266       1.104      yamt 				} else {
   1267       1.149   thorpej 					cleanall = false;
   1268       1.104      yamt 				}
   1269       1.101      yamt 			}
   1270        1.49       chs 		}
   1271       1.101      yamt 
   1272        1.49       chs 		if (flags & PGO_CLEANIT) {
   1273        1.49       chs 			needs_clean = pmap_clear_modify(pg) ||
   1274        1.53     enami 			    (pg->flags & PG_CLEAN) == 0;
   1275        1.49       chs 			pg->flags |= PG_CLEAN;
   1276        1.49       chs 		} else {
   1277       1.149   thorpej 			needs_clean = false;
   1278        1.37       chs 		}
   1279        1.37       chs 
   1280        1.37       chs 		/*
   1281        1.37       chs 		 * if we're cleaning, build a cluster.
   1282        1.37       chs 		 * the cluster will consist of pages which are currently dirty,
   1283        1.37       chs 		 * but they will be returned to us marked clean.
   1284        1.37       chs 		 * if not cleaning, just operate on the one page.
   1285        1.37       chs 		 */
   1286        1.37       chs 
   1287        1.37       chs 		if (needs_clean) {
   1288   1.150.2.8        ad 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1289       1.149   thorpej 			wasclean = false;
   1290        1.37       chs 			memset(pgs, 0, sizeof(pgs));
   1291        1.37       chs 			pg->flags |= PG_BUSY;
   1292        1.37       chs 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1293        1.37       chs 
   1294        1.37       chs 			/*
   1295        1.37       chs 			 * first look backward.
   1296        1.37       chs 			 */
   1297        1.37       chs 
   1298        1.60     enami 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1299        1.37       chs 			nback = npages;
   1300        1.37       chs 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1301        1.37       chs 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1302        1.37       chs 			if (nback) {
   1303        1.37       chs 				memmove(&pgs[0], &pgs[npages - nback],
   1304        1.37       chs 				    nback * sizeof(pgs[0]));
   1305        1.47     enami 				if (npages - nback < nback)
   1306        1.47     enami 					memset(&pgs[nback], 0,
   1307        1.47     enami 					    (npages - nback) * sizeof(pgs[0]));
   1308        1.47     enami 				else
   1309        1.47     enami 					memset(&pgs[npages - nback], 0,
   1310        1.47     enami 					    nback * sizeof(pgs[0]));
   1311        1.37       chs 			}
   1312        1.37       chs 
   1313        1.37       chs 			/*
   1314        1.37       chs 			 * then plug in our page of interest.
   1315        1.37       chs 			 */
   1316        1.37       chs 
   1317        1.37       chs 			pgs[nback] = pg;
   1318        1.37       chs 
   1319        1.37       chs 			/*
   1320        1.37       chs 			 * then look forward to fill in the remaining space in
   1321        1.37       chs 			 * the array of pages.
   1322        1.37       chs 			 */
   1323        1.37       chs 
   1324        1.60     enami 			npages = maxpages - nback - 1;
   1325        1.37       chs 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1326        1.37       chs 			    &pgs[nback + 1],
   1327        1.37       chs 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1328        1.37       chs 			npages += nback + 1;
   1329        1.37       chs 		} else {
   1330        1.37       chs 			pgs[0] = pg;
   1331        1.37       chs 			npages = 1;
   1332        1.61     enami 			nback = 0;
   1333        1.37       chs 		}
   1334        1.37       chs 
   1335        1.37       chs 		/*
   1336        1.37       chs 		 * apply FREE or DEACTIVATE options if requested.
   1337        1.37       chs 		 */
   1338        1.37       chs 
   1339        1.37       chs 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1340   1.150.2.1        ad 			mutex_enter(&uvm_pageqlock);
   1341        1.37       chs 		}
   1342        1.37       chs 		for (i = 0; i < npages; i++) {
   1343        1.37       chs 			tpg = pgs[i];
   1344        1.37       chs 			KASSERT(tpg->uobject == uobj);
   1345        1.59     enami 			if (by_list && tpg == TAILQ_NEXT(pg, listq))
   1346        1.59     enami 				pg = tpg;
   1347        1.91     enami 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1348        1.91     enami 				continue;
   1349       1.141      yamt 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1350        1.37       chs 				(void) pmap_clear_reference(tpg);
   1351        1.37       chs 				uvm_pagedeactivate(tpg);
   1352        1.37       chs 			} else if (flags & PGO_FREE) {
   1353        1.37       chs 				pmap_page_protect(tpg, VM_PROT_NONE);
   1354        1.37       chs 				if (tpg->flags & PG_BUSY) {
   1355        1.37       chs 					tpg->flags |= freeflag;
   1356        1.56     enami 					if (pagedaemon) {
   1357  1.150.2.11      yamt 						uvm_pageout_start(1);
   1358        1.37       chs 						uvm_pagedequeue(tpg);
   1359        1.37       chs 					}
   1360        1.37       chs 				} else {
   1361        1.59     enami 
   1362        1.59     enami 					/*
   1363        1.59     enami 					 * ``page is not busy''
   1364        1.59     enami 					 * implies that npages is 1
   1365        1.59     enami 					 * and needs_clean is false.
   1366        1.59     enami 					 */
   1367        1.59     enami 
   1368        1.37       chs 					nextpg = TAILQ_NEXT(tpg, listq);
   1369        1.37       chs 					uvm_pagefree(tpg);
   1370        1.89     enami 					if (pagedaemon)
   1371        1.89     enami 						uvmexp.pdfreed++;
   1372        1.37       chs 				}
   1373        1.37       chs 			}
   1374        1.37       chs 		}
   1375        1.37       chs 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1376   1.150.2.1        ad 			mutex_exit(&uvm_pageqlock);
   1377        1.37       chs 		}
   1378        1.37       chs 		if (needs_clean) {
   1379       1.149   thorpej 			modified = true;
   1380        1.37       chs 
   1381        1.37       chs 			/*
   1382        1.37       chs 			 * start the i/o.  if we're traversing by list,
   1383        1.37       chs 			 * keep our place in the list with a marker page.
   1384        1.37       chs 			 */
   1385        1.37       chs 
   1386        1.37       chs 			if (by_list) {
   1387        1.37       chs 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1388        1.37       chs 				    listq);
   1389        1.37       chs 			}
   1390   1.150.2.1        ad 			mutex_exit(slock);
   1391        1.37       chs 			error = GOP_WRITE(vp, pgs, npages, flags);
   1392   1.150.2.1        ad 			mutex_enter(slock);
   1393        1.37       chs 			if (by_list) {
   1394        1.37       chs 				pg = TAILQ_NEXT(&curmp, listq);
   1395        1.37       chs 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1396        1.37       chs 			}
   1397        1.37       chs 			if (error) {
   1398        1.37       chs 				break;
   1399        1.37       chs 			}
   1400        1.37       chs 			if (by_list) {
   1401        1.37       chs 				continue;
   1402        1.37       chs 			}
   1403        1.37       chs 		}
   1404        1.37       chs 
   1405        1.37       chs 		/*
   1406        1.37       chs 		 * find the next page and continue if there was no error.
   1407        1.37       chs 		 */
   1408        1.37       chs 
   1409        1.37       chs 		if (by_list) {
   1410        1.37       chs 			if (nextpg) {
   1411        1.37       chs 				pg = nextpg;
   1412        1.37       chs 				nextpg = NULL;
   1413        1.37       chs 			} else {
   1414        1.37       chs 				pg = TAILQ_NEXT(pg, listq);
   1415        1.37       chs 			}
   1416        1.37       chs 		} else {
   1417        1.61     enami 			off += (npages - nback) << PAGE_SHIFT;
   1418        1.37       chs 			if (off < endoff) {
   1419        1.37       chs 				pg = uvm_pagelookup(uobj, off);
   1420        1.37       chs 			}
   1421        1.37       chs 		}
   1422        1.37       chs 	}
   1423        1.37       chs 	if (by_list) {
   1424        1.37       chs 		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
   1425   1.150.2.3        ad 		uvm_lwp_rele(l);
   1426        1.37       chs 	}
   1427        1.37       chs 
   1428   1.150.2.8        ad 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1429       1.121   reinoud 	    (vp->v_type != VBLK ||
   1430       1.103      yamt 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1431       1.103      yamt 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1432       1.103      yamt 	}
   1433       1.103      yamt 
   1434        1.37       chs 	/*
   1435        1.37       chs 	 * if we're cleaning and there was nothing to clean,
   1436        1.37       chs 	 * take us off the syncer list.  if we started any i/o
   1437        1.37       chs 	 * and we're doing sync i/o, wait for all writes to finish.
   1438        1.37       chs 	 */
   1439        1.37       chs 
   1440       1.104      yamt 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1441   1.150.2.8        ad 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1442   1.150.2.8        ad 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1443       1.137   reinoud 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1444       1.137   reinoud 			vn_syncer_remove_from_worklist(vp);
   1445        1.37       chs 	}
   1446       1.102      yamt 
   1447       1.102      yamt #if !defined(DEBUG)
   1448       1.102      yamt skip_scan:
   1449       1.102      yamt #endif /* !defined(DEBUG) */
   1450       1.143   hannken 
   1451   1.150.2.9        ad 	/* Wait for output to complete. */
   1452   1.150.2.2        ad 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1453   1.150.2.2        ad 		while (vp->v_numoutput != 0)
   1454   1.150.2.9        ad 			cv_wait(&vp->v_cv, slock);
   1455   1.150.2.2        ad 	}
   1456   1.150.2.9        ad 	mutex_exit(slock);
   1457   1.150.2.2        ad 
   1458       1.143   hannken 	if (has_trans)
   1459       1.143   hannken 		fstrans_done(vp->v_mount);
   1460       1.143   hannken 
   1461        1.53     enami 	return (error);
   1462        1.37       chs }
   1463        1.37       chs 
   1464        1.37       chs int
   1465        1.37       chs genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1466        1.37       chs {
   1467       1.130       chs 	off_t off;
   1468       1.130       chs 	vaddr_t kva;
   1469       1.130       chs 	size_t len;
   1470       1.130       chs 	int error;
   1471       1.130       chs 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1472       1.130       chs 
   1473       1.130       chs 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1474       1.130       chs 	    vp, pgs, npages, flags);
   1475       1.130       chs 
   1476       1.130       chs 	off = pgs[0]->offset;
   1477       1.130       chs 	kva = uvm_pagermapin(pgs, npages,
   1478       1.130       chs 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1479       1.130       chs 	len = npages << PAGE_SHIFT;
   1480       1.130       chs 
   1481       1.130       chs 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1482       1.130       chs 			    uvm_aio_biodone);
   1483       1.130       chs 
   1484       1.130       chs 	return error;
   1485       1.130       chs }
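/*
 * (Editorial usage sketch, hedged; "example_genfsops" is a hypothetical
 * name.)  genfs_gop_write() is the gop_write implementation a filesystem
 * would normally plug into its genfs_ops table, e.g.:
 *
 *	static const struct genfs_ops example_genfsops = {
 *		.gop_size	= genfs_size,
 *		.gop_write	= genfs_gop_write,
 *	};
 */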
   1486       1.130       chs 
   1487       1.130       chs /*
   1488       1.130       chs  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1489       1.130       chs  * and mapped into kernel memory.  Here we just look up the underlying
   1490       1.130       chs  * device block addresses and call the strategy routine.
   1491       1.130       chs  */
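/*
 * (Editorial worked example, hedged; the shift values are illustrative.)
 * With 8 KB filesystem blocks (fs_bshift == 13) and 512-byte device sectors
 * (dev_bshift == 9), a file offset of 12800 maps to logical block
 * lbn = 12800 >> 13 = 1, and the partial-block adjustment below adds
 * (12800 - (1 << 13)) >> 9 = 9 sectors to the device block number that
 * VOP_BMAP() returns for that lbn.
 */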
   1492       1.130       chs 
   1493       1.130       chs static int
   1494       1.130       chs genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1495       1.130       chs     enum uio_rw rw, void (*iodone)(struct buf *))
   1496       1.130       chs {
   1497        1.37       chs 	int s, error, run;
   1498        1.37       chs 	int fs_bshift, dev_bshift;
   1499        1.21       chs 	off_t eof, offset, startoffset;
   1500        1.21       chs 	size_t bytes, iobytes, skipbytes;
   1501        1.21       chs 	daddr_t lbn, blkno;
   1502        1.21       chs 	struct buf *mbp, *bp;
   1503        1.36       chs 	struct vnode *devvp;
   1504       1.148   thorpej 	bool async = (flags & PGO_SYNCIO) == 0;
   1505       1.148   thorpej 	bool write = rw == UIO_WRITE;
   1506       1.130       chs 	int brw = write ? B_WRITE : B_READ;
   1507       1.130       chs 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1508        1.21       chs 
   1509       1.130       chs 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1510       1.130       chs 	    vp, kva, len, flags);
   1511        1.21       chs 
   1512   1.150.2.7        ad 	KASSERT(vp->v_size <= vp->v_writesize);
   1513   1.150.2.7        ad 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1514       1.121   reinoud 	if (vp->v_type != VBLK) {
   1515        1.36       chs 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1516        1.36       chs 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1517        1.36       chs 	} else {
   1518        1.36       chs 		fs_bshift = DEV_BSHIFT;
   1519        1.36       chs 		dev_bshift = DEV_BSHIFT;
   1520        1.36       chs 	}
   1521        1.37       chs 	error = 0;
   1522       1.130       chs 	startoffset = off;
   1523       1.130       chs 	bytes = MIN(len, eof - startoffset);
   1524        1.21       chs 	skipbytes = 0;
   1525        1.21       chs 	KASSERT(bytes != 0);
   1526        1.21       chs 
   1527       1.130       chs 	if (write) {
   1528   1.150.2.9        ad 		mutex_enter(&vp->v_interlock);
   1529       1.130       chs 		vp->v_numoutput += 2;
   1530   1.150.2.9        ad 		mutex_exit(&vp->v_interlock);
   1531       1.130       chs 	}
   1532  1.150.2.12        ad 	mbp = getiobuf(vp, true);
   1533        1.21       chs 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1534        1.53     enami 	    vp, mbp, vp->v_numoutput, bytes);
   1535       1.130       chs 	mbp->b_bufsize = len;
   1536        1.21       chs 	mbp->b_data = (void *)kva;
   1537        1.21       chs 	mbp->b_resid = mbp->b_bcount = bytes;
   1538  1.150.2.12        ad 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1539  1.150.2.12        ad 	if (async) {
   1540  1.150.2.12        ad 		mbp->b_flags = brw | B_ASYNC;
   1541  1.150.2.12        ad 		mbp->b_iodone = iodone;
   1542  1.150.2.12        ad 	} else {
   1543  1.150.2.12        ad 		mbp->b_flags = brw;
   1544  1.150.2.12        ad 		mbp->b_iodone = NULL;
   1545  1.150.2.12        ad 	}
   1546   1.150.2.4        ad 	if (curlwp == uvm.pagedaemon_lwp)
   1547       1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1548       1.120      yamt 	else if (async)
   1549       1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1550       1.120      yamt 	else
   1551       1.120      yamt 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1552        1.21       chs 
   1553        1.21       chs 	bp = NULL;
   1554        1.21       chs 	for (offset = startoffset;
   1555        1.53     enami 	    bytes > 0;
   1556        1.53     enami 	    offset += iobytes, bytes -= iobytes) {
   1557        1.21       chs 		lbn = offset >> fs_bshift;
   1558        1.36       chs 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1559        1.21       chs 		if (error) {
   1560        1.21       chs 			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
   1561        1.21       chs 			skipbytes += bytes;
   1562        1.21       chs 			bytes = 0;
   1563        1.21       chs 			break;
   1564        1.21       chs 		}
   1565        1.21       chs 
   1566        1.26       chs 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1567        1.26       chs 		    bytes);
   1568        1.21       chs 		if (blkno == (daddr_t)-1) {
   1569       1.130       chs 			if (!write) {
   1570       1.130       chs 				memset((char *)kva + (offset - startoffset), 0,
   1571       1.130       chs 				   iobytes);
   1572       1.130       chs 			}
   1573        1.21       chs 			skipbytes += iobytes;
   1574        1.21       chs 			continue;
   1575        1.21       chs 		}
   1576        1.21       chs 
   1577        1.21       chs 		/* if it's really one i/o, don't make a second buf */
   1578        1.21       chs 		if (offset == startoffset && iobytes == bytes) {
   1579        1.21       chs 			bp = mbp;
   1580        1.21       chs 		} else {
   1581        1.21       chs 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1582        1.53     enami 			    vp, bp, vp->v_numoutput, 0);
   1583  1.150.2.12        ad 			bp = getiobuf(vp, true);
   1584       1.130       chs 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1585        1.21       chs 		}
   1586        1.21       chs 		bp->b_lblkno = 0;
   1587        1.21       chs 
   1588        1.21       chs 		/* adjust physical blkno for partial blocks */
   1589        1.25      fvdl 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1590        1.53     enami 		    dev_bshift);
   1591        1.53     enami 		UVMHIST_LOG(ubchist,
   1592        1.53     enami 		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1593        1.53     enami 		    vp, offset, bp->b_bcount, bp->b_blkno);
   1594       1.114      yamt 
   1595       1.114      yamt 		VOP_STRATEGY(devvp, bp);
   1596        1.21       chs 	}
   1597        1.21       chs 	if (skipbytes) {
   1598        1.29       chs 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1599        1.21       chs 	}
   1600       1.120      yamt 	nestiobuf_done(mbp, skipbytes, error);
   1601        1.21       chs 	if (async) {
   1602        1.32       chs 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1603        1.53     enami 		return (0);
   1604        1.21       chs 	}
   1605        1.37       chs 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1606        1.37       chs 	error = biowait(mbp);
   1607       1.134      yamt 	s = splbio();
   1608       1.130       chs 	(*iodone)(mbp);
   1609       1.134      yamt 	splx(s);
   1610        1.21       chs 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1611        1.53     enami 	return (error);
   1612        1.42       chs }
   1613        1.42       chs 
   1614        1.42       chs /*
   1615        1.42       chs  * VOP_PUTPAGES() for vnodes which never have pages.
   1616        1.42       chs  */
   1617        1.42       chs 
   1618        1.42       chs int
   1619        1.42       chs genfs_null_putpages(void *v)
   1620        1.42       chs {
   1621        1.42       chs 	struct vop_putpages_args /* {
   1622        1.42       chs 		struct vnode *a_vp;
   1623        1.42       chs 		voff_t a_offlo;
   1624        1.42       chs 		voff_t a_offhi;
   1625        1.42       chs 		int a_flags;
   1626        1.42       chs 	} */ *ap = v;
   1627        1.42       chs 	struct vnode *vp = ap->a_vp;
   1628        1.42       chs 
   1629        1.42       chs 	KASSERT(vp->v_uobj.uo_npages == 0);
   1630   1.150.2.1        ad 	mutex_exit(&vp->v_interlock);
   1631        1.42       chs 	return (0);
   1632        1.21       chs }
   1633        1.21       chs 
   1634        1.37       chs void
   1635        1.98      yamt genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
   1636        1.37       chs {
   1637        1.37       chs 	struct genfs_node *gp = VTOG(vp);
   1638        1.37       chs 
   1639       1.146        ad 	rw_init(&gp->g_glock);
   1640        1.37       chs 	gp->g_op = ops;
   1641        1.37       chs }
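/*
 * (Editorial usage sketch, hedged; "ffs_genfsops" is the FFS table name and
 * is not defined in this file.)  A filesystem calls this while initializing
 * a new vnode, passing its own genfs_ops, e.g.:
 *
 *	genfs_node_init(vp, &ffs_genfsops);
 *
 * so that GOP_SIZE(), GOP_WRITE() and GOP_MARKUPDATE() dispatch into that
 * filesystem's implementations.
 */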
   1642        1.37       chs 
   1643        1.37       chs void
   1644       1.147        ad genfs_node_destroy(struct vnode *vp)
   1645       1.147        ad {
   1646       1.147        ad 	struct genfs_node *gp = VTOG(vp);
   1647       1.147        ad 
   1648       1.147        ad 	rw_destroy(&gp->g_glock);
   1649       1.147        ad }
   1650       1.147        ad 
   1651       1.147        ad void
   1652       1.138  christos genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
   1653        1.21       chs {
   1654        1.21       chs 	int bsize;
   1655        1.21       chs 
   1656        1.37       chs 	bsize = 1 << vp->v_mount->mnt_fs_bshift;
   1657        1.37       chs 	*eobp = (size + bsize - 1) & ~(bsize - 1);
   1658        1.43       chs }
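/*
 * (Editorial worked example.)  With bsize == 8192, a size of 12800 rounds
 * up to an end-of-block of 16384; a size that is already a multiple of
 * bsize is returned unchanged.
 */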
   1659        1.43       chs 
   1660        1.43       chs int
   1661        1.43       chs genfs_compat_getpages(void *v)
   1662        1.43       chs {
   1663        1.43       chs 	struct vop_getpages_args /* {
   1664        1.43       chs 		struct vnode *a_vp;
   1665        1.43       chs 		voff_t a_offset;
   1666        1.43       chs 		struct vm_page **a_m;
   1667        1.43       chs 		int *a_count;
   1668        1.43       chs 		int a_centeridx;
   1669        1.43       chs 		vm_prot_t a_access_type;
   1670        1.43       chs 		int a_advice;
   1671        1.43       chs 		int a_flags;
   1672        1.43       chs 	} */ *ap = v;
   1673        1.43       chs 
   1674        1.43       chs 	off_t origoffset;
   1675        1.43       chs 	struct vnode *vp = ap->a_vp;
   1676        1.43       chs 	struct uvm_object *uobj = &vp->v_uobj;
   1677        1.43       chs 	struct vm_page *pg, **pgs;
   1678        1.43       chs 	vaddr_t kva;
   1679        1.43       chs 	int i, error, orignpages, npages;
   1680        1.43       chs 	struct iovec iov;
   1681        1.43       chs 	struct uio uio;
   1682       1.128        ad 	kauth_cred_t cred = curlwp->l_cred;
   1683       1.148   thorpej 	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1684        1.43       chs 
   1685        1.43       chs 	error = 0;
   1686        1.43       chs 	origoffset = ap->a_offset;
   1687        1.43       chs 	orignpages = *ap->a_count;
   1688        1.43       chs 	pgs = ap->a_m;
   1689        1.43       chs 
   1690   1.150.2.8        ad 	if (write && (vp->v_iflag & VI_ONWORKLST) == 0) {
   1691        1.43       chs 		vn_syncer_add_to_worklist(vp, filedelay);
   1692        1.43       chs 	}
   1693        1.43       chs 	if (ap->a_flags & PGO_LOCKED) {
   1694        1.43       chs 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
    1695        1.54     enami 		    UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
   1696        1.43       chs 
   1697        1.53     enami 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
   1698        1.43       chs 	}
   1699        1.43       chs 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1700   1.150.2.1        ad 		mutex_exit(&uobj->vmobjlock);
   1701        1.53     enami 		return (EINVAL);
   1702        1.43       chs 	}
   1703       1.115      yamt 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1704   1.150.2.1        ad 		mutex_exit(&uobj->vmobjlock);
   1705       1.115      yamt 		return 0;
   1706       1.115      yamt 	}
   1707        1.43       chs 	npages = orignpages;
   1708        1.43       chs 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1709   1.150.2.1        ad 	mutex_exit(&uobj->vmobjlock);
   1710        1.53     enami 	kva = uvm_pagermapin(pgs, npages,
   1711        1.53     enami 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1712        1.43       chs 	for (i = 0; i < npages; i++) {
   1713        1.43       chs 		pg = pgs[i];
   1714        1.43       chs 		if ((pg->flags & PG_FAKE) == 0) {
   1715        1.43       chs 			continue;
   1716        1.43       chs 		}
   1717        1.43       chs 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1718        1.43       chs 		iov.iov_len = PAGE_SIZE;
   1719        1.43       chs 		uio.uio_iov = &iov;
   1720        1.43       chs 		uio.uio_iovcnt = 1;
   1721        1.43       chs 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1722        1.43       chs 		uio.uio_rw = UIO_READ;
   1723        1.43       chs 		uio.uio_resid = PAGE_SIZE;
   1724       1.122      yamt 		UIO_SETUP_SYSSPACE(&uio);
   1725        1.87      yamt 		/* XXX vn_lock */
   1726        1.43       chs 		error = VOP_READ(vp, &uio, 0, cred);
   1727        1.43       chs 		if (error) {
   1728        1.43       chs 			break;
   1729        1.52       chs 		}
   1730        1.52       chs 		if (uio.uio_resid) {
   1731        1.52       chs 			memset(iov.iov_base, 0, uio.uio_resid);
   1732        1.43       chs 		}
   1733        1.43       chs 	}
   1734        1.43       chs 	uvm_pagermapout(kva, npages);
   1735   1.150.2.1        ad 	mutex_enter(&uobj->vmobjlock);
   1736   1.150.2.1        ad 	mutex_enter(&uvm_pageqlock);
   1737        1.43       chs 	for (i = 0; i < npages; i++) {
   1738        1.43       chs 		pg = pgs[i];
   1739        1.43       chs 		if (error && (pg->flags & PG_FAKE) != 0) {
   1740        1.43       chs 			pg->flags |= PG_RELEASED;
   1741        1.43       chs 		} else {
   1742        1.43       chs 			pmap_clear_modify(pg);
   1743        1.43       chs 			uvm_pageactivate(pg);
   1744        1.43       chs 		}
   1745        1.43       chs 	}
   1746        1.43       chs 	if (error) {
   1747        1.43       chs 		uvm_page_unbusy(pgs, npages);
   1748        1.43       chs 	}
   1749   1.150.2.1        ad 	mutex_exit(&uvm_pageqlock);
   1750   1.150.2.1        ad 	mutex_exit(&uobj->vmobjlock);
   1751        1.53     enami 	return (error);
   1752        1.43       chs }
   1753        1.43       chs 
   1754        1.43       chs int
   1755        1.43       chs genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1756       1.138  christos     int flags)
   1757        1.43       chs {
   1758        1.43       chs 	off_t offset;
   1759        1.43       chs 	struct iovec iov;
   1760        1.43       chs 	struct uio uio;
   1761       1.128        ad 	kauth_cred_t cred = curlwp->l_cred;
   1762        1.43       chs 	struct buf *bp;
   1763        1.43       chs 	vaddr_t kva;
   1764   1.150.2.9        ad 	int error;
   1765        1.43       chs 
   1766        1.43       chs 	offset = pgs[0]->offset;
   1767        1.53     enami 	kva = uvm_pagermapin(pgs, npages,
   1768        1.53     enami 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1769        1.43       chs 
   1770        1.43       chs 	iov.iov_base = (void *)kva;
   1771        1.43       chs 	iov.iov_len = npages << PAGE_SHIFT;
   1772        1.43       chs 	uio.uio_iov = &iov;
   1773        1.68      yamt 	uio.uio_iovcnt = 1;
   1774        1.43       chs 	uio.uio_offset = offset;
   1775        1.43       chs 	uio.uio_rw = UIO_WRITE;
   1776        1.43       chs 	uio.uio_resid = npages << PAGE_SHIFT;
   1777       1.122      yamt 	UIO_SETUP_SYSSPACE(&uio);
   1778        1.87      yamt 	/* XXX vn_lock */
   1779        1.43       chs 	error = VOP_WRITE(vp, &uio, 0, cred);
   1780        1.43       chs 
   1781   1.150.2.9        ad 	mutex_enter(&vp->v_interlock);
   1782   1.150.2.9        ad 	vp->v_numoutput++;
   1783   1.150.2.9        ad 	mutex_exit(&vp->v_interlock);
   1784        1.43       chs 
   1785  1.150.2.12        ad 	bp = getiobuf(vp, true);
   1786  1.150.2.12        ad 	bp->b_cflags = BC_BUSY | BC_AGE;
   1787        1.43       chs 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1788        1.43       chs 	bp->b_data = (char *)kva;
   1789        1.43       chs 	bp->b_bcount = npages << PAGE_SHIFT;
   1790        1.43       chs 	bp->b_bufsize = npages << PAGE_SHIFT;
   1791        1.43       chs 	bp->b_resid = 0;
   1792  1.150.2.10        ad 	bp->b_error = error;
   1793        1.43       chs 	uvm_aio_aiodone(bp);
   1794        1.53     enami 	return (error);
   1795        1.66  jdolecek }
   1796        1.66  jdolecek 
   1797       1.130       chs /*
   1798       1.130       chs  * Process a uio using direct I/O.  If we reach a part of the request
   1799       1.130       chs  * which cannot be processed in this fashion for some reason, just return.
   1800       1.130       chs  * The caller must handle some additional part of the request using
   1801       1.130       chs  * buffered I/O before trying direct I/O again.
   1802       1.130       chs  */
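/*
 * (Editorial usage sketch, hedged; IO_DIRECT is the ioflag bit defined
 * elsewhere in the kernel, and the fallback shown is only indicative.)
 * A filesystem's read/write routine would typically try the direct route
 * first and then finish whatever remains through its normal buffered/UBC
 * path, roughly:
 *
 *	if ((ioflag & IO_DIRECT) != 0)
 *		genfs_directio(vp, uio, ioflag);
 *	if (uio->uio_resid == 0)
 *		return (0);
 *	(... continue with the buffered path ...)
 */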
   1803       1.130       chs 
   1804       1.130       chs void
   1805       1.138  christos genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1806       1.130       chs {
   1807       1.130       chs 	struct vmspace *vs;
   1808       1.130       chs 	struct iovec *iov;
   1809       1.130       chs 	vaddr_t va;
   1810       1.130       chs 	size_t len;
   1811       1.130       chs 	const int mask = DEV_BSIZE - 1;
   1812       1.130       chs 	int error;
   1813       1.130       chs 
   1814       1.130       chs 	/*
   1815       1.130       chs 	 * We only support direct I/O to user space for now.
   1816       1.130       chs 	 */
   1817       1.130       chs 
   1818       1.130       chs 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1819       1.130       chs 		return;
   1820       1.130       chs 	}
   1821       1.130       chs 
   1822       1.130       chs 	/*
   1823       1.130       chs 	 * If the vnode is mapped, we would need to get the getpages lock
    1824       1.130       chs 	 * to stabilize the bmap, but then we would get into trouble while
   1825       1.130       chs 	 * locking the pages if the pages belong to this same vnode (or a
   1826       1.130       chs 	 * multi-vnode cascade to the same effect).  Just fall back to
   1827       1.130       chs 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1828       1.130       chs 	 */
   1829       1.130       chs 
   1830   1.150.2.8        ad 	if (vp->v_vflag & VV_MAPPED) {
   1831       1.130       chs 		return;
   1832       1.130       chs 	}
   1833       1.130       chs 
   1834       1.130       chs 	/*
   1835       1.130       chs 	 * Do as much of the uio as possible with direct I/O.
   1836       1.130       chs 	 */
   1837       1.130       chs 
   1838       1.130       chs 	vs = uio->uio_vmspace;
   1839       1.130       chs 	while (uio->uio_resid) {
   1840       1.130       chs 		iov = uio->uio_iov;
   1841       1.130       chs 		if (iov->iov_len == 0) {
   1842       1.130       chs 			uio->uio_iov++;
   1843       1.130       chs 			uio->uio_iovcnt--;
   1844       1.130       chs 			continue;
   1845       1.130       chs 		}
   1846       1.130       chs 		va = (vaddr_t)iov->iov_base;
   1847       1.130       chs 		len = MIN(iov->iov_len, genfs_maxdio);
   1848       1.130       chs 		len &= ~mask;
   1849       1.130       chs 
   1850       1.130       chs 		/*
   1851       1.130       chs 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1852       1.130       chs 		 * the current EOF, then fall back to buffered I/O.
   1853       1.130       chs 		 */
   1854       1.130       chs 
   1855       1.130       chs 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1856       1.130       chs 			return;
   1857       1.130       chs 		}
   1858       1.130       chs 
   1859       1.130       chs 		/*
   1860       1.130       chs 		 * Check alignment.  The file offset must be at least
   1861       1.130       chs 		 * sector-aligned.  The exact constraint on memory alignment
   1862       1.130       chs 		 * is very hardware-dependent, but requiring sector-aligned
   1863       1.130       chs 		 * addresses there too is safe.
   1864       1.130       chs 		 */
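		/*
		 * (Editorial worked example.)  With 512-byte sectors,
		 * mask == DEV_BSIZE - 1 == 0x1ff, so an offset or address
		 * such as 0x1200 passes the check below while 0x1234
		 * does not.
		 */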
   1865       1.130       chs 
   1866       1.130       chs 		if (uio->uio_offset & mask || va & mask) {
   1867       1.130       chs 			return;
   1868       1.130       chs 		}
   1869       1.130       chs 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1870       1.130       chs 					  uio->uio_rw);
   1871       1.130       chs 		if (error) {
   1872       1.130       chs 			break;
   1873       1.130       chs 		}
   1874       1.150  christos 		iov->iov_base = (char *)iov->iov_base + len;
   1875       1.130       chs 		iov->iov_len -= len;
   1876       1.130       chs 		uio->uio_offset += len;
   1877       1.130       chs 		uio->uio_resid -= len;
   1878       1.130       chs 	}
   1879       1.130       chs }
   1880       1.130       chs 
   1881       1.130       chs /*
   1882       1.130       chs  * Iodone routine for direct I/O.  We don't do much here since the request is
   1883       1.130       chs  * always synchronous, so the caller will do most of the work after biowait().
   1884       1.130       chs  */
   1885       1.130       chs 
   1886       1.130       chs static void
   1887       1.130       chs genfs_dio_iodone(struct buf *bp)
   1888       1.130       chs {
   1889       1.130       chs 
   1890       1.130       chs 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1891  1.150.2.12        ad 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1892  1.150.2.12        ad 		mutex_enter(bp->b_objlock);
   1893       1.130       chs 		vwakeup(bp);
   1894  1.150.2.12        ad 		mutex_exit(bp->b_objlock);
   1895       1.130       chs 	}
   1896       1.130       chs 	putiobuf(bp);
   1897       1.130       chs }
   1898       1.130       chs 
   1899       1.130       chs /*
   1900       1.130       chs  * Process one chunk of a direct I/O request.
   1901       1.130       chs  */
   1902       1.130       chs 
   1903       1.130       chs static int
   1904       1.130       chs genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1905       1.130       chs     off_t off, enum uio_rw rw)
   1906       1.130       chs {
   1907       1.130       chs 	struct vm_map *map;
   1908       1.130       chs 	struct pmap *upm, *kpm;
   1909       1.130       chs 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1910       1.130       chs 	off_t spoff, epoff;
   1911       1.130       chs 	vaddr_t kva, puva;
   1912       1.130       chs 	paddr_t pa;
   1913       1.130       chs 	vm_prot_t prot;
   1914       1.130       chs 	int error, rv, poff, koff;
   1915       1.130       chs 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO |
   1916       1.130       chs 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1917       1.130       chs 
   1918       1.130       chs 	/*
   1919       1.130       chs 	 * For writes, verify that this range of the file already has fully
   1920       1.130       chs 	 * allocated backing store.  If there are any holes, just punt and
   1921       1.130       chs 	 * make the caller take the buffered write path.
   1922       1.130       chs 	 */
   1923       1.130       chs 
   1924       1.130       chs 	if (rw == UIO_WRITE) {
   1925       1.130       chs 		daddr_t lbn, elbn, blkno;
   1926       1.130       chs 		int bsize, bshift, run;
   1927       1.130       chs 
   1928       1.130       chs 		bshift = vp->v_mount->mnt_fs_bshift;
   1929       1.130       chs 		bsize = 1 << bshift;
   1930       1.130       chs 		lbn = off >> bshift;
   1931       1.130       chs 		elbn = (off + len + bsize - 1) >> bshift;
   1932       1.130       chs 		while (lbn < elbn) {
   1933       1.130       chs 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1934       1.130       chs 			if (error) {
   1935       1.130       chs 				return error;
   1936       1.130       chs 			}
   1937       1.130       chs 			if (blkno == (daddr_t)-1) {
   1938       1.130       chs 				return ENOSPC;
   1939       1.130       chs 			}
   1940       1.130       chs 			lbn += 1 + run;
   1941       1.130       chs 		}
   1942       1.130       chs 	}
   1943       1.130       chs 
   1944       1.130       chs 	/*
   1945       1.130       chs 	 * Flush any cached pages for parts of the file that we're about to
   1946       1.130       chs 	 * access.  If we're writing, invalidate pages as well.
   1947       1.130       chs 	 */
   1948       1.130       chs 
   1949       1.130       chs 	spoff = trunc_page(off);
   1950       1.130       chs 	epoff = round_page(off + len);
   1951   1.150.2.1        ad 	mutex_enter(&vp->v_interlock);
   1952       1.130       chs 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1953       1.130       chs 	if (error) {
   1954       1.130       chs 		return error;
   1955       1.130       chs 	}
   1956       1.130       chs 
   1957       1.130       chs 	/*
   1958       1.130       chs 	 * Wire the user pages and remap them into kernel memory.
   1959       1.130       chs 	 */
   1960       1.130       chs 
   1961       1.130       chs 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1962       1.130       chs 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1963       1.130       chs 	if (error) {
   1964       1.130       chs 		return error;
   1965       1.130       chs 	}
   1966       1.130       chs 
   1967       1.130       chs 	map = &vs->vm_map;
   1968       1.130       chs 	upm = vm_map_pmap(map);
   1969       1.130       chs 	kpm = vm_map_pmap(kernel_map);
   1970       1.130       chs 	kva = uvm_km_alloc(kernel_map, klen, 0,
   1971       1.130       chs 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   1972       1.130       chs 	puva = trunc_page(uva);
   1973       1.130       chs 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1974       1.130       chs 		rv = pmap_extract(upm, puva + poff, &pa);
   1975       1.130       chs 		KASSERT(rv);
   1976       1.130       chs 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   1977       1.130       chs 	}
   1978       1.130       chs 	pmap_update(kpm);
   1979       1.130       chs 
   1980       1.130       chs 	/*
   1981       1.130       chs 	 * Do the I/O.
   1982       1.130       chs 	 */
   1983       1.130       chs 
   1984       1.130       chs 	koff = uva - trunc_page(uva);
   1985       1.130       chs 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1986       1.130       chs 			    genfs_dio_iodone);
   1987       1.130       chs 
   1988       1.130       chs 	/*
   1989       1.130       chs 	 * Tear down the kernel mapping.
   1990       1.130       chs 	 */
   1991       1.130       chs 
   1992       1.130       chs 	pmap_remove(kpm, kva, kva + klen);
   1993       1.130       chs 	pmap_update(kpm);
   1994       1.130       chs 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1995       1.130       chs 
   1996       1.130       chs 	/*
   1997       1.130       chs 	 * Unwire the user pages.
   1998       1.130       chs 	 */
   1999       1.130       chs 
   2000       1.130       chs 	uvm_vsunlock(vs, (void *)uva, len);
   2001       1.130       chs 	return error;
   2002       1.130       chs }
   2003       1.130       chs 
   2004       1.130       chs 
   2005        1.66  jdolecek static void
   2006        1.66  jdolecek filt_genfsdetach(struct knote *kn)
   2007        1.66  jdolecek {
   2008        1.66  jdolecek 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   2009        1.66  jdolecek 
   2010        1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   2011        1.66  jdolecek 	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
   2012        1.66  jdolecek }
   2013        1.66  jdolecek 
   2014        1.66  jdolecek static int
   2015        1.66  jdolecek filt_genfsread(struct knote *kn, long hint)
   2016        1.66  jdolecek {
   2017        1.66  jdolecek 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   2018        1.66  jdolecek 
   2019        1.66  jdolecek 	/*
   2020        1.66  jdolecek 	 * filesystem is gone, so set the EOF flag and schedule
   2021        1.66  jdolecek 	 * the knote for deletion.
   2022        1.66  jdolecek 	 */
   2023        1.66  jdolecek 	if (hint == NOTE_REVOKE) {
   2024        1.66  jdolecek 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
   2025        1.66  jdolecek 		return (1);
   2026        1.66  jdolecek 	}
   2027        1.66  jdolecek 
   2028        1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   2029        1.66  jdolecek 	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
    2030        1.66  jdolecek 	return (kn->kn_data != 0);
   2031        1.66  jdolecek }
   2032        1.66  jdolecek 
   2033        1.66  jdolecek static int
   2034        1.66  jdolecek filt_genfsvnode(struct knote *kn, long hint)
   2035        1.66  jdolecek {
   2036        1.66  jdolecek 
   2037        1.66  jdolecek 	if (kn->kn_sfflags & hint)
   2038        1.66  jdolecek 		kn->kn_fflags |= hint;
   2039        1.66  jdolecek 	if (hint == NOTE_REVOKE) {
   2040        1.66  jdolecek 		kn->kn_flags |= EV_EOF;
   2041        1.66  jdolecek 		return (1);
   2042        1.66  jdolecek 	}
   2043        1.66  jdolecek 	return (kn->kn_fflags != 0);
   2044        1.66  jdolecek }
   2045        1.66  jdolecek 
   2046        1.96     perry static const struct filterops genfsread_filtops =
   2047        1.66  jdolecek 	{ 1, NULL, filt_genfsdetach, filt_genfsread };
   2048        1.96     perry static const struct filterops genfsvnode_filtops =
   2049        1.66  jdolecek 	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };
   2050        1.66  jdolecek 
   2051        1.66  jdolecek int
   2052        1.66  jdolecek genfs_kqfilter(void *v)
   2053        1.66  jdolecek {
   2054        1.66  jdolecek 	struct vop_kqfilter_args /* {
   2055        1.66  jdolecek 		struct vnode	*a_vp;
   2056        1.66  jdolecek 		struct knote	*a_kn;
   2057        1.66  jdolecek 	} */ *ap = v;
   2058        1.66  jdolecek 	struct vnode *vp;
   2059        1.66  jdolecek 	struct knote *kn;
   2060        1.66  jdolecek 
   2061        1.66  jdolecek 	vp = ap->a_vp;
   2062        1.66  jdolecek 	kn = ap->a_kn;
   2063        1.66  jdolecek 	switch (kn->kn_filter) {
   2064        1.66  jdolecek 	case EVFILT_READ:
   2065        1.66  jdolecek 		kn->kn_fop = &genfsread_filtops;
   2066        1.66  jdolecek 		break;
   2067        1.66  jdolecek 	case EVFILT_VNODE:
   2068        1.66  jdolecek 		kn->kn_fop = &genfsvnode_filtops;
   2069        1.66  jdolecek 		break;
   2070        1.66  jdolecek 	default:
   2071        1.66  jdolecek 		return (1);
   2072        1.66  jdolecek 	}
   2073        1.66  jdolecek 
   2074        1.66  jdolecek 	kn->kn_hook = vp;
   2075        1.66  jdolecek 
   2076        1.66  jdolecek 	/* XXXLUKEM lock the struct? */
   2077        1.66  jdolecek 	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
   2078        1.66  jdolecek 
   2079        1.66  jdolecek 	return (0);
   2080         1.1   mycroft }
   2081       1.136      yamt 
   2082       1.136      yamt void
   2083       1.136      yamt genfs_node_wrlock(struct vnode *vp)
   2084       1.136      yamt {
   2085       1.136      yamt 	struct genfs_node *gp = VTOG(vp);
   2086       1.136      yamt 
   2087       1.146        ad 	rw_enter(&gp->g_glock, RW_WRITER);
   2088       1.136      yamt }
   2089       1.136      yamt 
   2090       1.136      yamt void
   2091       1.136      yamt genfs_node_rdlock(struct vnode *vp)
   2092       1.136      yamt {
   2093       1.136      yamt 	struct genfs_node *gp = VTOG(vp);
   2094       1.136      yamt 
   2095       1.146        ad 	rw_enter(&gp->g_glock, RW_READER);
   2096       1.136      yamt }
   2097       1.136      yamt 
   2098       1.136      yamt void
   2099       1.136      yamt genfs_node_unlock(struct vnode *vp)
   2100       1.136      yamt {
   2101       1.136      yamt 	struct genfs_node *gp = VTOG(vp);
   2102       1.136      yamt 
   2103       1.146        ad 	rw_exit(&gp->g_glock);
   2104       1.136      yamt }
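/*
 * (Editorial usage sketch, hedged; the truncate helper shown is
 * hypothetical.)  Filesystems take the genfs node lock around operations
 * that need a stable block map: paths that may change the mapping take the
 * write lock, while readers such as the getpages path take the read lock,
 * roughly:
 *
 *	genfs_node_wrlock(vp);
 *	error = example_fs_truncate(vp, newsize, cred);
 *	genfs_node_unlock(vp);
 */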
   2105