Home | History | Annotate | Line # | Download | only in genfs
      1  1.104  riastrad /*	$NetBSD: genfs_io.c,v 1.104 2024/04/05 13:05:40 riastradh Exp $	*/
      2    1.1     pooka 
      3    1.1     pooka /*
      4    1.1     pooka  * Copyright (c) 1982, 1986, 1989, 1993
      5    1.1     pooka  *	The Regents of the University of California.  All rights reserved.
      6    1.1     pooka  *
      7    1.1     pooka  * Redistribution and use in source and binary forms, with or without
      8    1.1     pooka  * modification, are permitted provided that the following conditions
      9    1.1     pooka  * are met:
     10    1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     11    1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     12    1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     13    1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     14    1.1     pooka  *    documentation and/or other materials provided with the distribution.
     15    1.1     pooka  * 3. Neither the name of the University nor the names of its contributors
     16    1.1     pooka  *    may be used to endorse or promote products derived from this software
     17    1.1     pooka  *    without specific prior written permission.
     18    1.1     pooka  *
     19    1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20    1.1     pooka  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21    1.1     pooka  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22    1.1     pooka  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23    1.1     pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24    1.1     pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25    1.1     pooka  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26    1.1     pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27    1.1     pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28    1.1     pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29    1.1     pooka  * SUCH DAMAGE.
     30    1.1     pooka  *
     31    1.1     pooka  */
     32    1.1     pooka 
     33    1.1     pooka #include <sys/cdefs.h>
     34  1.104  riastrad __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.104 2024/04/05 13:05:40 riastradh Exp $");
     35    1.1     pooka 
     36    1.1     pooka #include <sys/param.h>
     37    1.1     pooka #include <sys/systm.h>
     38    1.1     pooka #include <sys/proc.h>
     39    1.1     pooka #include <sys/kernel.h>
     40    1.1     pooka #include <sys/mount.h>
     41    1.1     pooka #include <sys/vnode.h>
     42    1.1     pooka #include <sys/kmem.h>
     43    1.1     pooka #include <sys/kauth.h>
     44    1.1     pooka #include <sys/fstrans.h>
     45   1.15     pooka #include <sys/buf.h>
     46   1.95        ad #include <sys/atomic.h>
     47    1.1     pooka 
     48    1.1     pooka #include <miscfs/genfs/genfs.h>
     49    1.1     pooka #include <miscfs/genfs/genfs_node.h>
     50    1.1     pooka #include <miscfs/specfs/specdev.h>
     51    1.1     pooka 
     52    1.1     pooka #include <uvm/uvm.h>
     53    1.1     pooka #include <uvm/uvm_pager.h>
     54   1.78        ad #include <uvm/uvm_page_array.h>
     55    1.1     pooka 
     56    1.1     pooka static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     57    1.1     pooka     off_t, enum uio_rw);
     58    1.1     pooka static void genfs_dio_iodone(struct buf *);
     59    1.1     pooka 
     60   1.59  riastrad static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
     61   1.59  riastrad     off_t, bool, bool, bool, bool);
     62    1.1     pooka static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     63    1.1     pooka     void (*)(struct buf *));
     64   1.55      yamt static void genfs_rel_pages(struct vm_page **, unsigned int);
     65    1.1     pooka 
     66    1.1     pooka int genfs_maxdio = MAXPHYS;
     67    1.1     pooka 
     68   1.38       chs static void
     69   1.55      yamt genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
     70    1.1     pooka {
     71   1.55      yamt 	unsigned int i;
     72    1.1     pooka 
     73    1.1     pooka 	for (i = 0; i < npages; i++) {
     74    1.1     pooka 		struct vm_page *pg = pgs[i];
     75    1.1     pooka 
     76    1.1     pooka 		if (pg == NULL || pg == PGO_DONTCARE)
     77    1.1     pooka 			continue;
     78   1.86        ad 		KASSERT(uvm_page_owner_locked_p(pg, true));
     79    1.1     pooka 		if (pg->flags & PG_FAKE) {
     80    1.1     pooka 			pg->flags |= PG_RELEASED;
     81    1.1     pooka 		}
     82    1.1     pooka 	}
     83    1.1     pooka 	uvm_page_unbusy(pgs, npages);
     84    1.1     pooka }
     85    1.1     pooka 
     86    1.1     pooka /*
     87    1.1     pooka  * generic VM getpages routine.
     88    1.1     pooka  * Return PG_BUSY pages for the given range,
     89    1.1     pooka  * reading from backing store if necessary.
     90    1.1     pooka  */
     91    1.1     pooka 
     92    1.1     pooka int
     93    1.1     pooka genfs_getpages(void *v)
     94    1.1     pooka {
     95    1.1     pooka 	struct vop_getpages_args /* {
     96    1.1     pooka 		struct vnode *a_vp;
     97    1.1     pooka 		voff_t a_offset;
     98    1.1     pooka 		struct vm_page **a_m;
     99    1.1     pooka 		int *a_count;
    100    1.1     pooka 		int a_centeridx;
    101    1.1     pooka 		vm_prot_t a_access_type;
    102    1.1     pooka 		int a_advice;
    103    1.1     pooka 		int a_flags;
    104   1.22  uebayasi 	} */ * const ap = v;
    105    1.1     pooka 
    106   1.24  uebayasi 	off_t diskeof, memeof;
    107   1.95        ad 	int i, error, npages, iflag;
    108   1.10      yamt 	const int flags = ap->a_flags;
    109   1.22  uebayasi 	struct vnode * const vp = ap->a_vp;
    110   1.22  uebayasi 	struct uvm_object * const uobj = &vp->v_uobj;
    111   1.10      yamt 	const bool async = (flags & PGO_SYNCIO) == 0;
    112   1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    113   1.10      yamt 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    114   1.35  uebayasi 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    115   1.73  jdolecek 	const bool need_wapbl = (vp->v_mount->mnt_wapbl &&
    116   1.73  jdolecek 			(flags & PGO_JOURNALLOCKED) == 0);
    117   1.40       chs 	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
    118   1.64   hannken 	bool holds_wapbl = false;
    119   1.64   hannken 	struct mount *trans_mount = NULL;
    120    1.1     pooka 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    121    1.1     pooka 
    122   1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx/%jx count %jd",
    123   1.71  pgoyette 	    (uintptr_t)vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    124    1.1     pooka 
    125   1.84        ad 	KASSERT(memwrite >= overwrite);
    126    1.1     pooka 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    127    1.1     pooka 	    vp->v_type == VLNK || vp->v_type == VBLK);
    128    1.1     pooka 
    129   1.95        ad 	/*
    130   1.95        ad 	 * the object must be locked.  it can only be a read lock when
    131   1.96        ad 	 * processing a read fault with PGO_LOCKED.
    132   1.95        ad 	 */
    133   1.95        ad 
    134   1.95        ad 	KASSERT(rw_lock_held(uobj->vmobjlock));
    135   1.95        ad 	KASSERT(rw_write_held(uobj->vmobjlock) ||
    136   1.96        ad 	   ((flags & PGO_LOCKED) != 0 && !memwrite));
    137   1.95        ad 
    138   1.74  jdolecek #ifdef DIAGNOSTIC
    139   1.74  jdolecek 	if ((flags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
    140   1.74  jdolecek                 WAPBL_JLOCK_ASSERT(vp->v_mount);
    141   1.74  jdolecek #endif
    142   1.74  jdolecek 
    143   1.95        ad 	/*
    144   1.95        ad 	 * check for reclaimed vnode.  v_interlock is not held here, but
    145   1.95        ad 	 * VI_DEADCHECK is set with vmobjlock held.
    146   1.95        ad 	 */
    147   1.95        ad 
    148   1.95        ad 	iflag = atomic_load_relaxed(&vp->v_iflag);
    149   1.95        ad 	if (__predict_false((iflag & VI_DEADCHECK) != 0)) {
    150   1.95        ad 		mutex_enter(vp->v_interlock);
    151   1.95        ad 		error = vdead_check(vp, VDEAD_NOWAIT);
    152   1.95        ad 		mutex_exit(vp->v_interlock);
    153   1.95        ad 		if (error) {
    154   1.95        ad 			if ((flags & PGO_LOCKED) == 0)
    155   1.95        ad 				rw_exit(uobj->vmobjlock);
    156   1.95        ad 			return error;
    157   1.95        ad 		}
    158   1.70   hannken 	}
    159   1.70   hannken 
    160    1.1     pooka startover:
    161    1.1     pooka 	error = 0;
    162   1.27  uebayasi 	const voff_t origvsize = vp->v_size;
    163   1.27  uebayasi 	const off_t origoffset = ap->a_offset;
    164   1.29  uebayasi 	const int orignpages = *ap->a_count;
    165   1.33  uebayasi 
    166    1.1     pooka 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    167    1.1     pooka 	if (flags & PGO_PASTEOF) {
    168   1.24  uebayasi 		off_t newsize;
    169    1.1     pooka #if defined(DIAGNOSTIC)
    170    1.1     pooka 		off_t writeeof;
    171    1.1     pooka #endif /* defined(DIAGNOSTIC) */
    172    1.1     pooka 
    173    1.1     pooka 		newsize = MAX(origvsize,
    174    1.1     pooka 		    origoffset + (orignpages << PAGE_SHIFT));
    175    1.1     pooka 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    176    1.1     pooka #if defined(DIAGNOSTIC)
    177    1.1     pooka 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    178    1.1     pooka 		if (newsize > round_page(writeeof)) {
    179   1.39     pooka 			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
    180   1.39     pooka 			    __func__, newsize, round_page(writeeof));
    181    1.1     pooka 		}
    182    1.1     pooka #endif /* defined(DIAGNOSTIC) */
    183    1.1     pooka 	} else {
    184    1.1     pooka 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    185    1.1     pooka 	}
    186    1.1     pooka 	KASSERT(ap->a_centeridx >= 0 || ap->a_centeridx <= orignpages);
    187  1.103  riastrad 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0);
    188  1.103  riastrad 	KASSERT(origoffset >= 0);
    189    1.1     pooka 	KASSERT(orignpages > 0);
    190    1.1     pooka 
    191    1.1     pooka 	/*
    192    1.1     pooka 	 * Bounds-check the request.
    193    1.1     pooka 	 */
    194    1.1     pooka 
    195    1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    196    1.1     pooka 		if ((flags & PGO_LOCKED) == 0) {
    197   1.86        ad 			rw_exit(uobj->vmobjlock);
    198    1.1     pooka 		}
    199   1.71  pgoyette 		UVMHIST_LOG(ubchist, "off 0x%jx count %jd goes past EOF 0x%jx",
    200    1.1     pooka 		    origoffset, *ap->a_count, memeof,0);
    201    1.1     pooka 		error = EINVAL;
    202    1.1     pooka 		goto out_err;
    203    1.1     pooka 	}
    204    1.1     pooka 
    205    1.1     pooka 	/* uobj is locked */
    206    1.1     pooka 
    207    1.1     pooka 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    208    1.1     pooka 	    (vp->v_type != VBLK ||
    209    1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    210    1.1     pooka 		int updflags = 0;
    211    1.1     pooka 
    212    1.1     pooka 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    213    1.1     pooka 			updflags = GOP_UPDATE_ACCESSED;
    214    1.1     pooka 		}
    215   1.35  uebayasi 		if (memwrite) {
    216    1.1     pooka 			updflags |= GOP_UPDATE_MODIFIED;
    217    1.1     pooka 		}
    218    1.1     pooka 		if (updflags != 0) {
    219    1.1     pooka 			GOP_MARKUPDATE(vp, updflags);
    220    1.1     pooka 		}
    221    1.1     pooka 	}
    222    1.1     pooka 
    223    1.1     pooka 	/*
    224    1.1     pooka 	 * For PGO_LOCKED requests, just return whatever's in memory.
    225    1.1     pooka 	 */
    226    1.1     pooka 
    227    1.1     pooka 	if (flags & PGO_LOCKED) {
    228    1.1     pooka 		int nfound;
    229   1.31  uebayasi 		struct vm_page *pg;
    230    1.1     pooka 
    231   1.40       chs 		KASSERT(!glocked);
    232    1.1     pooka 		npages = *ap->a_count;
    233    1.1     pooka #if defined(DEBUG)
    234    1.1     pooka 		for (i = 0; i < npages; i++) {
    235    1.1     pooka 			pg = ap->a_m[i];
    236    1.1     pooka 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    237    1.1     pooka 		}
    238    1.1     pooka #endif /* defined(DEBUG) */
    239   1.95        ad  		nfound = uvn_findpages(uobj, origoffset, &npages,
    240   1.84        ad 		    ap->a_m, NULL,
    241   1.96        ad 		    UFP_NOWAIT | UFP_NOALLOC | UFP_NOBUSY |
    242   1.96        ad 		    (memwrite ? UFP_NORDONLY : 0));
    243    1.1     pooka 		KASSERT(npages == *ap->a_count);
    244    1.1     pooka 		if (nfound == 0) {
    245    1.1     pooka 			error = EBUSY;
    246    1.1     pooka 			goto out_err;
    247    1.1     pooka 		}
    248   1.84        ad 		/*
    249   1.84        ad 		 * lock and unlock g_glock to ensure that no one is truncating
    250   1.84        ad 		 * the file behind us.
    251   1.84        ad 		 */
    252   1.23  uebayasi 		if (!genfs_node_rdtrylock(vp)) {
    253    1.1     pooka 			/*
    254    1.1     pooka 			 * restore the array.
    255    1.1     pooka 			 */
    256    1.1     pooka 
    257    1.1     pooka 			for (i = 0; i < npages; i++) {
    258    1.1     pooka 				pg = ap->a_m[i];
    259    1.1     pooka 
    260   1.41  uebayasi 				if (pg != NULL && pg != PGO_DONTCARE) {
    261    1.1     pooka 					ap->a_m[i] = NULL;
    262    1.1     pooka 				}
    263   1.46  uebayasi 				KASSERT(ap->a_m[i] == NULL ||
    264   1.46  uebayasi 				    ap->a_m[i] == PGO_DONTCARE);
    265    1.1     pooka 			}
    266    1.1     pooka 		} else {
    267   1.23  uebayasi 			genfs_node_unlock(vp);
    268    1.1     pooka 		}
    269    1.1     pooka 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    270   1.38       chs 		if (error == 0 && memwrite) {
    271   1.84        ad 			for (i = 0; i < npages; i++) {
    272   1.84        ad 				pg = ap->a_m[i];
    273   1.84        ad 				if (pg == NULL || pg == PGO_DONTCARE) {
    274   1.84        ad 					continue;
    275   1.84        ad 				}
    276   1.84        ad 				if (uvm_pagegetdirty(pg) ==
    277   1.84        ad 				    UVM_PAGE_STATUS_CLEAN) {
    278   1.84        ad 					uvm_pagemarkdirty(pg,
    279   1.84        ad 					    UVM_PAGE_STATUS_UNKNOWN);
    280   1.84        ad 				}
    281   1.84        ad 			}
    282   1.38       chs 		}
    283    1.1     pooka 		goto out_err;
    284    1.1     pooka 	}
    285   1.86        ad 	rw_exit(uobj->vmobjlock);
    286    1.1     pooka 
    287    1.1     pooka 	/*
    288    1.1     pooka 	 * find the requested pages and make some simple checks.
    289    1.1     pooka 	 * leave space in the page array for a whole block.
    290    1.1     pooka 	 */
    291    1.1     pooka 
    292   1.27  uebayasi 	const int fs_bshift = (vp->v_type != VBLK) ?
    293   1.27  uebayasi 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    294   1.27  uebayasi 	const int fs_bsize = 1 << fs_bshift;
    295   1.30  uebayasi #define	blk_mask	(fs_bsize - 1)
    296   1.30  uebayasi #define	trunc_blk(x)	((x) & ~blk_mask)
    297   1.30  uebayasi #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
    298    1.1     pooka 
    299   1.29  uebayasi 	const int orignmempages = MIN(orignpages,
    300    1.1     pooka 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    301   1.29  uebayasi 	npages = orignmempages;
    302   1.30  uebayasi 	const off_t startoffset = trunc_blk(origoffset);
    303   1.30  uebayasi 	const off_t endoffset = MIN(
    304   1.30  uebayasi 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    305   1.30  uebayasi 	    round_page(memeof));
    306   1.31  uebayasi 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    307    1.1     pooka 
    308   1.33  uebayasi 	const int pgs_size = sizeof(struct vm_page *) *
    309    1.1     pooka 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    310   1.33  uebayasi 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    311   1.31  uebayasi 
    312    1.1     pooka 	if (pgs_size > sizeof(pgs_onstack)) {
    313    1.1     pooka 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    314    1.1     pooka 		if (pgs == NULL) {
    315    1.1     pooka 			pgs = pgs_onstack;
    316    1.1     pooka 			error = ENOMEM;
    317   1.32  uebayasi 			goto out_err;
    318    1.1     pooka 		}
    319    1.1     pooka 	} else {
    320   1.14  christos 		pgs = pgs_onstack;
    321   1.14  christos 		(void)memset(pgs, 0, pgs_size);
    322    1.1     pooka 	}
    323   1.14  christos 
    324   1.99       rin 	UVMHIST_LOG(ubchist, "ridx %jd npages %jd startoff %#jx endoff %#jx",
    325    1.1     pooka 	    ridx, npages, startoffset, endoffset);
    326    1.1     pooka 
    327   1.64   hannken 	if (trans_mount == NULL) {
    328   1.64   hannken 		trans_mount = vp->v_mount;
    329   1.69   hannken 		fstrans_start(trans_mount);
    330   1.64   hannken 		/*
    331   1.64   hannken 		 * check if this vnode is still valid.
    332   1.64   hannken 		 */
    333   1.64   hannken 		mutex_enter(vp->v_interlock);
    334   1.64   hannken 		error = vdead_check(vp, 0);
    335   1.64   hannken 		mutex_exit(vp->v_interlock);
    336   1.64   hannken 		if (error)
    337   1.64   hannken 			goto out_err_free;
    338   1.42   hannken 		/*
    339   1.42   hannken 		 * XXX: This assumes that we come here only via
    340   1.42   hannken 		 * the mmio path
    341   1.42   hannken 		 */
    342   1.73  jdolecek 		if (blockalloc && need_wapbl) {
    343   1.64   hannken 			error = WAPBL_BEGIN(trans_mount);
    344   1.64   hannken 			if (error)
    345   1.42   hannken 				goto out_err_free;
    346   1.64   hannken 			holds_wapbl = true;
    347   1.42   hannken 		}
    348    1.1     pooka 	}
    349    1.1     pooka 
    350    1.1     pooka 	/*
    351    1.1     pooka 	 * hold g_glock to prevent a race with truncate.
    352    1.1     pooka 	 *
    353    1.1     pooka 	 * check if our idea of v_size is still valid.
    354    1.1     pooka 	 */
    355    1.1     pooka 
    356   1.40       chs 	KASSERT(!glocked || genfs_node_wrlocked(vp));
    357   1.40       chs 	if (!glocked) {
    358   1.40       chs 		if (blockalloc) {
    359   1.40       chs 			genfs_node_wrlock(vp);
    360   1.40       chs 		} else {
    361   1.40       chs 			genfs_node_rdlock(vp);
    362   1.40       chs 		}
    363    1.1     pooka 	}
    364   1.86        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
    365    1.1     pooka 	if (vp->v_size < origvsize) {
    366   1.40       chs 		if (!glocked) {
    367   1.40       chs 			genfs_node_unlock(vp);
    368   1.40       chs 		}
    369    1.1     pooka 		if (pgs != pgs_onstack)
    370    1.1     pooka 			kmem_free(pgs, pgs_size);
    371    1.1     pooka 		goto startover;
    372    1.1     pooka 	}
    373    1.1     pooka 
    374   1.84        ad 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
    375   1.29  uebayasi 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    376   1.40       chs 		if (!glocked) {
    377   1.40       chs 			genfs_node_unlock(vp);
    378   1.40       chs 		}
    379    1.1     pooka 		KASSERT(async != 0);
    380   1.29  uebayasi 		genfs_rel_pages(&pgs[ridx], orignmempages);
    381   1.86        ad 		rw_exit(uobj->vmobjlock);
    382    1.1     pooka 		error = EBUSY;
    383   1.33  uebayasi 		goto out_err_free;
    384    1.1     pooka 	}
    385    1.1     pooka 
    386    1.1     pooka 	/*
    387   1.84        ad 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    388   1.84        ad 	 */
    389   1.84        ad 
    390   1.84        ad 	if (overwrite) {
    391   1.84        ad 		if (!glocked) {
    392   1.84        ad 			genfs_node_unlock(vp);
    393   1.84        ad 		}
    394   1.84        ad 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    395   1.84        ad 
    396   1.84        ad 		for (i = 0; i < npages; i++) {
    397   1.84        ad 			struct vm_page *pg = pgs[ridx + i];
    398   1.84        ad 
    399   1.84        ad 			/*
    400   1.84        ad 			 * it's caller's responsibility to allocate blocks
    401   1.84        ad 			 * beforehand for the overwrite case.
    402   1.84        ad 			 */
    403   1.84        ad 
    404   1.84        ad 			KASSERT((pg->flags & PG_RDONLY) == 0 || !blockalloc);
    405   1.84        ad 			pg->flags &= ~PG_RDONLY;
    406   1.84        ad 
    407   1.84        ad 			/*
    408   1.84        ad 			 * mark the page DIRTY.
    409   1.84        ad 			 * otherwise another thread can do putpages and pull
    410   1.84        ad 			 * our vnode from syncer's queue before our caller does
    411   1.84        ad 			 * ubc_release.  note that putpages won't see CLEAN
    412   1.84        ad 			 * pages even if they are BUSY.
    413   1.84        ad 			 */
    414   1.84        ad 
    415   1.84        ad 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    416   1.84        ad 		}
    417   1.84        ad 		npages += ridx;
    418   1.84        ad 		goto out;
    419   1.84        ad 	}
    420   1.84        ad 
    421   1.84        ad 	/*
    422    1.1     pooka 	 * if the pages are already resident, just return them.
    423    1.1     pooka 	 */
    424    1.1     pooka 
    425    1.1     pooka 	for (i = 0; i < npages; i++) {
    426   1.31  uebayasi 		struct vm_page *pg = pgs[ridx + i];
    427    1.1     pooka 
    428   1.31  uebayasi 		if ((pg->flags & PG_FAKE) ||
    429   1.84        ad 		    (blockalloc && (pg->flags & PG_RDONLY) != 0)) {
    430    1.1     pooka 			break;
    431    1.1     pooka 		}
    432    1.1     pooka 	}
    433    1.1     pooka 	if (i == npages) {
    434   1.40       chs 		if (!glocked) {
    435   1.40       chs 			genfs_node_unlock(vp);
    436   1.40       chs 		}
    437    1.1     pooka 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    438    1.1     pooka 		npages += ridx;
    439    1.1     pooka 		goto out;
    440    1.1     pooka 	}
    441    1.1     pooka 
    442    1.1     pooka 	/*
    443    1.1     pooka 	 * the page wasn't resident and we're not overwriting,
    444    1.1     pooka 	 * so we're going to have to do some i/o.
    445    1.1     pooka 	 * find any additional pages needed to cover the expanded range.
    446    1.1     pooka 	 */
    447    1.1     pooka 
    448    1.1     pooka 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    449   1.29  uebayasi 	if (startoffset != origoffset || npages != orignmempages) {
    450   1.31  uebayasi 		int npgs;
    451    1.1     pooka 
    452    1.1     pooka 		/*
    453    1.1     pooka 		 * we need to avoid deadlocks caused by locking
    454    1.1     pooka 		 * additional pages at lower offsets than pages we
    455    1.1     pooka 		 * already have locked.  unlock them all and start over.
    456    1.1     pooka 		 */
    457    1.1     pooka 
    458   1.29  uebayasi 		genfs_rel_pages(&pgs[ridx], orignmempages);
    459    1.1     pooka 		memset(pgs, 0, pgs_size);
    460    1.1     pooka 
    461   1.71  pgoyette 		UVMHIST_LOG(ubchist, "reset npages start 0x%jx end 0x%jx",
    462    1.1     pooka 		    startoffset, endoffset, 0,0);
    463    1.1     pooka 		npgs = npages;
    464   1.84        ad 		if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
    465    1.1     pooka 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    466   1.40       chs 			if (!glocked) {
    467   1.40       chs 				genfs_node_unlock(vp);
    468   1.40       chs 			}
    469    1.1     pooka 			KASSERT(async != 0);
    470    1.1     pooka 			genfs_rel_pages(pgs, npages);
    471   1.86        ad 			rw_exit(uobj->vmobjlock);
    472    1.1     pooka 			error = EBUSY;
    473   1.33  uebayasi 			goto out_err_free;
    474    1.1     pooka 		}
    475    1.1     pooka 	}
    476   1.34  uebayasi 
    477   1.86        ad 	rw_exit(uobj->vmobjlock);
    478   1.59  riastrad 	error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
    479   1.59  riastrad 	    async, memwrite, blockalloc, glocked);
    480   1.59  riastrad 	if (!glocked) {
    481   1.59  riastrad 		genfs_node_unlock(vp);
    482   1.59  riastrad 	}
    483   1.67  riastrad 	if (error == 0 && async)
    484   1.67  riastrad 		goto out_err_free;
    485   1.86        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
    486   1.59  riastrad 
    487   1.59  riastrad 	/*
    488   1.59  riastrad 	 * we're almost done!  release the pages...
    489   1.59  riastrad 	 * for errors, we free the pages.
    490   1.59  riastrad 	 * otherwise we activate them and mark them as valid and clean.
    491   1.59  riastrad 	 * also, unbusy pages that were not actually requested.
    492   1.59  riastrad 	 */
    493   1.59  riastrad 
    494   1.59  riastrad 	if (error) {
    495   1.59  riastrad 		genfs_rel_pages(pgs, npages);
    496   1.86        ad 		rw_exit(uobj->vmobjlock);
    497   1.71  pgoyette 		UVMHIST_LOG(ubchist, "returning error %jd", error,0,0,0);
    498   1.59  riastrad 		goto out_err_free;
    499   1.59  riastrad 	}
    500   1.59  riastrad 
    501   1.59  riastrad out:
    502   1.71  pgoyette 	UVMHIST_LOG(ubchist, "succeeding, npages %jd", npages,0,0,0);
    503   1.59  riastrad 	error = 0;
    504   1.59  riastrad 	for (i = 0; i < npages; i++) {
    505   1.59  riastrad 		struct vm_page *pg = pgs[i];
    506   1.59  riastrad 		if (pg == NULL) {
    507   1.59  riastrad 			continue;
    508   1.59  riastrad 		}
    509   1.71  pgoyette 		UVMHIST_LOG(ubchist, "examining pg %#jx flags 0x%jx",
    510   1.71  pgoyette 		    (uintptr_t)pg, pg->flags, 0,0);
    511   1.59  riastrad 		if (pg->flags & PG_FAKE && !overwrite) {
    512   1.84        ad 			/*
    513   1.84        ad 			 * we've read page's contents from the backing storage.
    514   1.84        ad 			 *
    515   1.84        ad 			 * for a read fault, we keep them CLEAN;  if we
    516   1.84        ad 			 * encountered a hole while reading, the pages can
    517   1.84        ad 			 * already been dirtied with zeros.
    518   1.84        ad 			 */
    519   1.84        ad 			KASSERTMSG(blockalloc || uvm_pagegetdirty(pg) ==
    520   1.84        ad 			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
    521   1.84        ad 			pg->flags &= ~PG_FAKE;
    522   1.59  riastrad 		}
    523   1.59  riastrad 		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    524   1.59  riastrad 		if (i < ridx || i >= ridx + orignmempages || async) {
    525   1.71  pgoyette 			UVMHIST_LOG(ubchist, "unbusy pg %#jx offset 0x%jx",
    526   1.71  pgoyette 			    (uintptr_t)pg, pg->offset,0,0);
    527   1.59  riastrad 			if (pg->flags & PG_FAKE) {
    528   1.59  riastrad 				KASSERT(overwrite);
    529   1.59  riastrad 				uvm_pagezero(pg);
    530   1.59  riastrad 			}
    531   1.59  riastrad 			if (pg->flags & PG_RELEASED) {
    532   1.59  riastrad 				uvm_pagefree(pg);
    533   1.59  riastrad 				continue;
    534   1.59  riastrad 			}
    535   1.83        ad 			uvm_pagelock(pg);
    536   1.59  riastrad 			uvm_pageenqueue(pg);
    537   1.94        ad 			uvm_pagewakeup(pg);
    538   1.83        ad 			uvm_pageunlock(pg);
    539   1.94        ad 			pg->flags &= ~(PG_BUSY|PG_FAKE);
    540   1.59  riastrad 			UVM_PAGE_OWN(pg, NULL);
    541   1.84        ad 		} else if (memwrite && !overwrite &&
    542   1.84        ad 		    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
    543   1.84        ad 			/*
    544   1.84        ad 			 * for a write fault, start dirtiness tracking of
    545   1.84        ad 			 * requested pages.
    546   1.84        ad 			 */
    547   1.84        ad 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
    548   1.59  riastrad 		}
    549   1.59  riastrad 	}
    550   1.86        ad 	rw_exit(uobj->vmobjlock);
    551   1.59  riastrad 	if (ap->a_m != NULL) {
    552   1.59  riastrad 		memcpy(ap->a_m, &pgs[ridx],
    553   1.59  riastrad 		    orignmempages * sizeof(struct vm_page *));
    554   1.59  riastrad 	}
    555    1.1     pooka 
    556   1.59  riastrad out_err_free:
    557   1.59  riastrad 	if (pgs != NULL && pgs != pgs_onstack)
    558   1.59  riastrad 		kmem_free(pgs, pgs_size);
    559   1.59  riastrad out_err:
    560   1.64   hannken 	if (trans_mount != NULL) {
    561   1.64   hannken 		if (holds_wapbl)
    562   1.64   hannken 			WAPBL_END(trans_mount);
    563   1.64   hannken 		fstrans_done(trans_mount);
    564   1.59  riastrad 	}
    565   1.59  riastrad 	return error;
    566   1.59  riastrad }
    567   1.59  riastrad 
    568   1.59  riastrad /*
    569   1.59  riastrad  * genfs_getpages_read: Read the pages in with VOP_BMAP/VOP_STRATEGY.
    570   1.68  dholland  *
    571   1.68  dholland  * "glocked" (which is currently not actually used) tells us not whether
    572   1.68  dholland  * the genfs_node is locked on entry (it always is) but whether it was
    573   1.68  dholland  * locked on entry to genfs_getpages.
    574   1.59  riastrad  */
    575   1.59  riastrad static int
    576   1.59  riastrad genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
    577   1.59  riastrad     off_t startoffset, off_t diskeof,
    578   1.59  riastrad     bool async, bool memwrite, bool blockalloc, bool glocked)
    579   1.59  riastrad {
    580   1.59  riastrad 	struct uvm_object * const uobj = &vp->v_uobj;
    581   1.59  riastrad 	const int fs_bshift = (vp->v_type != VBLK) ?
    582   1.59  riastrad 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    583   1.59  riastrad 	const int dev_bshift = (vp->v_type != VBLK) ?
    584   1.59  riastrad 	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
    585   1.59  riastrad 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    586   1.34  uebayasi 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    587   1.34  uebayasi 	vaddr_t kva;
    588   1.34  uebayasi 	struct buf *bp, *mbp;
    589   1.34  uebayasi 	bool sawhole = false;
    590   1.59  riastrad 	int i;
    591   1.59  riastrad 	int error = 0;
    592   1.34  uebayasi 
    593   1.60     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
    594   1.60     skrll 
    595    1.1     pooka 	/*
    596    1.1     pooka 	 * read the desired page(s).
    597    1.1     pooka 	 */
    598    1.1     pooka 
    599    1.1     pooka 	totalbytes = npages << PAGE_SHIFT;
    600    1.1     pooka 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    601    1.1     pooka 	tailbytes = totalbytes - bytes;
    602    1.1     pooka 	skipbytes = 0;
    603    1.1     pooka 
    604    1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
    605   1.55      yamt 	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
    606   1.59  riastrad 	if (kva == 0)
    607   1.59  riastrad 		return EBUSY;
    608    1.1     pooka 
    609    1.2        ad 	mbp = getiobuf(vp, true);
    610    1.1     pooka 	mbp->b_bufsize = totalbytes;
    611    1.1     pooka 	mbp->b_data = (void *)kva;
    612    1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
    613   1.89        ad 	mbp->b_cflags |= BC_BUSY;
    614    1.2        ad 	if (async) {
    615    1.2        ad 		mbp->b_flags = B_READ | B_ASYNC;
    616   1.85       chs 		mbp->b_iodone = uvm_aio_aiodone;
    617    1.2        ad 	} else {
    618    1.2        ad 		mbp->b_flags = B_READ;
    619    1.2        ad 		mbp->b_iodone = NULL;
    620   1.43  uebayasi 	}
    621    1.1     pooka 	if (async)
    622    1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    623    1.1     pooka 	else
    624    1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    625    1.1     pooka 
    626    1.1     pooka 	/*
    627    1.1     pooka 	 * if EOF is in the middle of the range, zero the part past EOF.
    628    1.1     pooka 	 * skip over pages which are not PG_FAKE since in that case they have
    629    1.1     pooka 	 * valid data that we need to preserve.
    630    1.1     pooka 	 */
    631    1.1     pooka 
    632    1.1     pooka 	tailstart = bytes;
    633    1.1     pooka 	while (tailbytes > 0) {
    634    1.1     pooka 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    635    1.1     pooka 
    636    1.1     pooka 		KASSERT(len <= tailbytes);
    637    1.1     pooka 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    638    1.1     pooka 			memset((void *)(kva + tailstart), 0, len);
    639   1.71  pgoyette 			UVMHIST_LOG(ubchist, "tailbytes %#jx 0x%jx 0x%jx",
    640   1.71  pgoyette 			    (uintptr_t)kva, tailstart, len, 0);
    641    1.1     pooka 		}
    642    1.1     pooka 		tailstart += len;
    643    1.1     pooka 		tailbytes -= len;
    644    1.1     pooka 	}
    645    1.1     pooka 
    646    1.1     pooka 	/*
    647    1.1     pooka 	 * now loop over the pages, reading as needed.
    648    1.1     pooka 	 */
    649    1.1     pooka 
    650    1.1     pooka 	bp = NULL;
    651   1.28  uebayasi 	off_t offset;
    652   1.28  uebayasi 	for (offset = startoffset;
    653    1.1     pooka 	    bytes > 0;
    654    1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
    655   1.30  uebayasi 		int run;
    656   1.25  uebayasi 		daddr_t lbn, blkno;
    657   1.24  uebayasi 		int pidx;
    658   1.26  uebayasi 		struct vnode *devvp;
    659    1.1     pooka 
    660    1.1     pooka 		/*
    661    1.1     pooka 		 * skip pages which don't need to be read.
    662    1.1     pooka 		 */
    663    1.1     pooka 
    664    1.1     pooka 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    665    1.1     pooka 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    666    1.1     pooka 			size_t b;
    667    1.1     pooka 
    668    1.1     pooka 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    669    1.1     pooka 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    670    1.1     pooka 				sawhole = true;
    671    1.1     pooka 			}
    672    1.1     pooka 			b = MIN(PAGE_SIZE, bytes);
    673    1.1     pooka 			offset += b;
    674    1.1     pooka 			bytes -= b;
    675    1.1     pooka 			skipbytes += b;
    676    1.1     pooka 			pidx++;
    677   1.71  pgoyette 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%jx",
    678    1.1     pooka 			    offset, 0,0,0);
    679    1.1     pooka 			if (bytes == 0) {
    680    1.1     pooka 				goto loopdone;
    681    1.1     pooka 			}
    682    1.1     pooka 		}
    683    1.1     pooka 
    684    1.1     pooka 		/*
    685    1.1     pooka 		 * bmap the file to find out the blkno to read from and
    686    1.1     pooka 		 * how much we can read in one i/o.  if bmap returns an error,
    687    1.1     pooka 		 * skip the rest of the top-level i/o.
    688    1.1     pooka 		 */
    689    1.1     pooka 
    690    1.1     pooka 		lbn = offset >> fs_bshift;
    691    1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    692    1.1     pooka 		if (error) {
    693  1.101    simonb 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd",
    694   1.36  uebayasi 			    lbn,error,0,0);
    695    1.1     pooka 			skipbytes += bytes;
    696   1.36  uebayasi 			bytes = 0;
    697    1.1     pooka 			goto loopdone;
    698    1.1     pooka 		}
    699    1.1     pooka 
    700    1.1     pooka 		/*
    701    1.1     pooka 		 * see how many pages can be read with this i/o.
    702    1.1     pooka 		 * reduce the i/o size if necessary to avoid
    703    1.1     pooka 		 * overwriting pages with valid data.
    704    1.1     pooka 		 */
    705    1.1     pooka 
    706    1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    707    1.1     pooka 		    bytes);
    708    1.1     pooka 		if (offset + iobytes > round_page(offset)) {
    709   1.24  uebayasi 			int pcount;
    710   1.24  uebayasi 
    711    1.1     pooka 			pcount = 1;
    712    1.1     pooka 			while (pidx + pcount < npages &&
    713    1.1     pooka 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    714    1.1     pooka 				pcount++;
    715    1.1     pooka 			}
    716    1.1     pooka 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    717    1.1     pooka 			    (offset - trunc_page(offset)));
    718    1.1     pooka 		}
    719    1.1     pooka 
    720    1.1     pooka 		/*
    721    1.1     pooka 		 * if this block isn't allocated, zero it instead of
    722    1.1     pooka 		 * reading it.  unless we are going to allocate blocks,
    723    1.1     pooka 		 * mark the pages we zeroed PG_RDONLY.
    724    1.1     pooka 		 */
    725    1.1     pooka 
    726   1.36  uebayasi 		if (blkno == (daddr_t)-1) {
    727    1.1     pooka 			int holepages = (round_page(offset + iobytes) -
    728    1.1     pooka 			    trunc_page(offset)) >> PAGE_SHIFT;
    729   1.71  pgoyette 			UVMHIST_LOG(ubchist, "lbn 0x%jx -> HOLE", lbn,0,0,0);
    730    1.1     pooka 
    731    1.1     pooka 			sawhole = true;
    732    1.1     pooka 			memset((char *)kva + (offset - startoffset), 0,
    733    1.1     pooka 			    iobytes);
    734    1.1     pooka 			skipbytes += iobytes;
    735    1.1     pooka 
    736   1.84        ad 			if (!blockalloc) {
    737   1.86        ad 				rw_enter(uobj->vmobjlock, RW_WRITER);
    738   1.84        ad 				for (i = 0; i < holepages; i++) {
    739    1.1     pooka 					pgs[pidx + i]->flags |= PG_RDONLY;
    740    1.1     pooka 				}
    741   1.86        ad 				rw_exit(uobj->vmobjlock);
    742    1.1     pooka 			}
    743    1.1     pooka 			continue;
    744    1.1     pooka 		}
    745    1.1     pooka 
    746    1.1     pooka 		/*
    747    1.1     pooka 		 * allocate a sub-buf for this piece of the i/o
    748    1.1     pooka 		 * (or just use mbp if there's only 1 piece),
    749    1.1     pooka 		 * and start it going.
    750    1.1     pooka 		 */
    751    1.1     pooka 
    752    1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
    753    1.1     pooka 			bp = mbp;
    754    1.1     pooka 		} else {
    755   1.71  pgoyette 			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
    756   1.71  pgoyette 			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
    757    1.2        ad 			bp = getiobuf(vp, true);
    758    1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    759    1.1     pooka 		}
    760    1.1     pooka 		bp->b_lblkno = 0;
    761    1.1     pooka 
    762    1.1     pooka 		/* adjust physical blkno for partial blocks */
    763    1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    764    1.1     pooka 		    dev_bshift);
    765    1.1     pooka 
    766    1.1     pooka 		UVMHIST_LOG(ubchist,
    767   1.71  pgoyette 		    "bp %#jx offset 0x%x bcount 0x%x blkno 0x%x",
    768   1.71  pgoyette 		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);
    769    1.1     pooka 
    770    1.1     pooka 		VOP_STRATEGY(devvp, bp);
    771    1.1     pooka 	}
    772    1.1     pooka 
    773    1.1     pooka loopdone:
    774    1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
    775    1.1     pooka 	if (async) {
    776    1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    777   1.59  riastrad 		return 0;
    778    1.1     pooka 	}
    779    1.1     pooka 	if (bp != NULL) {
    780    1.1     pooka 		error = biowait(mbp);
    781    1.1     pooka 	}
    782    1.1     pooka 
    783   1.19     rmind 	/* Remove the mapping (make KVA available as soon as possible) */
    784   1.19     rmind 	uvm_pagermapout(kva, npages);
    785   1.19     rmind 
    786    1.1     pooka 	/*
    787    1.1     pooka 	 * if this we encountered a hole then we have to do a little more work.
    788    1.1     pooka 	 * for read faults, we marked the page PG_RDONLY so that future
    789    1.1     pooka 	 * write accesses to the page will fault again.
    790    1.1     pooka 	 * for write faults, we must make sure that the backing store for
    791    1.1     pooka 	 * the page is completely allocated while the pages are locked.
    792    1.1     pooka 	 */
    793    1.1     pooka 
    794    1.1     pooka 	if (!error && sawhole && blockalloc) {
    795   1.42   hannken 		error = GOP_ALLOC(vp, startoffset,
    796   1.42   hannken 		    npages << PAGE_SHIFT, 0, cred);
    797   1.71  pgoyette 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%jx/0x%jx -> %jd",
    798    1.1     pooka 		    startoffset, npages << PAGE_SHIFT, error,0);
    799    1.1     pooka 		if (!error) {
    800   1.86        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
    801    1.1     pooka 			for (i = 0; i < npages; i++) {
    802   1.31  uebayasi 				struct vm_page *pg = pgs[i];
    803   1.31  uebayasi 
    804   1.31  uebayasi 				if (pg == NULL) {
    805    1.1     pooka 					continue;
    806    1.1     pooka 				}
    807   1.84        ad 				pg->flags &= ~PG_RDONLY;
    808   1.84        ad 				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    809   1.71  pgoyette 				UVMHIST_LOG(ubchist, "mark dirty pg %#jx",
    810   1.71  pgoyette 				    (uintptr_t)pg, 0, 0, 0);
    811    1.1     pooka 			}
    812   1.86        ad 			rw_exit(uobj->vmobjlock);
    813    1.1     pooka 		}
    814    1.1     pooka 	}
    815   1.18     rmind 
    816   1.18     rmind 	putiobuf(mbp);
    817   1.38       chs 	return error;
    818    1.1     pooka }
    819    1.1     pooka 
    820    1.1     pooka /*
    821    1.1     pooka  * generic VM putpages routine.
    822    1.1     pooka  * Write the given range of pages to backing store.
    823    1.1     pooka  *
    824    1.1     pooka  * => "offhi == 0" means flush all pages at or after "offlo".
    825    1.1     pooka  * => object should be locked by caller.  we return with the
    826    1.1     pooka  *      object unlocked.
    827    1.1     pooka  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    828    1.1     pooka  *	thus, a caller might want to unlock higher level resources
    829    1.1     pooka  *	(e.g. vm_map) before calling flush.
    830    1.1     pooka  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
    831    1.1     pooka  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
    832    1.1     pooka  *
    833    1.1     pooka  * note on "cleaning" object and PG_BUSY pages:
    834    1.1     pooka  *	this routine is holding the lock on the object.   the only time
    835    1.1     pooka  *	that it can run into a PG_BUSY page that it does not own is if
    836    1.1     pooka  *	some other process has started I/O on the page (e.g. either
    837   1.84        ad  *	a pagein, or a pageout).  if the PG_BUSY page is being paged
    838   1.84        ad  *	in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
    839   1.84        ad  *	one has	had a chance to modify it yet.  if the PG_BUSY page is
    840   1.84        ad  *	being paged out then it means that someone else has already started
    841   1.84        ad  *	cleaning the page for us (how nice!).  in this case, if we
    842    1.1     pooka  *	have syncio specified, then after we make our pass through the
    843    1.1     pooka  *	object we need to wait for the other PG_BUSY pages to clear
    844    1.1     pooka  *	off (i.e. we need to do an iosync).   also note that once a
    845    1.1     pooka  *	page is PG_BUSY it must stay in its object until it is un-busyed.
    846    1.1     pooka  */
    847    1.1     pooka 
    848    1.1     pooka int
    849    1.1     pooka genfs_putpages(void *v)
    850    1.1     pooka {
    851    1.1     pooka 	struct vop_putpages_args /* {
    852    1.1     pooka 		struct vnode *a_vp;
    853    1.1     pooka 		voff_t a_offlo;
    854    1.1     pooka 		voff_t a_offhi;
    855    1.1     pooka 		int a_flags;
    856   1.22  uebayasi 	} */ * const ap = v;
    857    1.1     pooka 
    858    1.1     pooka 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
    859    1.1     pooka 	    ap->a_flags, NULL);
    860    1.1     pooka }
    861    1.1     pooka 
    862    1.1     pooka int
    863    1.4      yamt genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    864    1.4      yamt     int origflags, struct vm_page **busypg)
    865    1.1     pooka {
    866   1.22  uebayasi 	struct uvm_object * const uobj = &vp->v_uobj;
    867   1.86        ad 	krwlock_t * const slock = uobj->vmobjlock;
    868   1.78        ad 	off_t nextoff;
    869    1.2        ad 	int i, error, npages, nback;
    870    1.1     pooka 	int freeflag;
    871   1.63  christos 	/*
    872   1.63  christos 	 * This array is larger than it should so that it's size is constant.
    873   1.63  christos 	 * The right size is MAXPAGES.
    874   1.63  christos 	 */
    875   1.63  christos 	struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
    876   1.63  christos #define MAXPAGES (MAXPHYS / PAGE_SIZE)
    877   1.78        ad 	struct vm_page *pg, *tpg;
    878   1.78        ad 	struct uvm_page_array a;
    879   1.78        ad 	bool wasclean, needs_clean;
    880    1.4      yamt 	bool async = (origflags & PGO_SYNCIO) == 0;
    881    1.1     pooka 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
    882   1.65   hannken 	struct mount *trans_mp;
    883    1.4      yamt 	int flags;
    884   1.84        ad 	bool modified;		/* if we write out any pages */
    885   1.65   hannken 	bool holds_wapbl;
    886   1.84        ad 	bool cleanall;		/* try to pull off from the syncer's list */
    887    1.4      yamt 	bool onworklst;
    888   1.86        ad 	bool nodirty;
    889   1.84        ad 	const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;
    890    1.1     pooka 
    891    1.1     pooka 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
    892    1.1     pooka 
    893    1.4      yamt 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
    894  1.103  riastrad 	KASSERT((startoff & PAGE_MASK) == 0);
    895  1.103  riastrad 	KASSERT((endoff & PAGE_MASK) == 0);
    896    1.1     pooka 	KASSERT(startoff < endoff || endoff == 0);
    897   1.86        ad 	KASSERT(rw_write_held(slock));
    898    1.1     pooka 
    899   1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pages %jd off 0x%jx len 0x%jx",
    900   1.71  pgoyette 	    (uintptr_t)vp, uobj->uo_npages, startoff, endoff - startoff);
    901    1.1     pooka 
    902   1.74  jdolecek #ifdef DIAGNOSTIC
    903   1.74  jdolecek 	if ((origflags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
    904   1.74  jdolecek                 WAPBL_JLOCK_ASSERT(vp->v_mount);
    905   1.74  jdolecek #endif
    906   1.74  jdolecek 
    907   1.65   hannken 	trans_mp = NULL;
    908   1.65   hannken 	holds_wapbl = false;
    909    1.6   hannken 
    910    1.4      yamt retry:
    911    1.4      yamt 	modified = false;
    912    1.4      yamt 	flags = origflags;
    913   1.84        ad 
    914   1.84        ad 	/*
    915   1.84        ad 	 * shortcut if we have no pages to process.
    916   1.84        ad 	 */
    917   1.84        ad 
    918  1.100       chs 	nodirty = uvm_obj_clean_p(uobj);
    919   1.93        ad #ifdef DIAGNOSTIC
    920   1.93        ad 	mutex_enter(vp->v_interlock);
    921   1.93        ad 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
    922   1.93        ad 	mutex_exit(vp->v_interlock);
    923   1.93        ad #endif
    924   1.86        ad 	if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
    925   1.86        ad 		mutex_enter(vp->v_interlock);
    926  1.100       chs 		if (vp->v_iflag & VI_ONWORKLST && LIST_EMPTY(&vp->v_dirtyblkhd)) {
    927  1.100       chs 			vn_syncer_remove_from_worklist(vp);
    928    1.1     pooka 		}
    929   1.86        ad 		mutex_exit(vp->v_interlock);
    930   1.65   hannken 		if (trans_mp) {
    931   1.65   hannken 			if (holds_wapbl)
    932   1.65   hannken 				WAPBL_END(trans_mp);
    933   1.65   hannken 			fstrans_done(trans_mp);
    934   1.12   hannken 		}
    935   1.86        ad 		rw_exit(slock);
    936    1.1     pooka 		return (0);
    937    1.1     pooka 	}
    938    1.1     pooka 
    939    1.1     pooka 	/*
    940    1.1     pooka 	 * the vnode has pages, set up to process the request.
    941    1.1     pooka 	 */
    942    1.1     pooka 
    943   1.65   hannken 	if (trans_mp == NULL && (flags & PGO_CLEANIT) != 0) {
    944    1.1     pooka 		if (pagedaemon) {
    945   1.65   hannken 			/* Pagedaemon must not sleep here. */
    946   1.65   hannken 			trans_mp = vp->v_mount;
    947   1.69   hannken 			error = fstrans_start_nowait(trans_mp);
    948   1.12   hannken 			if (error) {
    949   1.86        ad 				rw_exit(slock);
    950   1.12   hannken 				return error;
    951   1.12   hannken 			}
    952   1.65   hannken 		} else {
    953   1.65   hannken 			/*
    954   1.65   hannken 			 * Cannot use vdeadcheck() here as this operation
    955   1.65   hannken 			 * usually gets used from VOP_RECLAIM().  Test for
    956   1.65   hannken 			 * change of v_mount instead and retry on change.
    957   1.65   hannken 			 */
    958   1.86        ad 			rw_exit(slock);
    959   1.65   hannken 			trans_mp = vp->v_mount;
    960   1.69   hannken 			fstrans_start(trans_mp);
    961   1.65   hannken 			if (vp->v_mount != trans_mp) {
    962   1.65   hannken 				fstrans_done(trans_mp);
    963   1.65   hannken 				trans_mp = NULL;
    964   1.65   hannken 			} else {
    965   1.65   hannken 				holds_wapbl = (trans_mp->mnt_wapbl &&
    966   1.65   hannken 				    (origflags & PGO_JOURNALLOCKED) == 0);
    967   1.65   hannken 				if (holds_wapbl) {
    968   1.65   hannken 					error = WAPBL_BEGIN(trans_mp);
    969   1.65   hannken 					if (error) {
    970   1.65   hannken 						fstrans_done(trans_mp);
    971   1.65   hannken 						return error;
    972   1.65   hannken 					}
    973   1.65   hannken 				}
    974   1.65   hannken 			}
    975   1.86        ad 			rw_enter(slock, RW_WRITER);
    976   1.65   hannken 			goto retry;
    977   1.12   hannken 		}
    978    1.1     pooka 	}
    979    1.1     pooka 
    980    1.1     pooka 	error = 0;
    981  1.100       chs 	wasclean = uvm_obj_nowriteback_p(uobj);
    982   1.78        ad 	nextoff = startoff;
    983    1.1     pooka 	if (endoff == 0 || flags & PGO_ALLPAGES) {
    984    1.1     pooka 		endoff = trunc_page(LLONG_MAX);
    985    1.1     pooka 	}
    986    1.1     pooka 
    987    1.1     pooka 	/*
    988    1.1     pooka 	 * if this vnode is known not to have dirty pages,
    989    1.1     pooka 	 * don't bother to clean it out.
    990    1.1     pooka 	 */
    991    1.1     pooka 
    992   1.86        ad 	if (nodirty) {
    993  1.102  riastrad 		/* We handled the dirtyonly && nodirty case above.  */
    994  1.102  riastrad 		KASSERT(!dirtyonly);
    995    1.1     pooka 		flags &= ~PGO_CLEANIT;
    996    1.1     pooka 	}
    997    1.1     pooka 
    998    1.1     pooka 	/*
    999   1.78        ad 	 * start the loop to scan pages.
   1000    1.1     pooka 	 */
   1001    1.1     pooka 
   1002   1.84        ad 	cleanall = true;
   1003    1.1     pooka 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1004   1.97        ad 	uvm_page_array_init(&a, uobj, dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
   1005   1.97        ad 	    (!async ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
   1006   1.78        ad 	for (;;) {
   1007   1.84        ad 		bool pgprotected;
   1008   1.84        ad 
   1009   1.78        ad 		/*
   1010   1.84        ad 		 * if !dirtyonly, iterate over all resident pages in the range.
   1011   1.84        ad 		 *
   1012   1.84        ad 		 * if dirtyonly, only possibly dirty pages are interesting.
   1013   1.84        ad 		 * however, if we are asked to sync for integrity, we should
   1014   1.84        ad 		 * wait on pages being written back by other threads as well.
   1015   1.78        ad 		 */
   1016   1.78        ad 
   1017   1.97        ad 		pg = uvm_page_array_fill_and_peek(&a, nextoff, 0);
   1018   1.78        ad 		if (pg == NULL) {
   1019   1.78        ad 			break;
   1020   1.78        ad 		}
   1021   1.78        ad 
   1022   1.78        ad 		KASSERT(pg->uobject == uobj);
   1023   1.78        ad 		KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1024   1.78        ad 		    (pg->flags & (PG_BUSY)) != 0);
   1025   1.78        ad 		KASSERT(pg->offset >= startoff);
   1026   1.78        ad 		KASSERT(pg->offset >= nextoff);
   1027   1.84        ad 		KASSERT(!dirtyonly ||
   1028   1.84        ad 		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
   1029  1.100       chs 		    uvm_obj_page_writeback_p(pg));
   1030   1.78        ad 
   1031   1.78        ad 		if (pg->offset >= endoff) {
   1032   1.78        ad 			break;
   1033   1.78        ad 		}
   1034   1.78        ad 
   1035    1.1     pooka 		/*
   1036   1.78        ad 		 * a preempt point.
   1037    1.1     pooka 		 */
   1038    1.1     pooka 
   1039   1.90        ad 		if (preempt_needed()) {
   1040   1.78        ad 			nextoff = pg->offset; /* visit this page again */
   1041   1.86        ad 			rw_exit(slock);
   1042   1.78        ad 			preempt();
   1043   1.78        ad 			/*
   1044   1.78        ad 			 * as we dropped the object lock, our cached pages can
   1045   1.78        ad 			 * be stale.
   1046   1.78        ad 			 */
   1047   1.78        ad 			uvm_page_array_clear(&a);
   1048   1.86        ad 			rw_enter(slock, RW_WRITER);
   1049    1.1     pooka 			continue;
   1050    1.1     pooka 		}
   1051    1.1     pooka 
   1052    1.1     pooka 		/*
   1053   1.84        ad 		 * if the current page is busy, wait for it to become unbusy.
   1054    1.1     pooka 		 */
   1055    1.1     pooka 
   1056   1.84        ad 		if ((pg->flags & PG_BUSY) != 0) {
   1057   1.71  pgoyette 			UVMHIST_LOG(ubchist, "busy %#jx", (uintptr_t)pg,
   1058   1.71  pgoyette 			   0, 0, 0);
   1059   1.84        ad 			if ((pg->flags & (PG_RELEASED|PG_PAGEOUT)) != 0
   1060   1.84        ad 			    && (flags & PGO_BUSYFAIL) != 0) {
   1061   1.71  pgoyette 				UVMHIST_LOG(ubchist, "busyfail %#jx",
   1062   1.71  pgoyette 				    (uintptr_t)pg, 0, 0, 0);
   1063    1.1     pooka 				error = EDEADLK;
   1064    1.1     pooka 				if (busypg != NULL)
   1065    1.1     pooka 					*busypg = pg;
   1066    1.1     pooka 				break;
   1067    1.1     pooka 			}
   1068    1.1     pooka 			if (pagedaemon) {
   1069    1.1     pooka 				/*
   1070    1.1     pooka 				 * someone has taken the page while we
   1071    1.1     pooka 				 * dropped the lock for fstrans_start.
   1072    1.1     pooka 				 */
   1073    1.1     pooka 				break;
   1074    1.1     pooka 			}
   1075   1.84        ad 			/*
   1076   1.84        ad 			 * don't bother to wait on other's activities
   1077   1.84        ad 			 * unless we are asked to sync for integrity.
   1078   1.84        ad 			 */
   1079   1.84        ad 			if (!async && (flags & PGO_RECLAIM) == 0) {
   1080   1.84        ad 				wasclean = false;
   1081   1.84        ad 				nextoff = pg->offset + PAGE_SIZE;
   1082   1.84        ad 				uvm_page_array_advance(&a);
   1083   1.84        ad 				continue;
   1084   1.84        ad 			}
   1085   1.78        ad 			nextoff = pg->offset; /* visit this page again */
   1086   1.92        ad 			uvm_pagewait(pg, slock, "genput");
   1087   1.78        ad 			/*
   1088   1.78        ad 			 * as we dropped the object lock, our cached pages can
   1089   1.78        ad 			 * be stale.
   1090   1.78        ad 			 */
   1091   1.78        ad 			uvm_page_array_clear(&a);
   1092   1.86        ad 			rw_enter(slock, RW_WRITER);
   1093    1.1     pooka 			continue;
   1094    1.1     pooka 		}
   1095    1.1     pooka 
   1096   1.78        ad 		nextoff = pg->offset + PAGE_SIZE;
   1097   1.78        ad 		uvm_page_array_advance(&a);
   1098   1.78        ad 
   1099    1.1     pooka 		/*
   1100    1.1     pooka 		 * if we're freeing, remove all mappings of the page now.
   1101    1.1     pooka 		 * if we're cleaning, check if the page is needs to be cleaned.
   1102    1.1     pooka 		 */
   1103    1.1     pooka 
   1104   1.84        ad 		pgprotected = false;
   1105    1.1     pooka 		if (flags & PGO_FREE) {
   1106    1.1     pooka 			pmap_page_protect(pg, VM_PROT_NONE);
   1107   1.84        ad 			pgprotected = true;
   1108    1.1     pooka 		} else if (flags & PGO_CLEANIT) {
   1109    1.1     pooka 
   1110    1.1     pooka 			/*
   1111    1.1     pooka 			 * if we still have some hope to pull this vnode off
   1112    1.1     pooka 			 * from the syncer queue, write-protect the page.
   1113    1.1     pooka 			 */
   1114    1.1     pooka 
   1115   1.84        ad 			if (cleanall && wasclean) {
   1116    1.1     pooka 
   1117    1.1     pooka 				/*
   1118    1.1     pooka 				 * uobj pages get wired only by uvm_fault
   1119    1.1     pooka 				 * where uobj is locked.
   1120    1.1     pooka 				 */
   1121    1.1     pooka 
   1122    1.1     pooka 				if (pg->wire_count == 0) {
   1123    1.1     pooka 					pmap_page_protect(pg,
   1124    1.1     pooka 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1125   1.84        ad 					pgprotected = true;
   1126    1.1     pooka 				} else {
   1127    1.1     pooka 					cleanall = false;
   1128    1.1     pooka 				}
   1129    1.1     pooka 			}
   1130    1.1     pooka 		}
   1131    1.1     pooka 
   1132    1.1     pooka 		if (flags & PGO_CLEANIT) {
   1133   1.84        ad 			needs_clean = uvm_pagecheckdirty(pg, pgprotected);
   1134    1.1     pooka 		} else {
   1135    1.1     pooka 			needs_clean = false;
   1136    1.1     pooka 		}
   1137    1.1     pooka 
   1138    1.1     pooka 		/*
   1139    1.1     pooka 		 * if we're cleaning, build a cluster.
   1140   1.84        ad 		 * the cluster will consist of pages which are currently dirty.
   1141    1.1     pooka 		 * if not cleaning, just operate on the one page.
   1142    1.1     pooka 		 */
   1143    1.1     pooka 
   1144    1.1     pooka 		if (needs_clean) {
   1145    1.1     pooka 			wasclean = false;
   1146    1.1     pooka 			memset(pgs, 0, sizeof(pgs));
   1147    1.1     pooka 			pg->flags |= PG_BUSY;
   1148    1.1     pooka 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1149    1.1     pooka 
   1150    1.1     pooka 			/*
   1151   1.72       chs 			 * let the fs constrain the offset range of the cluster.
   1152   1.72       chs 			 * we additionally constrain the range here such that
   1153   1.72       chs 			 * it fits in the "pgs" pages array.
   1154   1.72       chs 			 */
   1155   1.72       chs 
   1156   1.78        ad 			off_t fslo, fshi, genlo, lo, off = pg->offset;
   1157   1.72       chs 			GOP_PUTRANGE(vp, off, &fslo, &fshi);
   1158   1.72       chs 			KASSERT(fslo == trunc_page(fslo));
   1159   1.72       chs 			KASSERT(fslo <= off);
   1160   1.72       chs 			KASSERT(fshi == trunc_page(fshi));
   1161   1.72       chs 			KASSERT(fshi == 0 || off < fshi);
   1162   1.72       chs 
   1163   1.72       chs 			if (off > MAXPHYS / 2)
   1164   1.72       chs 				genlo = trunc_page(off - (MAXPHYS / 2));
   1165   1.72       chs 			else
   1166   1.72       chs 				genlo = 0;
   1167   1.72       chs 			lo = MAX(fslo, genlo);
   1168   1.72       chs 
   1169   1.72       chs 			/*
   1170    1.1     pooka 			 * first look backward.
   1171    1.1     pooka 			 */
   1172    1.1     pooka 
   1173   1.72       chs 			npages = (off - lo) >> PAGE_SHIFT;
   1174    1.1     pooka 			nback = npages;
   1175   1.84        ad 			uvn_findpages(uobj, off - PAGE_SIZE, &nback,
   1176   1.84        ad 			    &pgs[0], NULL,
   1177    1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1178    1.1     pooka 			if (nback) {
   1179    1.1     pooka 				memmove(&pgs[0], &pgs[npages - nback],
   1180    1.1     pooka 				    nback * sizeof(pgs[0]));
   1181    1.1     pooka 				if (npages - nback < nback)
   1182    1.1     pooka 					memset(&pgs[nback], 0,
   1183    1.1     pooka 					    (npages - nback) * sizeof(pgs[0]));
   1184    1.1     pooka 				else
   1185    1.1     pooka 					memset(&pgs[npages - nback], 0,
   1186    1.1     pooka 					    nback * sizeof(pgs[0]));
   1187    1.1     pooka 			}
   1188    1.1     pooka 
   1189    1.1     pooka 			/*
   1190    1.1     pooka 			 * then plug in our page of interest.
   1191    1.1     pooka 			 */
   1192    1.1     pooka 
   1193    1.1     pooka 			pgs[nback] = pg;
   1194    1.1     pooka 
   1195    1.1     pooka 			/*
   1196    1.1     pooka 			 * then look forward to fill in the remaining space in
   1197    1.1     pooka 			 * the array of pages.
   1198   1.84        ad 			 *
   1199   1.84        ad 			 * pass our cached array of pages so that hopefully
   1200   1.84        ad 			 * uvn_findpages can find some good pages in it.
   1201   1.84        ad 			 * the array a was filled above with the one of
   1202   1.84        ad 			 * following sets of flags:
   1203   1.84        ad 			 *	0
   1204   1.84        ad 			 *	UVM_PAGE_ARRAY_FILL_DIRTY
   1205   1.84        ad 			 *	UVM_PAGE_ARRAY_FILL_DIRTY|WRITEBACK
   1206   1.98        ad 			 *
   1207   1.98        ad 			 * XXX this is fragile but it'll work: the array
   1208   1.98        ad 			 * was earlier filled sparsely, but UFP_DIRTYONLY
   1209   1.98        ad 			 * implies dense.  see corresponding comment in
   1210   1.98        ad 			 * uvn_findpages().
   1211    1.1     pooka 			 */
   1212    1.1     pooka 
   1213   1.62  christos 			npages = MAXPAGES - nback - 1;
   1214   1.72       chs 			if (fshi)
   1215   1.72       chs 				npages = MIN(npages,
   1216   1.72       chs 					     (fshi - off - 1) >> PAGE_SHIFT);
   1217    1.1     pooka 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1218   1.98        ad 			    &pgs[nback + 1], &a,
   1219    1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1220    1.1     pooka 			npages += nback + 1;
   1221    1.1     pooka 		} else {
   1222    1.1     pooka 			pgs[0] = pg;
   1223    1.1     pooka 			npages = 1;
   1224    1.1     pooka 			nback = 0;
   1225    1.1     pooka 		}
   1226    1.1     pooka 
   1227    1.1     pooka 		/*
   1228    1.1     pooka 		 * apply FREE or DEACTIVATE options if requested.
   1229    1.1     pooka 		 */
   1230    1.1     pooka 
   1231    1.1     pooka 		for (i = 0; i < npages; i++) {
   1232    1.1     pooka 			tpg = pgs[i];
   1233    1.1     pooka 			KASSERT(tpg->uobject == uobj);
   1234   1.84        ad 			KASSERT(i == 0 ||
   1235   1.84        ad 			    pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
   1236   1.84        ad 			KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
   1237   1.84        ad 			    UVM_PAGE_STATUS_DIRTY);
   1238   1.84        ad 			if (needs_clean) {
   1239   1.84        ad 				/*
   1240   1.84        ad 				 * mark pages as WRITEBACK so that concurrent
   1241   1.84        ad 				 * fsync can find and wait for our activities.
   1242   1.84        ad 				 */
   1243  1.100       chs 				uvm_obj_page_set_writeback(pgs[i]);
   1244   1.84        ad 			}
   1245    1.1     pooka 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1246    1.1     pooka 				continue;
   1247    1.1     pooka 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1248   1.83        ad 				uvm_pagelock(tpg);
   1249    1.1     pooka 				uvm_pagedeactivate(tpg);
   1250   1.83        ad 				uvm_pageunlock(tpg);
   1251    1.1     pooka 			} else if (flags & PGO_FREE) {
   1252    1.1     pooka 				pmap_page_protect(tpg, VM_PROT_NONE);
   1253    1.1     pooka 				if (tpg->flags & PG_BUSY) {
   1254    1.1     pooka 					tpg->flags |= freeflag;
   1255    1.1     pooka 					if (pagedaemon) {
   1256    1.2        ad 						uvm_pageout_start(1);
   1257   1.83        ad 						uvm_pagelock(tpg);
   1258    1.1     pooka 						uvm_pagedequeue(tpg);
   1259   1.83        ad 						uvm_pageunlock(tpg);
   1260    1.1     pooka 					}
   1261    1.1     pooka 				} else {
   1262    1.1     pooka 
   1263    1.1     pooka 					/*
   1264    1.1     pooka 					 * ``page is not busy''
   1265    1.1     pooka 					 * implies that npages is 1
   1266    1.1     pooka 					 * and needs_clean is false.
   1267    1.1     pooka 					 */
   1268    1.1     pooka 
   1269   1.78        ad 					KASSERT(npages == 1);
   1270   1.78        ad 					KASSERT(!needs_clean);
   1271   1.78        ad 					KASSERT(pg == tpg);
   1272   1.78        ad 					KASSERT(nextoff ==
   1273   1.78        ad 					    tpg->offset + PAGE_SIZE);
   1274    1.1     pooka 					uvm_pagefree(tpg);
   1275    1.1     pooka 					if (pagedaemon)
   1276    1.1     pooka 						uvmexp.pdfreed++;
   1277    1.1     pooka 				}
   1278    1.1     pooka 			}
   1279    1.1     pooka 		}
   1280    1.1     pooka 		if (needs_clean) {
   1281    1.1     pooka 			modified = true;
   1282   1.78        ad 			KASSERT(nextoff == pg->offset + PAGE_SIZE);
   1283   1.78        ad 			KASSERT(nback < npages);
   1284   1.78        ad 			nextoff = pg->offset + ((npages - nback) << PAGE_SHIFT);
   1285   1.78        ad 			KASSERT(pgs[nback] == pg);
   1286   1.78        ad 			KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);
   1287    1.1     pooka 
   1288    1.1     pooka 			/*
   1289   1.78        ad 			 * start the i/o.
   1290    1.1     pooka 			 */
   1291   1.86        ad 			rw_exit(slock);
   1292    1.1     pooka 			error = GOP_WRITE(vp, pgs, npages, flags);
   1293   1.78        ad 			/*
   1294   1.78        ad 			 * as we dropped the object lock, our cached pages can
   1295   1.78        ad 			 * be stale.
   1296   1.78        ad 			 */
   1297   1.78        ad 			uvm_page_array_clear(&a);
   1298   1.86        ad 			rw_enter(slock, RW_WRITER);
   1299    1.1     pooka 			if (error) {
   1300    1.1     pooka 				break;
   1301    1.1     pooka 			}
   1302    1.1     pooka 		}
   1303    1.1     pooka 	}
   1304   1.78        ad 	uvm_page_array_fini(&a);
   1305    1.1     pooka 
   1306   1.84        ad 	/*
   1307   1.84        ad 	 * update ctime/mtime if the modification we started writing out might
   1308   1.84        ad 	 * be from mmap'ed write.
   1309   1.84        ad 	 *
   1310   1.84        ad 	 * this is necessary when an application keeps a file mmaped and
   1311   1.84        ad 	 * repeatedly modifies it via the window.  note that, because we
   1312   1.84        ad 	 * don't always write-protect pages when cleaning, such modifications
   1313   1.84        ad 	 * might not involve any page faults.
   1314   1.84        ad 	 */
   1315   1.84        ad 
   1316   1.86        ad 	mutex_enter(vp->v_interlock);
   1317   1.93        ad 	if (modified && (vp->v_iflag & VI_WRMAP) != 0 &&
   1318    1.1     pooka 	    (vp->v_type != VBLK ||
   1319    1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1320    1.1     pooka 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1321    1.1     pooka 	}
   1322    1.1     pooka 
   1323    1.1     pooka 	/*
   1324   1.84        ad 	 * if we no longer have any possibly dirty pages, take us off the
   1325   1.84        ad 	 * syncer list.
   1326    1.1     pooka 	 */
   1327    1.1     pooka 
   1328  1.100       chs 	if ((vp->v_iflag & VI_ONWORKLST) != 0 && uvm_obj_clean_p(uobj) &&
   1329  1.100       chs 	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
   1330  1.100       chs 		vn_syncer_remove_from_worklist(vp);
   1331    1.1     pooka 	}
   1332    1.1     pooka 
   1333    1.2        ad 	/* Wait for output to complete. */
   1334   1.86        ad 	rw_exit(slock);
   1335    1.2        ad 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1336    1.2        ad 		while (vp->v_numoutput != 0)
   1337   1.86        ad 			cv_wait(&vp->v_cv, vp->v_interlock);
   1338    1.1     pooka 	}
   1339    1.4      yamt 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1340   1.86        ad 	mutex_exit(vp->v_interlock);
   1341    1.1     pooka 
   1342    1.4      yamt 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1343    1.4      yamt 		/*
   1344    1.4      yamt 		 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
   1345    1.4      yamt 		 * retrying is not a big deal because, in many cases,
   1346    1.4      yamt 		 * uobj->uo_npages is already 0 here.
   1347    1.4      yamt 		 */
   1348   1.86        ad 		rw_enter(slock, RW_WRITER);
   1349    1.4      yamt 		goto retry;
   1350    1.4      yamt 	}
   1351    1.4      yamt 
   1352   1.65   hannken 	if (trans_mp) {
   1353   1.65   hannken 		if (holds_wapbl)
   1354   1.65   hannken 			WAPBL_END(trans_mp);
   1355   1.65   hannken 		fstrans_done(trans_mp);
   1356   1.12   hannken 	}
   1357    1.6   hannken 
   1358    1.1     pooka 	return (error);
   1359    1.1     pooka }
   1360    1.1     pooka 
   1361   1.72       chs /*
   1362   1.72       chs  * Default putrange method for file systems that do not care
   1363   1.72       chs  * how many pages are given to one GOP_WRITE() call.
   1364   1.72       chs  */
   1365   1.72       chs void
   1366   1.72       chs genfs_gop_putrange(struct vnode *vp, off_t off, off_t *lop, off_t *hip)
   1367   1.72       chs {
   1368   1.72       chs 
   1369   1.72       chs 	*lop = 0;
   1370   1.72       chs 	*hip = 0;
   1371   1.72       chs }
   1372   1.72       chs 
   1373    1.1     pooka int
   1374    1.1     pooka genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1375    1.1     pooka {
   1376    1.1     pooka 	off_t off;
   1377    1.1     pooka 	vaddr_t kva;
   1378    1.1     pooka 	size_t len;
   1379    1.1     pooka 	int error;
   1380    1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1381    1.1     pooka 
   1382   1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
   1383   1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
   1384    1.1     pooka 
   1385    1.1     pooka 	off = pgs[0]->offset;
   1386    1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1387    1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1388    1.1     pooka 	len = npages << PAGE_SHIFT;
   1389    1.1     pooka 
   1390    1.1     pooka 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1391   1.85       chs 			    uvm_aio_aiodone);
   1392    1.1     pooka 
   1393    1.1     pooka 	return error;
   1394    1.1     pooka }
   1395    1.1     pooka 
   1396   1.78        ad /*
   1397   1.78        ad  * genfs_gop_write_rwmap:
   1398   1.78        ad  *
   1399   1.78        ad  * a variant of genfs_gop_write.  it's used by UDF for its directory buffers.
   1400   1.78        ad  * this maps pages with PROT_WRITE so that VOP_STRATEGY can modifies
   1401   1.78        ad  * the contents before writing it out to the underlying storage.
   1402   1.78        ad  */
   1403   1.78        ad 
   1404    1.7   reinoud int
   1405   1.78        ad genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
   1406   1.78        ad     int flags)
   1407    1.7   reinoud {
   1408    1.7   reinoud 	off_t off;
   1409    1.7   reinoud 	vaddr_t kva;
   1410    1.7   reinoud 	size_t len;
   1411    1.7   reinoud 	int error;
   1412    1.7   reinoud 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1413    1.7   reinoud 
   1414   1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
   1415   1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
   1416    1.7   reinoud 
   1417    1.7   reinoud 	off = pgs[0]->offset;
   1418    1.7   reinoud 	kva = uvm_pagermapin(pgs, npages,
   1419    1.7   reinoud 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1420    1.7   reinoud 	len = npages << PAGE_SHIFT;
   1421    1.7   reinoud 
   1422    1.7   reinoud 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1423   1.85       chs 			    uvm_aio_aiodone);
   1424    1.7   reinoud 
   1425    1.7   reinoud 	return error;
   1426    1.7   reinoud }
   1427    1.7   reinoud 
   1428    1.1     pooka /*
   1429    1.1     pooka  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1430    1.1     pooka  * and mapped into kernel memory.  Here we just look up the underlying
   1431    1.1     pooka  * device block addresses and call the strategy routine.
   1432    1.1     pooka  */
   1433    1.1     pooka 
   1434    1.1     pooka static int
   1435    1.1     pooka genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1436    1.1     pooka     enum uio_rw rw, void (*iodone)(struct buf *))
   1437    1.1     pooka {
   1438   1.36  uebayasi 	int s, error;
   1439    1.1     pooka 	int fs_bshift, dev_bshift;
   1440    1.1     pooka 	off_t eof, offset, startoffset;
   1441    1.1     pooka 	size_t bytes, iobytes, skipbytes;
   1442    1.1     pooka 	struct buf *mbp, *bp;
   1443   1.35  uebayasi 	const bool async = (flags & PGO_SYNCIO) == 0;
   1444   1.54       chs 	const bool lazy = (flags & PGO_LAZY) == 0;
   1445   1.35  uebayasi 	const bool iowrite = rw == UIO_WRITE;
   1446   1.35  uebayasi 	const int brw = iowrite ? B_WRITE : B_READ;
   1447    1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1448    1.1     pooka 
   1449   1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx kva %#jx len 0x%jx flags 0x%jx",
   1450   1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)kva, len, flags);
   1451    1.1     pooka 
   1452  1.104  riastrad 	KASSERT(vp->v_size != VSIZENOTSET);
   1453  1.104  riastrad 	KASSERT(vp->v_writesize != VSIZENOTSET);
   1454  1.104  riastrad 	KASSERTMSG(vp->v_size <= vp->v_writesize, "vp=%p"
   1455  1.104  riastrad 	    " v_size=0x%llx v_writesize=0x%llx", vp,
   1456  1.104  riastrad 	    (unsigned long long)vp->v_size,
   1457  1.104  riastrad 	    (unsigned long long)vp->v_writesize);
   1458    1.1     pooka 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1459    1.1     pooka 	if (vp->v_type != VBLK) {
   1460    1.1     pooka 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1461    1.1     pooka 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1462    1.1     pooka 	} else {
   1463    1.1     pooka 		fs_bshift = DEV_BSHIFT;
   1464    1.1     pooka 		dev_bshift = DEV_BSHIFT;
   1465    1.1     pooka 	}
   1466    1.1     pooka 	error = 0;
   1467    1.1     pooka 	startoffset = off;
   1468    1.1     pooka 	bytes = MIN(len, eof - startoffset);
   1469    1.1     pooka 	skipbytes = 0;
   1470    1.1     pooka 	KASSERT(bytes != 0);
   1471    1.1     pooka 
   1472   1.35  uebayasi 	if (iowrite) {
   1473   1.78        ad 		/*
   1474   1.78        ad 		 * why += 2?
   1475   1.78        ad 		 * 1 for biodone, 1 for uvm_aio_aiodone.
   1476   1.78        ad 		 */
   1477   1.49     rmind 		mutex_enter(vp->v_interlock);
   1478    1.1     pooka 		vp->v_numoutput += 2;
   1479   1.49     rmind 		mutex_exit(vp->v_interlock);
   1480    1.1     pooka 	}
   1481    1.2        ad 	mbp = getiobuf(vp, true);
   1482   1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx mbp %#jx num now %jd bytes 0x%jx",
   1483   1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)mbp, vp->v_numoutput, bytes);
   1484    1.1     pooka 	mbp->b_bufsize = len;
   1485    1.1     pooka 	mbp->b_data = (void *)kva;
   1486    1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
   1487   1.89        ad 	mbp->b_cflags |= BC_BUSY | BC_AGE;
   1488    1.2        ad 	if (async) {
   1489    1.2        ad 		mbp->b_flags = brw | B_ASYNC;
   1490    1.2        ad 		mbp->b_iodone = iodone;
   1491    1.2        ad 	} else {
   1492    1.2        ad 		mbp->b_flags = brw;
   1493    1.2        ad 		mbp->b_iodone = NULL;
   1494    1.2        ad 	}
   1495    1.1     pooka 	if (curlwp == uvm.pagedaemon_lwp)
   1496    1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1497   1.54       chs 	else if (async || lazy)
   1498    1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1499    1.1     pooka 	else
   1500    1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1501    1.1     pooka 
   1502    1.1     pooka 	bp = NULL;
   1503    1.1     pooka 	for (offset = startoffset;
   1504    1.1     pooka 	    bytes > 0;
   1505    1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
   1506   1.36  uebayasi 		int run;
   1507   1.36  uebayasi 		daddr_t lbn, blkno;
   1508   1.36  uebayasi 		struct vnode *devvp;
   1509   1.36  uebayasi 
   1510   1.36  uebayasi 		/*
   1511   1.36  uebayasi 		 * bmap the file to find out the blkno to read from and
   1512   1.36  uebayasi 		 * how much we can read in one i/o.  if bmap returns an error,
   1513   1.36  uebayasi 		 * skip the rest of the top-level i/o.
   1514   1.36  uebayasi 		 */
   1515   1.36  uebayasi 
   1516    1.1     pooka 		lbn = offset >> fs_bshift;
   1517    1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1518    1.1     pooka 		if (error) {
   1519  1.101    simonb 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd",
   1520   1.71  pgoyette 			    lbn, error, 0, 0);
   1521    1.1     pooka 			skipbytes += bytes;
   1522    1.1     pooka 			bytes = 0;
   1523   1.36  uebayasi 			goto loopdone;
   1524    1.1     pooka 		}
   1525    1.1     pooka 
   1526   1.36  uebayasi 		/*
   1527   1.36  uebayasi 		 * see how many pages can be read with this i/o.
   1528   1.36  uebayasi 		 * reduce the i/o size if necessary to avoid
   1529   1.36  uebayasi 		 * overwriting pages with valid data.
   1530   1.36  uebayasi 		 */
   1531   1.36  uebayasi 
   1532    1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1533    1.1     pooka 		    bytes);
   1534   1.36  uebayasi 
   1535   1.36  uebayasi 		/*
   1536   1.36  uebayasi 		 * if this block isn't allocated, zero it instead of
   1537   1.36  uebayasi 		 * reading it.  unless we are going to allocate blocks,
   1538   1.36  uebayasi 		 * mark the pages we zeroed PG_RDONLY.
   1539   1.36  uebayasi 		 */
   1540   1.36  uebayasi 
   1541    1.1     pooka 		if (blkno == (daddr_t)-1) {
   1542   1.35  uebayasi 			if (!iowrite) {
   1543    1.1     pooka 				memset((char *)kva + (offset - startoffset), 0,
   1544   1.36  uebayasi 				    iobytes);
   1545    1.1     pooka 			}
   1546    1.1     pooka 			skipbytes += iobytes;
   1547    1.1     pooka 			continue;
   1548    1.1     pooka 		}
   1549    1.1     pooka 
   1550   1.36  uebayasi 		/*
   1551   1.36  uebayasi 		 * allocate a sub-buf for this piece of the i/o
   1552   1.36  uebayasi 		 * (or just use mbp if there's only 1 piece),
   1553   1.36  uebayasi 		 * and start it going.
   1554   1.36  uebayasi 		 */
   1555   1.36  uebayasi 
   1556    1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
   1557    1.1     pooka 			bp = mbp;
   1558    1.1     pooka 		} else {
   1559   1.71  pgoyette 			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
   1560   1.71  pgoyette 			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
   1561    1.2        ad 			bp = getiobuf(vp, true);
   1562    1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1563    1.1     pooka 		}
   1564    1.1     pooka 		bp->b_lblkno = 0;
   1565    1.1     pooka 
   1566    1.1     pooka 		/* adjust physical blkno for partial blocks */
   1567    1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1568    1.1     pooka 		    dev_bshift);
   1569   1.36  uebayasi 
   1570    1.1     pooka 		UVMHIST_LOG(ubchist,
   1571   1.71  pgoyette 		    "bp %#jx offset 0x%jx bcount 0x%jx blkno 0x%jx",
   1572   1.71  pgoyette 		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);
   1573    1.1     pooka 
   1574    1.1     pooka 		VOP_STRATEGY(devvp, bp);
   1575    1.1     pooka 	}
   1576   1.36  uebayasi 
   1577   1.36  uebayasi loopdone:
   1578    1.1     pooka 	if (skipbytes) {
   1579   1.71  pgoyette 		UVMHIST_LOG(ubchist, "skipbytes %jd", skipbytes, 0,0,0);
   1580    1.1     pooka 	}
   1581    1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
   1582    1.1     pooka 	if (async) {
   1583    1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1584    1.1     pooka 		return (0);
   1585    1.1     pooka 	}
   1586   1.71  pgoyette 	UVMHIST_LOG(ubchist, "waiting for mbp %#jx", (uintptr_t)mbp, 0, 0, 0);
   1587    1.1     pooka 	error = biowait(mbp);
   1588    1.1     pooka 	s = splbio();
   1589    1.1     pooka 	(*iodone)(mbp);
   1590    1.1     pooka 	splx(s);
   1591   1.71  pgoyette 	UVMHIST_LOG(ubchist, "returning, error %jd", error, 0, 0, 0);
   1592    1.1     pooka 	return (error);
   1593    1.1     pooka }
   1594    1.1     pooka 
   1595    1.1     pooka int
   1596    1.1     pooka genfs_compat_getpages(void *v)
   1597    1.1     pooka {
   1598    1.1     pooka 	struct vop_getpages_args /* {
   1599    1.1     pooka 		struct vnode *a_vp;
   1600    1.1     pooka 		voff_t a_offset;
   1601    1.1     pooka 		struct vm_page **a_m;
   1602    1.1     pooka 		int *a_count;
   1603    1.1     pooka 		int a_centeridx;
   1604    1.1     pooka 		vm_prot_t a_access_type;
   1605    1.1     pooka 		int a_advice;
   1606    1.1     pooka 		int a_flags;
   1607    1.1     pooka 	} */ *ap = v;
   1608    1.1     pooka 
   1609    1.1     pooka 	off_t origoffset;
   1610    1.1     pooka 	struct vnode *vp = ap->a_vp;
   1611    1.1     pooka 	struct uvm_object *uobj = &vp->v_uobj;
   1612    1.1     pooka 	struct vm_page *pg, **pgs;
   1613    1.1     pooka 	vaddr_t kva;
   1614    1.1     pooka 	int i, error, orignpages, npages;
   1615    1.1     pooka 	struct iovec iov;
   1616    1.1     pooka 	struct uio uio;
   1617    1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1618   1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1619    1.1     pooka 
   1620    1.1     pooka 	error = 0;
   1621    1.1     pooka 	origoffset = ap->a_offset;
   1622    1.1     pooka 	orignpages = *ap->a_count;
   1623    1.1     pooka 	pgs = ap->a_m;
   1624    1.1     pooka 
   1625    1.1     pooka 	if (ap->a_flags & PGO_LOCKED) {
   1626   1.84        ad 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, NULL,
   1627   1.35  uebayasi 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1628    1.1     pooka 
   1629   1.38       chs 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1630   1.38       chs 		return error;
   1631    1.1     pooka 	}
   1632    1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1633   1.86        ad 		rw_exit(uobj->vmobjlock);
   1634   1.38       chs 		return EINVAL;
   1635    1.1     pooka 	}
   1636    1.1     pooka 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1637   1.86        ad 		rw_exit(uobj->vmobjlock);
   1638    1.1     pooka 		return 0;
   1639    1.1     pooka 	}
   1640    1.1     pooka 	npages = orignpages;
   1641   1.84        ad 	uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
   1642   1.86        ad 	rw_exit(uobj->vmobjlock);
   1643    1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1644    1.1     pooka 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1645    1.1     pooka 	for (i = 0; i < npages; i++) {
   1646    1.1     pooka 		pg = pgs[i];
   1647    1.1     pooka 		if ((pg->flags & PG_FAKE) == 0) {
   1648    1.1     pooka 			continue;
   1649    1.1     pooka 		}
   1650    1.1     pooka 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1651    1.1     pooka 		iov.iov_len = PAGE_SIZE;
   1652    1.1     pooka 		uio.uio_iov = &iov;
   1653    1.1     pooka 		uio.uio_iovcnt = 1;
   1654    1.1     pooka 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1655    1.1     pooka 		uio.uio_rw = UIO_READ;
   1656    1.1     pooka 		uio.uio_resid = PAGE_SIZE;
   1657    1.1     pooka 		UIO_SETUP_SYSSPACE(&uio);
   1658    1.1     pooka 		/* XXX vn_lock */
   1659    1.1     pooka 		error = VOP_READ(vp, &uio, 0, cred);
   1660    1.1     pooka 		if (error) {
   1661    1.1     pooka 			break;
   1662    1.1     pooka 		}
   1663    1.1     pooka 		if (uio.uio_resid) {
   1664    1.1     pooka 			memset(iov.iov_base, 0, uio.uio_resid);
   1665    1.1     pooka 		}
   1666    1.1     pooka 	}
   1667    1.1     pooka 	uvm_pagermapout(kva, npages);
   1668   1.86        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
   1669    1.1     pooka 	for (i = 0; i < npages; i++) {
   1670    1.1     pooka 		pg = pgs[i];
   1671    1.1     pooka 		if (error && (pg->flags & PG_FAKE) != 0) {
   1672    1.1     pooka 			pg->flags |= PG_RELEASED;
   1673    1.1     pooka 		} else {
   1674   1.84        ad 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
   1675   1.83        ad 			uvm_pagelock(pg);
   1676    1.1     pooka 			uvm_pageactivate(pg);
   1677   1.83        ad 			uvm_pageunlock(pg);
   1678    1.1     pooka 		}
   1679    1.1     pooka 	}
   1680    1.1     pooka 	if (error) {
   1681    1.1     pooka 		uvm_page_unbusy(pgs, npages);
   1682    1.1     pooka 	}
   1683   1.86        ad 	rw_exit(uobj->vmobjlock);
   1684   1.38       chs 	return error;
   1685    1.1     pooka }
   1686    1.1     pooka 
   1687    1.1     pooka int
   1688    1.1     pooka genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1689    1.1     pooka     int flags)
   1690    1.1     pooka {
   1691    1.1     pooka 	off_t offset;
   1692    1.1     pooka 	struct iovec iov;
   1693    1.1     pooka 	struct uio uio;
   1694    1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1695    1.1     pooka 	struct buf *bp;
   1696    1.1     pooka 	vaddr_t kva;
   1697    1.2        ad 	int error;
   1698    1.1     pooka 
   1699    1.1     pooka 	offset = pgs[0]->offset;
   1700    1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1701    1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1702    1.1     pooka 
   1703    1.1     pooka 	iov.iov_base = (void *)kva;
   1704    1.1     pooka 	iov.iov_len = npages << PAGE_SHIFT;
   1705    1.1     pooka 	uio.uio_iov = &iov;
   1706    1.1     pooka 	uio.uio_iovcnt = 1;
   1707    1.1     pooka 	uio.uio_offset = offset;
   1708    1.1     pooka 	uio.uio_rw = UIO_WRITE;
   1709    1.1     pooka 	uio.uio_resid = npages << PAGE_SHIFT;
   1710    1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
   1711    1.1     pooka 	/* XXX vn_lock */
   1712    1.1     pooka 	error = VOP_WRITE(vp, &uio, 0, cred);
   1713    1.1     pooka 
   1714   1.49     rmind 	mutex_enter(vp->v_interlock);
   1715    1.2        ad 	vp->v_numoutput++;
   1716   1.49     rmind 	mutex_exit(vp->v_interlock);
   1717    1.1     pooka 
   1718    1.2        ad 	bp = getiobuf(vp, true);
   1719   1.89        ad 	bp->b_cflags |= BC_BUSY | BC_AGE;
   1720    1.1     pooka 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1721    1.1     pooka 	bp->b_data = (char *)kva;
   1722    1.1     pooka 	bp->b_bcount = npages << PAGE_SHIFT;
   1723    1.1     pooka 	bp->b_bufsize = npages << PAGE_SHIFT;
   1724    1.1     pooka 	bp->b_resid = 0;
   1725    1.1     pooka 	bp->b_error = error;
   1726    1.1     pooka 	uvm_aio_aiodone(bp);
   1727    1.1     pooka 	return (error);
   1728    1.1     pooka }
   1729    1.1     pooka 
   1730    1.1     pooka /*
   1731    1.1     pooka  * Process a uio using direct I/O.  If we reach a part of the request
   1732    1.1     pooka  * which cannot be processed in this fashion for some reason, just return.
   1733    1.1     pooka  * The caller must handle some additional part of the request using
   1734    1.1     pooka  * buffered I/O before trying direct I/O again.
   1735    1.1     pooka  */
   1736    1.1     pooka 
   1737    1.1     pooka void
   1738    1.1     pooka genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1739    1.1     pooka {
   1740    1.1     pooka 	struct vmspace *vs;
   1741    1.1     pooka 	struct iovec *iov;
   1742    1.1     pooka 	vaddr_t va;
   1743    1.1     pooka 	size_t len;
   1744    1.1     pooka 	const int mask = DEV_BSIZE - 1;
   1745    1.1     pooka 	int error;
   1746   1.16     joerg 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1747   1.16     joerg 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1748    1.1     pooka 
   1749   1.74  jdolecek #ifdef DIAGNOSTIC
   1750   1.74  jdolecek 	if ((ioflag & IO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
   1751   1.74  jdolecek                 WAPBL_JLOCK_ASSERT(vp->v_mount);
   1752   1.74  jdolecek #endif
   1753   1.74  jdolecek 
   1754    1.1     pooka 	/*
   1755    1.1     pooka 	 * We only support direct I/O to user space for now.
   1756    1.1     pooka 	 */
   1757    1.1     pooka 
   1758    1.1     pooka 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1759    1.1     pooka 		return;
   1760    1.1     pooka 	}
   1761    1.1     pooka 
   1762    1.1     pooka 	/*
   1763    1.1     pooka 	 * If the vnode is mapped, we would need to get the getpages lock
   1764   1.53      yamt 	 * to stabilize the bmap, but then we would get into trouble while
   1765    1.1     pooka 	 * locking the pages if the pages belong to this same vnode (or a
   1766    1.1     pooka 	 * multi-vnode cascade to the same effect).  Just fall back to
   1767    1.1     pooka 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1768    1.1     pooka 	 */
   1769    1.1     pooka 
   1770    1.1     pooka 	if (vp->v_vflag & VV_MAPPED) {
   1771    1.1     pooka 		return;
   1772    1.1     pooka 	}
   1773    1.1     pooka 
   1774   1.16     joerg 	if (need_wapbl) {
   1775   1.13   hannken 		error = WAPBL_BEGIN(vp->v_mount);
   1776   1.13   hannken 		if (error)
   1777   1.13   hannken 			return;
   1778   1.13   hannken 	}
   1779   1.13   hannken 
   1780    1.1     pooka 	/*
   1781    1.1     pooka 	 * Do as much of the uio as possible with direct I/O.
   1782    1.1     pooka 	 */
   1783    1.1     pooka 
   1784    1.1     pooka 	vs = uio->uio_vmspace;
   1785    1.1     pooka 	while (uio->uio_resid) {
   1786    1.1     pooka 		iov = uio->uio_iov;
   1787    1.1     pooka 		if (iov->iov_len == 0) {
   1788    1.1     pooka 			uio->uio_iov++;
   1789    1.1     pooka 			uio->uio_iovcnt--;
   1790    1.1     pooka 			continue;
   1791    1.1     pooka 		}
   1792    1.1     pooka 		va = (vaddr_t)iov->iov_base;
   1793    1.1     pooka 		len = MIN(iov->iov_len, genfs_maxdio);
   1794    1.1     pooka 		len &= ~mask;
   1795    1.1     pooka 
   1796    1.1     pooka 		/*
   1797    1.1     pooka 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1798    1.1     pooka 		 * the current EOF, then fall back to buffered I/O.
   1799    1.1     pooka 		 */
   1800    1.1     pooka 
   1801    1.1     pooka 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1802   1.13   hannken 			break;
   1803    1.1     pooka 		}
   1804    1.1     pooka 
   1805    1.1     pooka 		/*
   1806    1.1     pooka 		 * Check alignment.  The file offset must be at least
   1807    1.1     pooka 		 * sector-aligned.  The exact constraint on memory alignment
   1808    1.1     pooka 		 * is very hardware-dependent, but requiring sector-aligned
   1809    1.1     pooka 		 * addresses there too is safe.
   1810    1.1     pooka 		 */
   1811    1.1     pooka 
   1812    1.1     pooka 		if (uio->uio_offset & mask || va & mask) {
   1813   1.13   hannken 			break;
   1814    1.1     pooka 		}
   1815    1.1     pooka 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1816    1.1     pooka 					  uio->uio_rw);
   1817    1.1     pooka 		if (error) {
   1818    1.1     pooka 			break;
   1819    1.1     pooka 		}
   1820    1.1     pooka 		iov->iov_base = (char *)iov->iov_base + len;
   1821    1.1     pooka 		iov->iov_len -= len;
   1822    1.1     pooka 		uio->uio_offset += len;
   1823    1.1     pooka 		uio->uio_resid -= len;
   1824    1.1     pooka 	}
   1825   1.13   hannken 
   1826   1.16     joerg 	if (need_wapbl)
   1827   1.13   hannken 		WAPBL_END(vp->v_mount);
   1828    1.1     pooka }
   1829    1.1     pooka 
   1830    1.1     pooka /*
   1831    1.1     pooka  * Iodone routine for direct I/O.  We don't do much here since the request is
   1832    1.1     pooka  * always synchronous, so the caller will do most of the work after biowait().
   1833    1.1     pooka  */
   1834    1.1     pooka 
   1835    1.1     pooka static void
   1836    1.1     pooka genfs_dio_iodone(struct buf *bp)
   1837    1.1     pooka {
   1838    1.1     pooka 
   1839    1.1     pooka 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1840    1.2        ad 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1841    1.2        ad 		mutex_enter(bp->b_objlock);
   1842    1.1     pooka 		vwakeup(bp);
   1843    1.2        ad 		mutex_exit(bp->b_objlock);
   1844    1.1     pooka 	}
   1845    1.1     pooka 	putiobuf(bp);
   1846    1.1     pooka }
   1847    1.1     pooka 
   1848    1.1     pooka /*
   1849    1.1     pooka  * Process one chunk of a direct I/O request.
   1850    1.1     pooka  */
   1851    1.1     pooka 
   1852    1.1     pooka static int
   1853    1.1     pooka genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1854    1.1     pooka     off_t off, enum uio_rw rw)
   1855    1.1     pooka {
   1856    1.1     pooka 	struct vm_map *map;
   1857   1.56    martin 	struct pmap *upm, *kpm __unused;
   1858    1.1     pooka 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1859    1.1     pooka 	off_t spoff, epoff;
   1860    1.1     pooka 	vaddr_t kva, puva;
   1861    1.1     pooka 	paddr_t pa;
   1862    1.1     pooka 	vm_prot_t prot;
   1863   1.58    martin 	int error, rv __diagused, poff, koff;
   1864   1.13   hannken 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1865    1.1     pooka 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1866    1.1     pooka 
   1867    1.1     pooka 	/*
   1868    1.1     pooka 	 * For writes, verify that this range of the file already has fully
   1869    1.1     pooka 	 * allocated backing store.  If there are any holes, just punt and
   1870    1.1     pooka 	 * make the caller take the buffered write path.
   1871    1.1     pooka 	 */
   1872    1.1     pooka 
   1873    1.1     pooka 	if (rw == UIO_WRITE) {
   1874    1.1     pooka 		daddr_t lbn, elbn, blkno;
   1875    1.1     pooka 		int bsize, bshift, run;
   1876    1.1     pooka 
   1877    1.1     pooka 		bshift = vp->v_mount->mnt_fs_bshift;
   1878    1.1     pooka 		bsize = 1 << bshift;
   1879    1.1     pooka 		lbn = off >> bshift;
   1880    1.1     pooka 		elbn = (off + len + bsize - 1) >> bshift;
   1881    1.1     pooka 		while (lbn < elbn) {
   1882    1.1     pooka 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1883    1.1     pooka 			if (error) {
   1884    1.1     pooka 				return error;
   1885    1.1     pooka 			}
   1886    1.1     pooka 			if (blkno == (daddr_t)-1) {
   1887    1.1     pooka 				return ENOSPC;
   1888    1.1     pooka 			}
   1889    1.1     pooka 			lbn += 1 + run;
   1890    1.1     pooka 		}
   1891    1.1     pooka 	}
   1892    1.1     pooka 
   1893    1.1     pooka 	/*
   1894    1.1     pooka 	 * Flush any cached pages for parts of the file that we're about to
   1895    1.1     pooka 	 * access.  If we're writing, invalidate pages as well.
   1896    1.1     pooka 	 */
   1897    1.1     pooka 
   1898    1.1     pooka 	spoff = trunc_page(off);
   1899    1.1     pooka 	epoff = round_page(off + len);
   1900   1.87        ad 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1901    1.1     pooka 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1902    1.1     pooka 	if (error) {
   1903    1.1     pooka 		return error;
   1904    1.1     pooka 	}
   1905    1.1     pooka 
   1906    1.1     pooka 	/*
   1907    1.1     pooka 	 * Wire the user pages and remap them into kernel memory.
   1908    1.1     pooka 	 */
   1909    1.1     pooka 
   1910    1.1     pooka 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1911    1.1     pooka 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1912    1.1     pooka 	if (error) {
   1913    1.1     pooka 		return error;
   1914    1.1     pooka 	}
   1915    1.1     pooka 
   1916    1.1     pooka 	map = &vs->vm_map;
   1917    1.1     pooka 	upm = vm_map_pmap(map);
   1918    1.1     pooka 	kpm = vm_map_pmap(kernel_map);
   1919    1.1     pooka 	puva = trunc_page(uva);
   1920   1.51      matt 	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
   1921   1.51      matt 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
   1922    1.1     pooka 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1923    1.1     pooka 		rv = pmap_extract(upm, puva + poff, &pa);
   1924    1.1     pooka 		KASSERT(rv);
   1925   1.51      matt 		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
   1926    1.1     pooka 	}
   1927    1.1     pooka 	pmap_update(kpm);
   1928    1.1     pooka 
   1929    1.1     pooka 	/*
   1930    1.1     pooka 	 * Do the I/O.
   1931    1.1     pooka 	 */
   1932    1.1     pooka 
   1933    1.1     pooka 	koff = uva - trunc_page(uva);
   1934    1.1     pooka 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1935    1.1     pooka 			    genfs_dio_iodone);
   1936    1.1     pooka 
   1937    1.1     pooka 	/*
   1938    1.1     pooka 	 * Tear down the kernel mapping.
   1939    1.1     pooka 	 */
   1940    1.1     pooka 
   1941   1.51      matt 	pmap_kremove(kva, klen);
   1942    1.1     pooka 	pmap_update(kpm);
   1943    1.1     pooka 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1944    1.1     pooka 
   1945    1.1     pooka 	/*
   1946    1.1     pooka 	 * Unwire the user pages.
   1947    1.1     pooka 	 */
   1948    1.1     pooka 
   1949    1.1     pooka 	uvm_vsunlock(vs, (void *)uva, len);
   1950    1.1     pooka 	return error;
   1951    1.1     pooka }
   1952