/*	$NetBSD: genfs_io.c,v 1.92 2020/03/14 20:23:51 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.92 2020/03/14 20:23:51 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>
#include <uvm/uvm_page_array.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
    off_t, bool, bool, bool, bool);
static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, unsigned int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;

static void
genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		KASSERT(uvm_page_owner_locked_p(pg, true));
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_page_unbusy(pgs, npages);
}

static void
genfs_markdirty(struct vnode *vp)
{

	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
	mutex_exit(vp->v_interlock);
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

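/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * filesystems typically reach this through VOP_GETPAGES() with the object
 * lock held, e.g. to fault in a single page for reading:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 *	error = VOP_GETPAGES(vp, trunc_page(off), &pg, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *
 * unless PGO_LOCKED was given, the object lock is released on return.
 */
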
int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool need_wapbl = (vp->v_mount->mnt_wapbl &&
			(flags & PGO_JOURNALLOCKED) == 0);
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	bool holds_wapbl = false;
	struct mount *trans_mount = NULL;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx/%jx count %jd",
	    (uintptr_t)vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(memwrite >= overwrite);
	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

#ifdef DIAGNOSTIC
	if ((flags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
                WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	mutex_enter(vp->v_interlock);
	error = vdead_check(vp, VDEAD_NOWAIT);
	mutex_exit(vp->v_interlock);
	if (error) {
		if ((flags & PGO_LOCKED) == 0)
			rw_exit(uobj->vmobjlock);
		return error;
	}

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			rw_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%jx count %jd goes past EOF 0x%jx",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, NULL,
		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		/*
		 * lock and unlock g_glock to ensure that no one is truncating
		 * the file behind us.
		 */
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];
				if (pg == NULL || pg == PGO_DONTCARE) {
					continue;
				}
				if (uvm_pagegetdirty(pg) ==
				    UVM_PAGE_STATUS_CLEAN) {
					uvm_pagemarkdirty(pg,
					    UVM_PAGE_STATUS_UNKNOWN);
				}
			}
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	rw_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
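	/*
	 * (illustrative numbers: with fs_bsize = 8192, i.e. fs_bshift = 13,
	 * blk_mask = 0x1fff, so trunc_blk(0x2345) = 0x2000 and
	 * round_blk(0x2345) = 0x4000.)
	 */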

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %jd npages %jd startoff %jd endoff %jd",
	    ridx, npages, startoffset, endoffset);

	if (trans_mount == NULL) {
		trans_mount = vp->v_mount;
		fstrans_start(trans_mount);
		/*
		 * check if this vnode is still valid.
		 */
		mutex_enter(vp->v_interlock);
		error = vdead_check(vp, 0);
		mutex_exit(vp->v_interlock);
		if (error)
			goto out_err_free;
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (blockalloc && need_wapbl) {
			error = WAPBL_BEGIN(trans_mount);
			if (error)
				goto out_err_free;
			holds_wapbl = true;
		}
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		rw_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			/*
			 * it's the caller's responsibility to allocate blocks
			 * beforehand for the overwrite case.
			 */

			KASSERT((pg->flags & PG_RDONLY) == 0 || !blockalloc);
			pg->flags &= ~PG_RDONLY;

			/*
			 * mark the page DIRTY.
			 * otherwise another thread can do putpages and pull
			 * our vnode from syncer's queue before our caller does
			 * ubc_release.  note that putpages won't see CLEAN
			 * pages even if they are BUSY.
			 */

			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY) != 0)) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%jx end 0x%jx",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			rw_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	rw_exit(uobj->vmobjlock);
	error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
	    async, memwrite, blockalloc, glocked);
	if (!glocked) {
		genfs_node_unlock(vp);
	}
	if (error == 0 && async)
		goto out_err_free;
	rw_enter(uobj->vmobjlock, RW_WRITER);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		genfs_rel_pages(pgs, npages);
		rw_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %jd", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %jd", npages,0,0,0);
	error = 0;
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %#jx flags 0x%jx",
		    (uintptr_t)pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			/*
			 * we've read the page's contents from the backing
			 * storage.
			 *
			 * for a read fault, we keep them CLEAN;  if we
			 * encountered a hole while reading, the pages may
			 * already have been dirtied with zeros.
			 */
			KASSERTMSG(blockalloc || uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			pg->flags &= ~PG_FAKE;
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %#jx offset 0x%jx",
			    (uintptr_t)pg, pg->offset,0,0);
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pagelock(pg);
			uvm_pageenqueue(pg);
			uvm_pageunbusy(pg);
			uvm_pageunlock(pg);
			pg->flags &= ~PG_FAKE;
			UVM_PAGE_OWN(pg, NULL);
		} else if (memwrite && !overwrite &&
		    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * for a write fault, start dirtiness tracking of
			 * requested pages.
			 */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
		}
	}
	if (memwrite) {
		genfs_markdirty(vp);
	}
	rw_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (trans_mount != NULL) {
		if (holds_wapbl)
			WAPBL_END(trans_mount);
		fstrans_done(trans_mount);
	}
	return error;
}

/*
 * genfs_getpages_read: Read the pages in with VOP_BMAP/VOP_STRATEGY.
 *
 * "glocked" (which is currently not actually used) tells us not whether
 * the genfs_node is locked on entry (it always is) but whether it was
 * locked on entry to genfs_getpages.
 */
static int
genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
    off_t startoffset, off_t diskeof,
    bool async, bool memwrite, bool blockalloc, bool glocked)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
	if (kva == 0)
		return EBUSY;

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags |= BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_aiodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */
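	/*
	 * (illustrative numbers, assuming PAGE_SIZE 4096: with
	 * bytes = 0x1800, tailstart begins at 0x1800, so the first
	 * iteration covers the 0x800 bytes that finish that page and
	 * later iterations cover whole 0x1000-byte pages; only PG_FAKE
	 * pages are actually zeroed.)
	 */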

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %#jx 0x%jx 0x%jx",
			    (uintptr_t)kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%jx",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */
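		/*
		 * (VOP_BMAP's "run" counts the contiguous blocks that
		 * follow lbn, so the extent ends at byte
		 * (lbn + 1 + run) << fs_bshift.  illustrative numbers:
		 * fs_bshift = 13, lbn = 2, run = 1 ends the extent at
		 * 0x8000, so with offset 0x5000 iobytes is capped at
		 * 0x3000.)
		 */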

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%jx -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			if (!blockalloc) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				for (i = 0; i < holepages; i++) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
				rw_exit(uobj->vmobjlock);
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
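		/*
		 * (illustrative numbers: with fs_bshift = 13 and
		 * dev_bshift = 9, an offset 0x1000 bytes past the start
		 * of the block advances b_blkno by 0x1000 >> 9 = 8
		 * DEV_BSIZE sectors.)
		 */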
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %#jx offset 0x%x bcount 0x%x blkno 0x%x",
		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		return 0;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%jx/0x%jx -> %jd",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			rw_enter(uobj->vmobjlock, RW_WRITER);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~PG_RDONLY;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				UVMHIST_LOG(ubchist, "mark dirty pg %#jx",
				    (uintptr_t)pg, 0, 0, 0);
			}
			rw_exit(uobj->vmobjlock);
		}
	}

	putiobuf(mbp);
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
 *	one has had a chance to modify it yet.  if the PG_BUSY page is
 *	being paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 */

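/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * flush and free a byte range synchronously through VOP_PUTPAGES(),
 * passing the object lock in held; it is released on return:
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 *	error = VOP_PUTPAGES(vp, trunc_page(lo), round_page(hi),
 *	    PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 *
 * passing offhi == 0 extends the range to EOF, per the notes above.
 */
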
int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	krwlock_t * const slock = uobj->vmobjlock;
	off_t nextoff;
	int i, error, npages, nback;
	int freeflag;
	/*
	 * This array is larger than it needs to be so that its size is
	 * constant.  The right size is MAXPAGES.
	 */
	struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
#define MAXPAGES (MAXPHYS / PAGE_SIZE)
	struct vm_page *pg, *tpg;
	struct uvm_page_array a;
	bool wasclean, needs_clean;
	bool async = (origflags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct mount *trans_mp;
	int flags;
	bool modified;		/* if we write out any pages */
	bool holds_wapbl;
	bool cleanall;		/* try to pull off from the syncer's list */
	bool onworklst;
	bool nodirty;
	const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);
	KASSERT(rw_write_held(slock));

	UVMHIST_LOG(ubchist, "vp %#jx pages %jd off 0x%jx len 0x%jx",
	    (uintptr_t)vp, uobj->uo_npages, startoff, endoff - startoff);

#ifdef DIAGNOSTIC
	if ((origflags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
                WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	trans_mp = NULL;
	holds_wapbl = false;

retry:
	modified = false;
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);

	/*
	 * shortcut if we have no pages to process.
	 */

	nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
            UVM_PAGE_DIRTY_TAG);
	if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
		mutex_enter(vp->v_interlock);
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		mutex_exit(vp->v_interlock);
		if (trans_mp) {
			if (holds_wapbl)
				WAPBL_END(trans_mp);
			fstrans_done(trans_mp);
		}
		rw_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (trans_mp == NULL && (flags & PGO_CLEANIT) != 0) {
		if (pagedaemon) {
			/* Pagedaemon must not sleep here. */
			trans_mp = vp->v_mount;
			error = fstrans_start_nowait(trans_mp);
			if (error) {
				rw_exit(slock);
				return error;
			}
		} else {
			/*
			 * Cannot use vdeadcheck() here as this operation
			 * usually gets used from VOP_RECLAIM().  Test for
			 * change of v_mount instead and retry on change.
			 */
			rw_exit(slock);
			trans_mp = vp->v_mount;
			fstrans_start(trans_mp);
			if (vp->v_mount != trans_mp) {
				fstrans_done(trans_mp);
				trans_mp = NULL;
			} else {
				holds_wapbl = (trans_mp->mnt_wapbl &&
				    (origflags & PGO_JOURNALLOCKED) == 0);
				if (holds_wapbl) {
					error = WAPBL_BEGIN(trans_mp);
					if (error) {
						fstrans_done(trans_mp);
						return error;
					}
				}
			}
			rw_enter(slock, RW_WRITER);
			goto retry;
		}
	}

	error = 0;
	wasclean = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
            UVM_PAGE_WRITEBACK_TAG);
	nextoff = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}

	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if (nodirty) {
#if !defined(DEBUG)
		if (dirtyonly) {
			goto skip_scan;
		}
#endif /* !defined(DEBUG) */
		flags &= ~PGO_CLEANIT;
	}

	/*
	 * start the loop to scan pages.
	 */

	cleanall = true;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	uvm_page_array_init(&a);
	for (;;) {
		bool pgprotected;

		/*
		 * if !dirtyonly, iterate over all resident pages in the range.
		 *
		 * if dirtyonly, only possibly dirty pages are interesting.
		 * however, if we are asked to sync for integrity, we should
		 * wait on pages being written back by other threads as well.
		 */

		pg = uvm_page_array_fill_and_peek(&a, uobj, nextoff, 0,
		    dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
		    (!async ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
		if (pg == NULL) {
			break;
		}

		KASSERT(pg->uobject == uobj);
		KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY)) != 0);
		KASSERT(pg->offset >= startoff);
		KASSERT(pg->offset >= nextoff);
		KASSERT(!dirtyonly ||
		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
		    radix_tree_get_tag(&uobj->uo_pages,
			pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));

		if (pg->offset >= endoff) {
			break;
		}

		/*
		 * a preempt point.
		 */

		if (preempt_needed()) {
			nextoff = pg->offset; /* visit this page again */
			rw_exit(slock);
			preempt();
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			rw_enter(slock, RW_WRITER);
			continue;
		}

		/*
		 * if the current page is busy, wait for it to become unbusy.
		 */

		if ((pg->flags & PG_BUSY) != 0) {
			UVMHIST_LOG(ubchist, "busy %#jx", (uintptr_t)pg,
			   0, 0, 0);
			if ((pg->flags & (PG_RELEASED|PG_PAGEOUT)) != 0
			    && (flags & PGO_BUSYFAIL) != 0) {
				UVMHIST_LOG(ubchist, "busyfail %#jx",
				    (uintptr_t)pg, 0, 0, 0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			/*
			 * don't bother to wait on others' activities
			 * unless we are asked to sync for integrity.
			 */
			if (!async && (flags & PGO_RECLAIM) == 0) {
				wasclean = false;
				nextoff = pg->offset + PAGE_SIZE;
				uvm_page_array_advance(&a);
				continue;
			}
			nextoff = pg->offset; /* visit this page again */
			uvm_pagewait(pg, slock, "genput");
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			rw_enter(slock, RW_WRITER);
			continue;
		}

		nextoff = pg->offset + PAGE_SIZE;
		uvm_page_array_advance(&a);

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
   1110   1.1     pooka 		 */
   1111   1.1     pooka 
   1112  1.84        ad 		pgprotected = false;
   1113   1.1     pooka 		if (flags & PGO_FREE) {
   1114   1.1     pooka 			pmap_page_protect(pg, VM_PROT_NONE);
   1115  1.84        ad 			pgprotected = true;
   1116   1.1     pooka 		} else if (flags & PGO_CLEANIT) {
   1117   1.1     pooka 
   1118   1.1     pooka 			/*
   1119   1.1     pooka 			 * if we still have some hope to pull this vnode off
   1120   1.1     pooka 			 * from the syncer queue, write-protect the page.
   1121   1.1     pooka 			 */
   1122   1.1     pooka 
   1123  1.84        ad 			if (cleanall && wasclean) {
   1124   1.1     pooka 
   1125   1.1     pooka 				/*
   1126   1.1     pooka 				 * uobj pages get wired only by uvm_fault
   1127   1.1     pooka 				 * where uobj is locked.
   1128   1.1     pooka 				 */
   1129   1.1     pooka 
   1130   1.1     pooka 				if (pg->wire_count == 0) {
   1131   1.1     pooka 					pmap_page_protect(pg,
   1132   1.1     pooka 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1133  1.84        ad 					pgprotected = true;
   1134   1.1     pooka 				} else {
   1135   1.1     pooka 					cleanall = false;
   1136   1.1     pooka 				}
   1137   1.1     pooka 			}
   1138   1.1     pooka 		}
   1139   1.1     pooka 
   1140   1.1     pooka 		if (flags & PGO_CLEANIT) {
   1141  1.84        ad 			needs_clean = uvm_pagecheckdirty(pg, pgprotected);
   1142   1.1     pooka 		} else {
   1143   1.1     pooka 			needs_clean = false;
   1144   1.1     pooka 		}
   1145   1.1     pooka 
   1146   1.1     pooka 		/*
   1147   1.1     pooka 		 * if we're cleaning, build a cluster.
   1148  1.84        ad 		 * the cluster will consist of pages which are currently dirty.
   1149   1.1     pooka 		 * if not cleaning, just operate on the one page.
   1150   1.1     pooka 		 */
   1151   1.1     pooka 
   1152   1.1     pooka 		if (needs_clean) {
   1153   1.1     pooka 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1154   1.1     pooka 			wasclean = false;
   1155   1.1     pooka 			memset(pgs, 0, sizeof(pgs));
   1156   1.1     pooka 			pg->flags |= PG_BUSY;
   1157   1.1     pooka 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1158   1.1     pooka 
   1159   1.1     pooka 			/*
   1160  1.72       chs 			 * let the fs constrain the offset range of the cluster.
   1161  1.72       chs 			 * we additionally constrain the range here such that
   1162  1.72       chs 			 * it fits in the "pgs" pages array.
   1163  1.72       chs 			 */
   1164  1.72       chs 
   1165  1.78        ad 			off_t fslo, fshi, genlo, lo, off = pg->offset;
   1166  1.72       chs 			GOP_PUTRANGE(vp, off, &fslo, &fshi);
   1167  1.72       chs 			KASSERT(fslo == trunc_page(fslo));
   1168  1.72       chs 			KASSERT(fslo <= off);
   1169  1.72       chs 			KASSERT(fshi == trunc_page(fshi));
   1170  1.72       chs 			KASSERT(fshi == 0 || off < fshi);
   1171  1.72       chs 
   1172  1.72       chs 			if (off > MAXPHYS / 2)
   1173  1.72       chs 				genlo = trunc_page(off - (MAXPHYS / 2));
   1174  1.72       chs 			else
   1175  1.72       chs 				genlo = 0;
   1176  1.72       chs 			lo = MAX(fslo, genlo);
   1177  1.72       chs 
   1178  1.72       chs 			/*
   1179   1.1     pooka 			 * first look backward.
   1180   1.1     pooka 			 */
   1181   1.1     pooka 
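			/*
			 * note that with UFP_BACKWARD, uvn_findpages() fills
			 * the tail of the array, so the nback pages it finds
			 * land in pgs[npages - nback .. npages - 1] and are
			 * shifted to the front below; slots it doesn't touch
			 * keep the zeroes from the memset above.
			 */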
   1182  1.72       chs 			npages = (off - lo) >> PAGE_SHIFT;
   1183   1.1     pooka 			nback = npages;
   1184  1.84        ad 			uvn_findpages(uobj, off - PAGE_SIZE, &nback,
   1185  1.84        ad 			    &pgs[0], NULL,
   1186   1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1187   1.1     pooka 			if (nback) {
   1188   1.1     pooka 				memmove(&pgs[0], &pgs[npages - nback],
   1189   1.1     pooka 				    nback * sizeof(pgs[0]));
   1190   1.1     pooka 				if (npages - nback < nback)
   1191   1.1     pooka 					memset(&pgs[nback], 0,
   1192   1.1     pooka 					    (npages - nback) * sizeof(pgs[0]));
   1193   1.1     pooka 				else
   1194   1.1     pooka 					memset(&pgs[npages - nback], 0,
   1195   1.1     pooka 					    nback * sizeof(pgs[0]));
   1196   1.1     pooka 			}
   1197   1.1     pooka 
   1198   1.1     pooka 			/*
   1199   1.1     pooka 			 * then plug in our page of interest.
   1200   1.1     pooka 			 */
   1201   1.1     pooka 
   1202   1.1     pooka 			pgs[nback] = pg;
   1203   1.1     pooka 
   1204   1.1     pooka 			/*
   1205   1.1     pooka 			 * then look forward to fill in the remaining space in
   1206   1.1     pooka 			 * the array of pages.
   1207  1.84        ad 			 *
   1208  1.84        ad 			 * pass our cached array of pages so that hopefully
   1209  1.84        ad 			 * uvn_findpages can find some good pages in it.
    1210  1.84        ad 			 * the array a was filled above with one of the
    1211  1.84        ad 			 * following sets of flags:
   1212  1.84        ad 			 *	0
   1213  1.84        ad 			 *	UVM_PAGE_ARRAY_FILL_DIRTY
   1214  1.84        ad 			 *	UVM_PAGE_ARRAY_FILL_DIRTY|WRITEBACK
   1215   1.1     pooka 			 */
   1216   1.1     pooka 
   1217  1.62  christos 			npages = MAXPAGES - nback - 1;
   1218  1.72       chs 			if (fshi)
   1219  1.72       chs 				npages = MIN(npages,
   1220  1.72       chs 					     (fshi - off - 1) >> PAGE_SHIFT);
   1221   1.1     pooka 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1222  1.84        ad 			    &pgs[nback + 1], NULL,
   1223   1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1224   1.1     pooka 			npages += nback + 1;
   1225   1.1     pooka 		} else {
   1226   1.1     pooka 			pgs[0] = pg;
   1227   1.1     pooka 			npages = 1;
   1228   1.1     pooka 			nback = 0;
   1229   1.1     pooka 		}
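
		/*
		 * at this point pgs[0 .. npages - 1] is a run of pages
		 * contiguous in offset with the page of interest at
		 * pgs[nback], e.g. with nback == 2 and two pages found
		 * forward:
		 *
		 *	pgs[0] pgs[1] pgs[2] pgs[3] pgs[4]
		 *	 back   back    pg    fwd    fwd	(npages == 5)
		 */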
   1230   1.1     pooka 
   1231   1.1     pooka 		/*
   1232   1.1     pooka 		 * apply FREE or DEACTIVATE options if requested.
   1233   1.1     pooka 		 */
   1234   1.1     pooka 
   1235   1.1     pooka 		for (i = 0; i < npages; i++) {
   1236   1.1     pooka 			tpg = pgs[i];
   1237   1.1     pooka 			KASSERT(tpg->uobject == uobj);
   1238  1.84        ad 			KASSERT(i == 0 ||
   1239  1.84        ad 			    pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
   1240  1.84        ad 			KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
   1241  1.84        ad 			    UVM_PAGE_STATUS_DIRTY);
   1242  1.84        ad 			if (needs_clean) {
   1243  1.84        ad 				/*
   1244  1.84        ad 				 * mark pages as WRITEBACK so that concurrent
   1245  1.84        ad 				 * fsync can find and wait for our activities.
   1246  1.84        ad 				 */
   1247  1.84        ad 				radix_tree_set_tag(&uobj->uo_pages,
   1248  1.84        ad 				    pgs[i]->offset >> PAGE_SHIFT,
   1249  1.84        ad 				    UVM_PAGE_WRITEBACK_TAG);
   1250  1.84        ad 			}
   1251   1.1     pooka 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1252   1.1     pooka 				continue;
   1253   1.1     pooka 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1254  1.83        ad 				uvm_pagelock(tpg);
   1255   1.1     pooka 				uvm_pagedeactivate(tpg);
   1256  1.83        ad 				uvm_pageunlock(tpg);
   1257   1.1     pooka 			} else if (flags & PGO_FREE) {
   1258   1.1     pooka 				pmap_page_protect(tpg, VM_PROT_NONE);
   1259   1.1     pooka 				if (tpg->flags & PG_BUSY) {
   1260   1.1     pooka 					tpg->flags |= freeflag;
   1261   1.1     pooka 					if (pagedaemon) {
   1262   1.2        ad 						uvm_pageout_start(1);
   1263  1.83        ad 						uvm_pagelock(tpg);
   1264   1.1     pooka 						uvm_pagedequeue(tpg);
   1265  1.83        ad 						uvm_pageunlock(tpg);
   1266   1.1     pooka 					}
   1267   1.1     pooka 				} else {
   1268   1.1     pooka 
   1269   1.1     pooka 					/*
   1270   1.1     pooka 					 * ``page is not busy''
   1271   1.1     pooka 					 * implies that npages is 1
   1272   1.1     pooka 					 * and needs_clean is false.
   1273   1.1     pooka 					 */
   1274   1.1     pooka 
   1275  1.78        ad 					KASSERT(npages == 1);
   1276  1.78        ad 					KASSERT(!needs_clean);
   1277  1.78        ad 					KASSERT(pg == tpg);
   1278  1.78        ad 					KASSERT(nextoff ==
   1279  1.78        ad 					    tpg->offset + PAGE_SIZE);
   1280   1.1     pooka 					uvm_pagefree(tpg);
   1281   1.1     pooka 					if (pagedaemon)
   1282   1.1     pooka 						uvmexp.pdfreed++;
   1283   1.1     pooka 				}
   1284   1.1     pooka 			}
   1285   1.1     pooka 		}
   1286   1.1     pooka 		if (needs_clean) {
   1287   1.1     pooka 			modified = true;
   1288  1.78        ad 			KASSERT(nextoff == pg->offset + PAGE_SIZE);
   1289  1.78        ad 			KASSERT(nback < npages);
   1290  1.78        ad 			nextoff = pg->offset + ((npages - nback) << PAGE_SHIFT);
   1291  1.78        ad 			KASSERT(pgs[nback] == pg);
   1292  1.78        ad 			KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);
   1293   1.1     pooka 
   1294   1.1     pooka 			/*
   1295  1.78        ad 			 * start the i/o.
   1296   1.1     pooka 			 */
   1297  1.86        ad 			rw_exit(slock);
   1298   1.1     pooka 			error = GOP_WRITE(vp, pgs, npages, flags);
   1299  1.78        ad 			/*
   1300  1.78        ad 			 * as we dropped the object lock, our cached pages can
   1301  1.78        ad 			 * be stale.
   1302  1.78        ad 			 */
   1303  1.78        ad 			uvm_page_array_clear(&a);
   1304  1.86        ad 			rw_enter(slock, RW_WRITER);
   1305   1.1     pooka 			if (error) {
   1306   1.1     pooka 				break;
   1307   1.1     pooka 			}
   1308   1.1     pooka 		}
   1309   1.1     pooka 	}
   1310  1.78        ad 	uvm_page_array_fini(&a);
   1311   1.1     pooka 
   1312  1.84        ad 	/*
   1313  1.84        ad 	 * update ctime/mtime if the modification we started writing out might
    1314  1.84        ad 	 * be from an mmap'ed write.
    1315  1.84        ad 	 *
    1316  1.84        ad 	 * this is necessary when an application keeps a file mmap'ed and
    1317  1.84        ad 	 * repeatedly modifies it via the mapped window.  note that, because we
   1318  1.84        ad 	 * don't always write-protect pages when cleaning, such modifications
   1319  1.84        ad 	 * might not involve any page faults.
   1320  1.84        ad 	 */
   1321  1.84        ad 
   1322  1.86        ad 	mutex_enter(vp->v_interlock);
   1323   1.1     pooka 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1324   1.1     pooka 	    (vp->v_type != VBLK ||
   1325   1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1326   1.1     pooka 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1327   1.1     pooka 	}
   1328   1.1     pooka 
   1329   1.1     pooka 	/*
   1330  1.84        ad 	 * if we no longer have any possibly dirty pages, take us off the
   1331  1.84        ad 	 * syncer list.
   1332   1.1     pooka 	 */
   1333   1.1     pooka 
   1334  1.84        ad 	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
   1335  1.84        ad 	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
   1336  1.84        ad 	    UVM_PAGE_DIRTY_TAG)) {
   1337   1.1     pooka 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1338   1.1     pooka 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1339   1.1     pooka 			vn_syncer_remove_from_worklist(vp);
   1340   1.1     pooka 	}
   1341   1.1     pooka 
   1342   1.1     pooka #if !defined(DEBUG)
   1343   1.1     pooka skip_scan:
   1344   1.1     pooka #endif /* !defined(DEBUG) */
   1345   1.2        ad 
   1346   1.2        ad 	/* Wait for output to complete. */
   1347  1.86        ad 	rw_exit(slock);
   1348   1.2        ad 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1349   1.2        ad 		while (vp->v_numoutput != 0)
   1350  1.86        ad 			cv_wait(&vp->v_cv, vp->v_interlock);
   1351   1.1     pooka 	}
   1352   1.4      yamt 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1353  1.86        ad 	mutex_exit(vp->v_interlock);
   1354   1.1     pooka 
   1355   1.4      yamt 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1356   1.4      yamt 		/*
    1357   1.4      yamt 		 * in the case of PGO_RECLAIM, make sure to leave the vnode clean.
   1358   1.4      yamt 		 * retrying is not a big deal because, in many cases,
   1359   1.4      yamt 		 * uobj->uo_npages is already 0 here.
   1360   1.4      yamt 		 */
   1361  1.86        ad 		rw_enter(slock, RW_WRITER);
   1362   1.4      yamt 		goto retry;
   1363   1.4      yamt 	}
   1364   1.4      yamt 
   1365  1.65   hannken 	if (trans_mp) {
   1366  1.65   hannken 		if (holds_wapbl)
   1367  1.65   hannken 			WAPBL_END(trans_mp);
   1368  1.65   hannken 		fstrans_done(trans_mp);
   1369  1.12   hannken 	}
   1370   1.6   hannken 
   1371   1.1     pooka 	return (error);
   1372   1.1     pooka }
   1373   1.1     pooka 
   1374  1.72       chs /*
   1375  1.72       chs  * Default putrange method for file systems that do not care
   1376  1.72       chs  * how many pages are given to one GOP_WRITE() call.
   1377  1.72       chs  */
   1378  1.72       chs void
   1379  1.72       chs genfs_gop_putrange(struct vnode *vp, off_t off, off_t *lop, off_t *hip)
   1380  1.72       chs {
   1381  1.72       chs 
   1382  1.72       chs 	*lop = 0;
   1383  1.72       chs 	*hip = 0;
   1384  1.72       chs }
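
/*
 * To illustrate the GOP_PUTRANGE() contract asserted in genfs_putpages()
 * (*lop page-aligned and <= off; *hip page-aligned and either 0 or > off),
 * a hypothetical file system with 1MB allocation groups might keep each
 * cluster within a single group as sketched below.  "examplefs" and
 * EXAMPLEFS_AG_SHIFT are illustrative names only, not existing symbols.
 */
#if 0
#define EXAMPLEFS_AG_SHIFT	20	/* log2 of the allocation group size */

void
examplefs_gop_putrange(struct vnode *vp, off_t off, off_t *lop, off_t *hip)
{

	/* clamp the cluster to the allocation group containing "off". */
	*lop = off & ~(((off_t)1 << EXAMPLEFS_AG_SHIFT) - 1);
	*hip = *lop + ((off_t)1 << EXAMPLEFS_AG_SHIFT);
}
#endif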
   1385  1.72       chs 
   1386   1.1     pooka int
   1387   1.1     pooka genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1388   1.1     pooka {
   1389   1.1     pooka 	off_t off;
   1390   1.1     pooka 	vaddr_t kva;
   1391   1.1     pooka 	size_t len;
   1392   1.1     pooka 	int error;
   1393   1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1394   1.1     pooka 
   1395  1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
   1396  1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
   1397   1.1     pooka 
   1398   1.1     pooka 	off = pgs[0]->offset;
   1399   1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1400   1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1401   1.1     pooka 	len = npages << PAGE_SHIFT;
   1402   1.1     pooka 
   1403   1.1     pooka 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1404  1.85       chs 			    uvm_aio_aiodone);
   1405   1.1     pooka 
   1406   1.1     pooka 	return error;
   1407   1.1     pooka }
   1408   1.1     pooka 
   1409  1.78        ad /*
   1410  1.78        ad  * genfs_gop_write_rwmap:
   1411  1.78        ad  *
   1412  1.78        ad  * a variant of genfs_gop_write.  it's used by UDF for its directory buffers.
    1413  1.78        ad  * this maps pages with PROT_WRITE so that VOP_STRATEGY can modify
    1414  1.78        ad  * the contents before writing them out to the underlying storage.
   1415  1.78        ad  */
   1416  1.78        ad 
   1417   1.7   reinoud int
   1418  1.78        ad genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
   1419  1.78        ad     int flags)
   1420   1.7   reinoud {
   1421   1.7   reinoud 	off_t off;
   1422   1.7   reinoud 	vaddr_t kva;
   1423   1.7   reinoud 	size_t len;
   1424   1.7   reinoud 	int error;
   1425   1.7   reinoud 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1426   1.7   reinoud 
   1427  1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
   1428  1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
   1429   1.7   reinoud 
   1430   1.7   reinoud 	off = pgs[0]->offset;
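	/*
	 * UVMPAGER_MAPIN_READ names the direction of the i/o from the
	 * pages' point of view (data may be stored into them), so it
	 * yields a writable kernel mapping here, unlike the read-only
	 * mapping UVMPAGER_MAPIN_WRITE gives genfs_gop_write() above.
	 */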
   1431   1.7   reinoud 	kva = uvm_pagermapin(pgs, npages,
   1432   1.7   reinoud 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1433   1.7   reinoud 	len = npages << PAGE_SHIFT;
   1434   1.7   reinoud 
   1435   1.7   reinoud 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1436  1.85       chs 			    uvm_aio_aiodone);
   1437   1.7   reinoud 
   1438   1.7   reinoud 	return error;
   1439   1.7   reinoud }
   1440   1.7   reinoud 
   1441   1.1     pooka /*
   1442   1.1     pooka  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1443   1.1     pooka  * and mapped into kernel memory.  Here we just look up the underlying
   1444   1.1     pooka  * device block addresses and call the strategy routine.
   1445   1.1     pooka  */
   1446   1.1     pooka 
   1447   1.1     pooka static int
   1448   1.1     pooka genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1449   1.1     pooka     enum uio_rw rw, void (*iodone)(struct buf *))
   1450   1.1     pooka {
   1451  1.36  uebayasi 	int s, error;
   1452   1.1     pooka 	int fs_bshift, dev_bshift;
   1453   1.1     pooka 	off_t eof, offset, startoffset;
   1454   1.1     pooka 	size_t bytes, iobytes, skipbytes;
   1455   1.1     pooka 	struct buf *mbp, *bp;
   1456  1.35  uebayasi 	const bool async = (flags & PGO_SYNCIO) == 0;
    1457  1.54       chs 	const bool lazy = (flags & PGO_LAZY) != 0;
   1458  1.35  uebayasi 	const bool iowrite = rw == UIO_WRITE;
   1459  1.35  uebayasi 	const int brw = iowrite ? B_WRITE : B_READ;
   1460   1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1461   1.1     pooka 
   1462  1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx kva %#jx len 0x%jx flags 0x%jx",
   1463  1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)kva, len, flags);
   1464   1.1     pooka 
   1465   1.1     pooka 	KASSERT(vp->v_size <= vp->v_writesize);
   1466   1.1     pooka 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1467   1.1     pooka 	if (vp->v_type != VBLK) {
   1468   1.1     pooka 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1469   1.1     pooka 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1470   1.1     pooka 	} else {
   1471   1.1     pooka 		fs_bshift = DEV_BSHIFT;
   1472   1.1     pooka 		dev_bshift = DEV_BSHIFT;
   1473   1.1     pooka 	}
   1474   1.1     pooka 	error = 0;
   1475   1.1     pooka 	startoffset = off;
   1476   1.1     pooka 	bytes = MIN(len, eof - startoffset);
   1477   1.1     pooka 	skipbytes = 0;
   1478   1.1     pooka 	KASSERT(bytes != 0);
   1479   1.1     pooka 
   1480  1.35  uebayasi 	if (iowrite) {
   1481  1.78        ad 		/*
   1482  1.78        ad 		 * why += 2?
   1483  1.78        ad 		 * 1 for biodone, 1 for uvm_aio_aiodone.
   1484  1.78        ad 		 */
   1485  1.49     rmind 		mutex_enter(vp->v_interlock);
   1486   1.1     pooka 		vp->v_numoutput += 2;
   1487  1.49     rmind 		mutex_exit(vp->v_interlock);
   1488   1.1     pooka 	}
   1489   1.2        ad 	mbp = getiobuf(vp, true);
   1490  1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx mbp %#jx num now %jd bytes 0x%jx",
   1491  1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)mbp, vp->v_numoutput, bytes);
   1492   1.1     pooka 	mbp->b_bufsize = len;
   1493   1.1     pooka 	mbp->b_data = (void *)kva;
   1494   1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
   1495  1.89        ad 	mbp->b_cflags |= BC_BUSY | BC_AGE;
   1496   1.2        ad 	if (async) {
   1497   1.2        ad 		mbp->b_flags = brw | B_ASYNC;
   1498   1.2        ad 		mbp->b_iodone = iodone;
   1499   1.2        ad 	} else {
   1500   1.2        ad 		mbp->b_flags = brw;
   1501   1.2        ad 		mbp->b_iodone = NULL;
   1502   1.2        ad 	}
   1503   1.1     pooka 	if (curlwp == uvm.pagedaemon_lwp)
   1504   1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1505  1.54       chs 	else if (async || lazy)
   1506   1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1507   1.1     pooka 	else
   1508   1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1509   1.1     pooka 
   1510   1.1     pooka 	bp = NULL;
   1511   1.1     pooka 	for (offset = startoffset;
   1512   1.1     pooka 	    bytes > 0;
   1513   1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
   1514  1.36  uebayasi 		int run;
   1515  1.36  uebayasi 		daddr_t lbn, blkno;
   1516  1.36  uebayasi 		struct vnode *devvp;
   1517  1.36  uebayasi 
   1518  1.36  uebayasi 		/*
    1519  1.36  uebayasi 		 * bmap the file to find the blkno to do the i/o at and
    1520  1.36  uebayasi 		 * how much we can transfer in one go.  if bmap returns an error,
   1521  1.36  uebayasi 		 * skip the rest of the top-level i/o.
   1522  1.36  uebayasi 		 */
   1523  1.36  uebayasi 
   1524   1.1     pooka 		lbn = offset >> fs_bshift;
   1525   1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1526   1.1     pooka 		if (error) {
   1527  1.71  pgoyette 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
   1528  1.71  pgoyette 			    lbn, error, 0, 0);
   1529   1.1     pooka 			skipbytes += bytes;
   1530   1.1     pooka 			bytes = 0;
   1531  1.36  uebayasi 			goto loopdone;
   1532   1.1     pooka 		}
   1533   1.1     pooka 
   1534  1.36  uebayasi 		/*
    1535  1.36  uebayasi 		 * see how much of the request this i/o can cover:
    1536  1.36  uebayasi 		 * limit the transfer to the contiguous run of blocks
    1537  1.36  uebayasi 		 * that VOP_BMAP reported.
   1538  1.36  uebayasi 		 */
   1539  1.36  uebayasi 
   1540   1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1541   1.1     pooka 		    bytes);
   1542  1.36  uebayasi 
   1543  1.36  uebayasi 		/*
   1544  1.36  uebayasi 		 * if this block isn't allocated, zero it instead of
    1545  1.36  uebayasi 		 * if this block isn't allocated, skip it; for a read,
    1546  1.36  uebayasi 		 * zero the corresponding region instead of reading it.
    1547  1.36  uebayasi 		 * either way the region is accounted for in skipbytes.
   1548  1.36  uebayasi 
   1549   1.1     pooka 		if (blkno == (daddr_t)-1) {
   1550  1.35  uebayasi 			if (!iowrite) {
   1551   1.1     pooka 				memset((char *)kva + (offset - startoffset), 0,
   1552  1.36  uebayasi 				    iobytes);
   1553   1.1     pooka 			}
   1554   1.1     pooka 			skipbytes += iobytes;
   1555   1.1     pooka 			continue;
   1556   1.1     pooka 		}
   1557   1.1     pooka 
   1558  1.36  uebayasi 		/*
   1559  1.36  uebayasi 		 * allocate a sub-buf for this piece of the i/o
   1560  1.36  uebayasi 		 * (or just use mbp if there's only 1 piece),
   1561  1.36  uebayasi 		 * and start it going.
   1562  1.36  uebayasi 		 */
   1563  1.36  uebayasi 
   1564   1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
   1565   1.1     pooka 			bp = mbp;
   1566   1.1     pooka 		} else {
   1567  1.71  pgoyette 			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
   1568  1.71  pgoyette 			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
   1569   1.2        ad 			bp = getiobuf(vp, true);
   1570   1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1571   1.1     pooka 		}
   1572   1.1     pooka 		bp->b_lblkno = 0;
   1573   1.1     pooka 
   1574   1.1     pooka 		/* adjust physical blkno for partial blocks */
   1575   1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1576   1.1     pooka 		    dev_bshift);
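		/*
		 * e.g. with 8KB file system blocks (fs_bshift == 13) and
		 * 512-byte device blocks (dev_bshift == 9), offset 0x2400
		 * has lbn == 1, so the transfer starts
		 * (0x2400 - 0x2000) >> 9 == 2 sectors into that block.
		 */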
   1577  1.36  uebayasi 
   1578   1.1     pooka 		UVMHIST_LOG(ubchist,
   1579  1.71  pgoyette 		    "bp %#jx offset 0x%jx bcount 0x%jx blkno 0x%jx",
   1580  1.71  pgoyette 		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);
   1581   1.1     pooka 
   1582   1.1     pooka 		VOP_STRATEGY(devvp, bp);
   1583   1.1     pooka 	}
   1584  1.36  uebayasi 
   1585  1.36  uebayasi loopdone:
   1586   1.1     pooka 	if (skipbytes) {
   1587  1.71  pgoyette 		UVMHIST_LOG(ubchist, "skipbytes %jd", skipbytes, 0,0,0);
   1588   1.1     pooka 	}
   1589   1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
   1590   1.1     pooka 	if (async) {
   1591   1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1592   1.1     pooka 		return (0);
   1593   1.1     pooka 	}
   1594  1.71  pgoyette 	UVMHIST_LOG(ubchist, "waiting for mbp %#jx", (uintptr_t)mbp, 0, 0, 0);
   1595   1.1     pooka 	error = biowait(mbp);
   1596   1.1     pooka 	s = splbio();
   1597   1.1     pooka 	(*iodone)(mbp);
   1598   1.1     pooka 	splx(s);
   1599  1.71  pgoyette 	UVMHIST_LOG(ubchist, "returning, error %jd", error, 0, 0, 0);
   1600   1.1     pooka 	return (error);
   1601   1.1     pooka }
   1602   1.1     pooka 
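/*
 * Compatibility getpages routine for file systems without a native
 * VOP_GETPAGES implementation: find or allocate the pages, map them into
 * kernel memory, and fill any PG_FAKE pages using plain VOP_READ.
 */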
   1603   1.1     pooka int
   1604   1.1     pooka genfs_compat_getpages(void *v)
   1605   1.1     pooka {
   1606   1.1     pooka 	struct vop_getpages_args /* {
   1607   1.1     pooka 		struct vnode *a_vp;
   1608   1.1     pooka 		voff_t a_offset;
   1609   1.1     pooka 		struct vm_page **a_m;
   1610   1.1     pooka 		int *a_count;
   1611   1.1     pooka 		int a_centeridx;
   1612   1.1     pooka 		vm_prot_t a_access_type;
   1613   1.1     pooka 		int a_advice;
   1614   1.1     pooka 		int a_flags;
   1615   1.1     pooka 	} */ *ap = v;
   1616   1.1     pooka 
   1617   1.1     pooka 	off_t origoffset;
   1618   1.1     pooka 	struct vnode *vp = ap->a_vp;
   1619   1.1     pooka 	struct uvm_object *uobj = &vp->v_uobj;
   1620   1.1     pooka 	struct vm_page *pg, **pgs;
   1621   1.1     pooka 	vaddr_t kva;
   1622   1.1     pooka 	int i, error, orignpages, npages;
   1623   1.1     pooka 	struct iovec iov;
   1624   1.1     pooka 	struct uio uio;
   1625   1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1626  1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1627   1.1     pooka 
   1628   1.1     pooka 	error = 0;
   1629   1.1     pooka 	origoffset = ap->a_offset;
   1630   1.1     pooka 	orignpages = *ap->a_count;
   1631   1.1     pooka 	pgs = ap->a_m;
   1632   1.1     pooka 
   1633   1.1     pooka 	if (ap->a_flags & PGO_LOCKED) {
   1634  1.84        ad 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, NULL,
   1635  1.35  uebayasi 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1636   1.1     pooka 
   1637  1.38       chs 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1638  1.38       chs 		if (error == 0 && memwrite) {
   1639  1.38       chs 			genfs_markdirty(vp);
   1640  1.38       chs 		}
   1641  1.38       chs 		return error;
   1642   1.1     pooka 	}
   1643   1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1644  1.86        ad 		rw_exit(uobj->vmobjlock);
   1645  1.38       chs 		return EINVAL;
   1646   1.1     pooka 	}
   1647   1.1     pooka 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1648  1.86        ad 		rw_exit(uobj->vmobjlock);
   1649   1.1     pooka 		return 0;
   1650   1.1     pooka 	}
   1651   1.1     pooka 	npages = orignpages;
   1652  1.84        ad 	uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
   1653  1.86        ad 	rw_exit(uobj->vmobjlock);
   1654   1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1655   1.1     pooka 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1656   1.1     pooka 	for (i = 0; i < npages; i++) {
   1657   1.1     pooka 		pg = pgs[i];
   1658   1.1     pooka 		if ((pg->flags & PG_FAKE) == 0) {
   1659   1.1     pooka 			continue;
   1660   1.1     pooka 		}
   1661   1.1     pooka 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1662   1.1     pooka 		iov.iov_len = PAGE_SIZE;
   1663   1.1     pooka 		uio.uio_iov = &iov;
   1664   1.1     pooka 		uio.uio_iovcnt = 1;
   1665   1.1     pooka 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1666   1.1     pooka 		uio.uio_rw = UIO_READ;
   1667   1.1     pooka 		uio.uio_resid = PAGE_SIZE;
   1668   1.1     pooka 		UIO_SETUP_SYSSPACE(&uio);
   1669   1.1     pooka 		/* XXX vn_lock */
   1670   1.1     pooka 		error = VOP_READ(vp, &uio, 0, cred);
   1671   1.1     pooka 		if (error) {
   1672   1.1     pooka 			break;
   1673   1.1     pooka 		}
   1674   1.1     pooka 		if (uio.uio_resid) {
   1675   1.1     pooka 			memset(iov.iov_base, 0, uio.uio_resid);
   1676   1.1     pooka 		}
   1677   1.1     pooka 	}
   1678   1.1     pooka 	uvm_pagermapout(kva, npages);
   1679  1.86        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
   1680   1.1     pooka 	for (i = 0; i < npages; i++) {
   1681   1.1     pooka 		pg = pgs[i];
   1682   1.1     pooka 		if (error && (pg->flags & PG_FAKE) != 0) {
   1683   1.1     pooka 			pg->flags |= PG_RELEASED;
   1684   1.1     pooka 		} else {
   1685  1.84        ad 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
   1686  1.83        ad 			uvm_pagelock(pg);
   1687   1.1     pooka 			uvm_pageactivate(pg);
   1688  1.83        ad 			uvm_pageunlock(pg);
   1689   1.1     pooka 		}
   1690   1.1     pooka 	}
   1691   1.1     pooka 	if (error) {
   1692   1.1     pooka 		uvm_page_unbusy(pgs, npages);
   1693   1.1     pooka 	}
   1694  1.38       chs 	if (error == 0 && memwrite) {
   1695  1.38       chs 		genfs_markdirty(vp);
   1696  1.38       chs 	}
   1697  1.86        ad 	rw_exit(uobj->vmobjlock);
   1698  1.38       chs 	return error;
   1699   1.1     pooka }
   1700   1.1     pooka 
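/*
 * Compatibility gop_write routine: push the pages out with plain
 * VOP_WRITE, then fake up a buf and let uvm_aio_aiodone() finish up
 * and release the pages.
 */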
   1701   1.1     pooka int
   1702   1.1     pooka genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1703   1.1     pooka     int flags)
   1704   1.1     pooka {
   1705   1.1     pooka 	off_t offset;
   1706   1.1     pooka 	struct iovec iov;
   1707   1.1     pooka 	struct uio uio;
   1708   1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1709   1.1     pooka 	struct buf *bp;
   1710   1.1     pooka 	vaddr_t kva;
   1711   1.2        ad 	int error;
   1712   1.1     pooka 
   1713   1.1     pooka 	offset = pgs[0]->offset;
   1714   1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1715   1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1716   1.1     pooka 
   1717   1.1     pooka 	iov.iov_base = (void *)kva;
   1718   1.1     pooka 	iov.iov_len = npages << PAGE_SHIFT;
   1719   1.1     pooka 	uio.uio_iov = &iov;
   1720   1.1     pooka 	uio.uio_iovcnt = 1;
   1721   1.1     pooka 	uio.uio_offset = offset;
   1722   1.1     pooka 	uio.uio_rw = UIO_WRITE;
   1723   1.1     pooka 	uio.uio_resid = npages << PAGE_SHIFT;
   1724   1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
   1725   1.1     pooka 	/* XXX vn_lock */
   1726   1.1     pooka 	error = VOP_WRITE(vp, &uio, 0, cred);
   1727   1.1     pooka 
   1728  1.49     rmind 	mutex_enter(vp->v_interlock);
   1729   1.2        ad 	vp->v_numoutput++;
   1730  1.49     rmind 	mutex_exit(vp->v_interlock);
   1731   1.1     pooka 
   1732   1.2        ad 	bp = getiobuf(vp, true);
   1733  1.89        ad 	bp->b_cflags |= BC_BUSY | BC_AGE;
   1734   1.1     pooka 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1735   1.1     pooka 	bp->b_data = (char *)kva;
   1736   1.1     pooka 	bp->b_bcount = npages << PAGE_SHIFT;
   1737   1.1     pooka 	bp->b_bufsize = npages << PAGE_SHIFT;
   1738   1.1     pooka 	bp->b_resid = 0;
   1739   1.1     pooka 	bp->b_error = error;
   1740   1.1     pooka 	uvm_aio_aiodone(bp);
   1741   1.1     pooka 	return (error);
   1742   1.1     pooka }
   1743   1.1     pooka 
   1744   1.1     pooka /*
   1745   1.1     pooka  * Process a uio using direct I/O.  If we reach a part of the request
    1746   1.1     pooka  * which cannot be processed in this fashion for some reason, just return,
    1747   1.1     pooka  * leaving the unprocessed remainder in the uio.  The caller must handle
    1748   1.1     pooka  * that part using buffered I/O before trying direct I/O again.
   1749   1.1     pooka  */
   1750   1.1     pooka 
   1751   1.1     pooka void
   1752   1.1     pooka genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1753   1.1     pooka {
   1754   1.1     pooka 	struct vmspace *vs;
   1755   1.1     pooka 	struct iovec *iov;
   1756   1.1     pooka 	vaddr_t va;
   1757   1.1     pooka 	size_t len;
   1758   1.1     pooka 	const int mask = DEV_BSIZE - 1;
   1759   1.1     pooka 	int error;
   1760  1.16     joerg 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1761  1.16     joerg 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1762   1.1     pooka 
   1763  1.74  jdolecek #ifdef DIAGNOSTIC
   1764  1.74  jdolecek 	if ((ioflag & IO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
   1765  1.74  jdolecek                 WAPBL_JLOCK_ASSERT(vp->v_mount);
   1766  1.74  jdolecek #endif
   1767  1.74  jdolecek 
   1768   1.1     pooka 	/*
   1769   1.1     pooka 	 * We only support direct I/O to user space for now.
   1770   1.1     pooka 	 */
   1771   1.1     pooka 
   1772   1.1     pooka 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1773   1.1     pooka 		return;
   1774   1.1     pooka 	}
   1775   1.1     pooka 
   1776   1.1     pooka 	/*
   1777   1.1     pooka 	 * If the vnode is mapped, we would need to get the getpages lock
   1778  1.53      yamt 	 * to stabilize the bmap, but then we would get into trouble while
   1779   1.1     pooka 	 * locking the pages if the pages belong to this same vnode (or a
   1780   1.1     pooka 	 * multi-vnode cascade to the same effect).  Just fall back to
   1781   1.1     pooka 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1782   1.1     pooka 	 */
   1783   1.1     pooka 
   1784   1.1     pooka 	if (vp->v_vflag & VV_MAPPED) {
   1785   1.1     pooka 		return;
   1786   1.1     pooka 	}
   1787   1.1     pooka 
   1788  1.16     joerg 	if (need_wapbl) {
   1789  1.13   hannken 		error = WAPBL_BEGIN(vp->v_mount);
   1790  1.13   hannken 		if (error)
   1791  1.13   hannken 			return;
   1792  1.13   hannken 	}
   1793  1.13   hannken 
   1794   1.1     pooka 	/*
   1795   1.1     pooka 	 * Do as much of the uio as possible with direct I/O.
   1796   1.1     pooka 	 */
   1797   1.1     pooka 
   1798   1.1     pooka 	vs = uio->uio_vmspace;
   1799   1.1     pooka 	while (uio->uio_resid) {
   1800   1.1     pooka 		iov = uio->uio_iov;
   1801   1.1     pooka 		if (iov->iov_len == 0) {
   1802   1.1     pooka 			uio->uio_iov++;
   1803   1.1     pooka 			uio->uio_iovcnt--;
   1804   1.1     pooka 			continue;
   1805   1.1     pooka 		}
   1806   1.1     pooka 		va = (vaddr_t)iov->iov_base;
   1807   1.1     pooka 		len = MIN(iov->iov_len, genfs_maxdio);
   1808   1.1     pooka 		len &= ~mask;
   1809   1.1     pooka 
   1810   1.1     pooka 		/*
   1811   1.1     pooka 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1812   1.1     pooka 		 * the current EOF, then fall back to buffered I/O.
   1813   1.1     pooka 		 */
   1814   1.1     pooka 
   1815   1.1     pooka 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1816  1.13   hannken 			break;
   1817   1.1     pooka 		}
   1818   1.1     pooka 
   1819   1.1     pooka 		/*
   1820   1.1     pooka 		 * Check alignment.  The file offset must be at least
   1821   1.1     pooka 		 * sector-aligned.  The exact constraint on memory alignment
   1822   1.1     pooka 		 * is very hardware-dependent, but requiring sector-aligned
   1823   1.1     pooka 		 * addresses there too is safe.
   1824   1.1     pooka 		 */
   1825   1.1     pooka 
   1826   1.1     pooka 		if (uio->uio_offset & mask || va & mask) {
   1827  1.13   hannken 			break;
   1828   1.1     pooka 		}
   1829   1.1     pooka 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1830   1.1     pooka 					  uio->uio_rw);
   1831   1.1     pooka 		if (error) {
   1832   1.1     pooka 			break;
   1833   1.1     pooka 		}
   1834   1.1     pooka 		iov->iov_base = (char *)iov->iov_base + len;
   1835   1.1     pooka 		iov->iov_len -= len;
   1836   1.1     pooka 		uio->uio_offset += len;
   1837   1.1     pooka 		uio->uio_resid -= len;
   1838   1.1     pooka 	}
   1839  1.13   hannken 
   1840  1.16     joerg 	if (need_wapbl)
   1841  1.13   hannken 		WAPBL_END(vp->v_mount);
   1842   1.1     pooka }
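
/*
 * An illustrative (hypothetical) caller of the above: try direct I/O
 * first, then finish whatever remains with the file system's buffered
 * path.  "examplefs_read" and "examplefs_buffered_read" are made-up
 * names; real callers live in the fs-specific read/write code.
 */
#if 0
int
examplefs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;

	if ((ap->a_ioflag & IO_DIRECT) != 0) {
		/*
		 * genfs_directio() consumes as much of the uio as it can;
		 * anything it could not handle is left in uio_resid.
		 */
		genfs_directio(ap->a_vp, uio, ap->a_ioflag);
	}
	if (uio->uio_resid == 0)
		return 0;
	return examplefs_buffered_read(ap->a_vp, uio, ap->a_ioflag);
}
#endif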
   1843   1.1     pooka 
   1844   1.1     pooka /*
   1845   1.1     pooka  * Iodone routine for direct I/O.  We don't do much here since the request is
   1846   1.1     pooka  * always synchronous, so the caller will do most of the work after biowait().
   1847   1.1     pooka  */
   1848   1.1     pooka 
   1849   1.1     pooka static void
   1850   1.1     pooka genfs_dio_iodone(struct buf *bp)
   1851   1.1     pooka {
   1852   1.1     pooka 
   1853   1.1     pooka 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1854   1.2        ad 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1855   1.2        ad 		mutex_enter(bp->b_objlock);
   1856   1.1     pooka 		vwakeup(bp);
   1857   1.2        ad 		mutex_exit(bp->b_objlock);
   1858   1.1     pooka 	}
   1859   1.1     pooka 	putiobuf(bp);
   1860   1.1     pooka }
   1861   1.1     pooka 
   1862   1.1     pooka /*
   1863   1.1     pooka  * Process one chunk of a direct I/O request.
   1864   1.1     pooka  */
   1865   1.1     pooka 
   1866   1.1     pooka static int
   1867   1.1     pooka genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1868   1.1     pooka     off_t off, enum uio_rw rw)
   1869   1.1     pooka {
   1870   1.1     pooka 	struct vm_map *map;
   1871  1.56    martin 	struct pmap *upm, *kpm __unused;
   1872   1.1     pooka 	size_t klen = round_page(uva + len) - trunc_page(uva);
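	/*
	 * klen covers every page the user window touches: with 4KB pages,
	 * uva == 0x20000e00 and len == 0x400 straddle a page boundary, so
	 * klen == 0x2000 even though len is only 0x400.
	 */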
   1873   1.1     pooka 	off_t spoff, epoff;
   1874   1.1     pooka 	vaddr_t kva, puva;
   1875   1.1     pooka 	paddr_t pa;
   1876   1.1     pooka 	vm_prot_t prot;
   1877  1.58    martin 	int error, rv __diagused, poff, koff;
   1878  1.13   hannken 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1879   1.1     pooka 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1880   1.1     pooka 
   1881   1.1     pooka 	/*
   1882   1.1     pooka 	 * For writes, verify that this range of the file already has fully
   1883   1.1     pooka 	 * allocated backing store.  If there are any holes, just punt and
   1884   1.1     pooka 	 * make the caller take the buffered write path.
   1885   1.1     pooka 	 */
   1886   1.1     pooka 
   1887   1.1     pooka 	if (rw == UIO_WRITE) {
   1888   1.1     pooka 		daddr_t lbn, elbn, blkno;
   1889   1.1     pooka 		int bsize, bshift, run;
   1890   1.1     pooka 
   1891   1.1     pooka 		bshift = vp->v_mount->mnt_fs_bshift;
   1892   1.1     pooka 		bsize = 1 << bshift;
   1893   1.1     pooka 		lbn = off >> bshift;
   1894   1.1     pooka 		elbn = (off + len + bsize - 1) >> bshift;
   1895   1.1     pooka 		while (lbn < elbn) {
   1896   1.1     pooka 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1897   1.1     pooka 			if (error) {
   1898   1.1     pooka 				return error;
   1899   1.1     pooka 			}
   1900   1.1     pooka 			if (blkno == (daddr_t)-1) {
   1901   1.1     pooka 				return ENOSPC;
   1902   1.1     pooka 			}
   1903   1.1     pooka 			lbn += 1 + run;
   1904   1.1     pooka 		}
   1905   1.1     pooka 	}
   1906   1.1     pooka 
   1907   1.1     pooka 	/*
   1908   1.1     pooka 	 * Flush any cached pages for parts of the file that we're about to
   1909   1.1     pooka 	 * access.  If we're writing, invalidate pages as well.
   1910   1.1     pooka 	 */
   1911   1.1     pooka 
   1912   1.1     pooka 	spoff = trunc_page(off);
   1913   1.1     pooka 	epoff = round_page(off + len);
   1914  1.87        ad 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1915   1.1     pooka 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1916   1.1     pooka 	if (error) {
   1917   1.1     pooka 		return error;
   1918   1.1     pooka 	}
   1919   1.1     pooka 
   1920   1.1     pooka 	/*
   1921   1.1     pooka 	 * Wire the user pages and remap them into kernel memory.
   1922   1.1     pooka 	 */
   1923   1.1     pooka 
   1924   1.1     pooka 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1925   1.1     pooka 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1926   1.1     pooka 	if (error) {
   1927   1.1     pooka 		return error;
   1928   1.1     pooka 	}
   1929   1.1     pooka 
   1930   1.1     pooka 	map = &vs->vm_map;
   1931   1.1     pooka 	upm = vm_map_pmap(map);
   1932   1.1     pooka 	kpm = vm_map_pmap(kernel_map);
   1933   1.1     pooka 	puva = trunc_page(uva);
   1934  1.51      matt 	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
   1935  1.51      matt 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
   1936   1.1     pooka 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1937   1.1     pooka 		rv = pmap_extract(upm, puva + poff, &pa);
   1938   1.1     pooka 		KASSERT(rv);
   1939  1.51      matt 		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
   1940   1.1     pooka 	}
   1941   1.1     pooka 	pmap_update(kpm);
   1942   1.1     pooka 
   1943   1.1     pooka 	/*
   1944   1.1     pooka 	 * Do the I/O.
   1945   1.1     pooka 	 */
   1946   1.1     pooka 
   1947   1.1     pooka 	koff = uva - trunc_page(uva);
   1948   1.1     pooka 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1949   1.1     pooka 			    genfs_dio_iodone);
   1950   1.1     pooka 
   1951   1.1     pooka 	/*
   1952   1.1     pooka 	 * Tear down the kernel mapping.
   1953   1.1     pooka 	 */
   1954   1.1     pooka 
   1955  1.51      matt 	pmap_kremove(kva, klen);
   1956   1.1     pooka 	pmap_update(kpm);
   1957   1.1     pooka 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1958   1.1     pooka 
   1959   1.1     pooka 	/*
   1960   1.1     pooka 	 * Unwire the user pages.
   1961   1.1     pooka 	 */
   1962   1.1     pooka 
   1963   1.1     pooka 	uvm_vsunlock(vs, (void *)uva, len);
   1964   1.1     pooka 	return error;
   1965   1.1     pooka }
   1966