/*	$NetBSD: genfs_io.c,v 1.6.2.5 2010/10/09 03:32:34 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.6.2.5 2010/10/09 03:32:34 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;

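/*
 * genfs_rel_pages: unbusy the given pages.  pages that were freshly
 * allocated for a request that is now being abandoned (PG_FAKE) are
 * marked PG_RELEASED so that uvm_page_unbusy() frees them.
 * the object must be locked by the caller.
 */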
static void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
}

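/*
 * genfs_markdirty: note that a vnode now has dirty pages: bump the
 * node's dirty generation number, put the vnode on the syncer worklist
 * if it isn't there already, and for writably-mmap'ed vnodes record
 * that the mapping has been dirtied.  called with v_interlock held.
 */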
static void
genfs_markdirty(struct vnode *vp)
{
	struct genfs_node * const gp = VTOG(vp);

	KASSERT(mutex_owned(&vp->v_interlock));
	gp->g_dirtygen++;
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	bool has_trans = false;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			mutex_exit(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	mutex_exit(&uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
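	/*
	 * e.g. with an 8KB fs block (fs_bshift 13, blk_mask 0x1fff):
	 * trunc_blk(0x2345) == 0x2000 and round_blk(0x2345) == 0x4000.
	 */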

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
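	/*
	 * the pgs[] array covers whole fs blocks, so it can begin before
	 * origoffset; ridx is the index of the first page that the caller
	 * actually asked for.
	 */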

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);

	if (!has_trans) {
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		has_trans = true;
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	mutex_enter(&uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		mutex_exit(&uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			mutex_exit(&uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	mutex_exit(&uobj->vmobjlock);

    {
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

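	/*
	 * mbp is the master buffer for the whole transfer; pieces of the
	 * range that need separate i/os get nested sub-buffers via
	 * nestiobuf_setup() below, and ranges we skip entirely are
	 * accounted for with nestiobuf_done() at loopdone.
	 */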
	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_biodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
			    kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}
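	/*
	 * e.g. with 4KB pages and the on-disk data ending 0x200 bytes
	 * into a page, the first iteration zeroes the remaining 0xe00
	 * bytes of that page and subsequent iterations zero whole pages.
	 */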

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}
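		/*
		 * iobytes is now clamped so that the transfer stops at the
		 * first resident page (one without PG_FAKE), preserving its
		 * valid data.
		 */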

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (memwrite) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
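		/*
		 * e.g. with 8KB fs blocks (fs_bshift 13) and 512-byte
		 * sectors (dev_bshift 9), an offset 0x200 bytes into the
		 * block starts one sector past blkno.
		 */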

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
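	/*
	 * nestiobuf_done() credits mbp with the bytes we never issued
	 * i/o for; once every sub-buffer has also completed, mbp's iodone
	 * runs (async case) or the biowait() below returns (sync case).
	 */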
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		error = 0;
		goto out_err_free;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (vp->v_mount->mnt_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
		}

		if (!error) {
			error = GOP_ALLOC(vp, startoffset,
			    npages << PAGE_SHIFT, 0, cred);
			if (vp->v_mount->mnt_wapbl) {
				WAPBL_END(vp->v_mount);
			}
		}

		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pg,0,0,0);
			}
		}
	}
	if (!glocked) {
		genfs_node_unlock(vp);
	}

	putiobuf(mbp);
    }

	mutex_enter(&uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			if (pg == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pg, pg->flags, 0,0);
			if (pg->flags & PG_FAKE) {
				pg->flags |= PG_RELEASED;
			}
		}
		mutex_enter(&uvm_pageqlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	error = 0;
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	mutex_exit(&uvm_pageqlock);
	if (memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (has_trans)
		fstrans_done(vp->v_mount);
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	kmutex_t * const slock = &uobj->vmobjlock;
	off_t off;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
#define maxpages (MAXPHYS >> PAGE_SHIFT)
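	/* e.g. MAXPHYS of 64KB with 4KB pages gives 16-page clusters. */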
	int i, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	bool wasclean, by_list, needs_clean, yld;
	bool async = (origflags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp * const l = curlwp ? curlwp : &lwp0;
	struct genfs_node * const gp = VTOG(vp);
	int flags;
	int dirtygen;
	bool modified;
	bool need_wapbl;
	bool has_trans;
	bool cleanall;
	bool onworklst;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	has_trans = false;
	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (origflags & PGO_JOURNALLOCKED) == 0);

retry:
	modified = false;
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		if (has_trans) {
			if (need_wapbl)
				WAPBL_END(vp->v_mount);
			fstrans_done(vp->v_mount);
		}
		mutex_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
		mutex_exit(slock);
		if (pagedaemon) {
			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
			if (error)
				return error;
		} else
			fstrans_start(vp->v_mount, FSTRANS_LAZY);
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				return error;
			}
		}
		has_trans = true;
		mutex_enter(slock);
		goto retry;
	}

	error = 0;
	wasclean = (vp->v_numoutput == 0);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);

#if !defined(DEBUG)
	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
		flags &= ~PGO_CLEANIT;
	}
#endif /* !defined(DEBUG) */

	/*
	 * start the loop.  when scanning by list, queue a marker page
	 * (endmp) behind the current tail before we start.  pages
	 * allocated after we start will be inserted after the marker,
	 * so we can stop when we reach it.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_iflag & VI_ONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	if (by_list) {
		curmp.flags = PG_MARKER;
		endmp.flags = PG_MARKER;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj ||
		    (pg->flags & PG_MARKER) != 0);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->flags & PG_MARKER) {
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = false;
				}
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = false;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
			}
			if (yld) {
				mutex_exit(slock);
				preempt();
				mutex_enter(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				mutex_enter(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}
   1091      1.1    pooka 
   1092      1.1    pooka 			/*
   1093      1.1    pooka 			 * then plug in our page of interest.
   1094      1.1    pooka 			 */
   1095      1.1    pooka 
   1096      1.1    pooka 			pgs[nback] = pg;
   1097      1.1    pooka 
   1098      1.1    pooka 			/*
   1099      1.1    pooka 			 * then look forward to fill in the remaining space in
   1100      1.1    pooka 			 * the array of pages.
   1101      1.1    pooka 			 */
   1102      1.1    pooka 
   1103      1.1    pooka 			npages = maxpages - nback - 1;
   1104      1.1    pooka 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1105      1.1    pooka 			    &pgs[nback + 1],
   1106      1.1    pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1107      1.1    pooka 			npages += nback + 1;
   1108      1.1    pooka 		} else {
   1109      1.1    pooka 			pgs[0] = pg;
   1110      1.1    pooka 			npages = 1;
   1111      1.1    pooka 			nback = 0;
   1112      1.1    pooka 		}
   1113      1.1    pooka 
   1114      1.1    pooka 		/*
   1115      1.1    pooka 		 * apply FREE or DEACTIVATE options if requested.
   1116      1.1    pooka 		 */
   1117      1.1    pooka 
   1118      1.1    pooka 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1119      1.2       ad 			mutex_enter(&uvm_pageqlock);
   1120      1.1    pooka 		}
   1121      1.1    pooka 		for (i = 0; i < npages; i++) {
   1122      1.1    pooka 			tpg = pgs[i];
   1123      1.1    pooka 			KASSERT(tpg->uobject == uobj);
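                           			/*
                           			 * if this cluster page immediately follows pg on the
                           			 * object's page list, advance pg so that the by_list
                           			 * walk resumes after the pages handled here.
                           			 */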
   1124  1.6.2.2     yamt 			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
   1125      1.1    pooka 				pg = tpg;
   1126      1.1    pooka 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1127      1.1    pooka 				continue;
   1128      1.1    pooka 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1129      1.1    pooka 				uvm_pagedeactivate(tpg);
   1130      1.1    pooka 			} else if (flags & PGO_FREE) {
   1131      1.1    pooka 				pmap_page_protect(tpg, VM_PROT_NONE);
   1132      1.1    pooka 				if (tpg->flags & PG_BUSY) {
   1133      1.1    pooka 					tpg->flags |= freeflag;
   1134      1.1    pooka 					if (pagedaemon) {
   1135      1.2       ad 						uvm_pageout_start(1);
   1136      1.1    pooka 						uvm_pagedequeue(tpg);
   1137      1.1    pooka 					}
   1138      1.1    pooka 				} else {
   1139      1.1    pooka 
   1140      1.1    pooka 					/*
   1141      1.1    pooka 					 * ``page is not busy''
   1142      1.1    pooka 					 * implies that npages is 1
   1143      1.1    pooka 					 * and needs_clean is false.
   1144      1.1    pooka 					 */
   1145      1.1    pooka 
   1146  1.6.2.2     yamt 					nextpg = TAILQ_NEXT(tpg, listq.queue);
   1147      1.1    pooka 					uvm_pagefree(tpg);
   1148      1.1    pooka 					if (pagedaemon)
   1149      1.1    pooka 						uvmexp.pdfreed++;
   1150      1.1    pooka 				}
   1151      1.1    pooka 			}
   1152      1.1    pooka 		}
   1153      1.1    pooka 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1154      1.2       ad 			mutex_exit(&uvm_pageqlock);
   1155      1.1    pooka 		}
   1156      1.1    pooka 		if (needs_clean) {
   1157      1.1    pooka 			modified = true;
   1158      1.1    pooka 
   1159      1.1    pooka 			/*
   1160      1.1    pooka 			 * start the i/o.  if we're traversing by list,
   1161      1.1    pooka 			 * keep our place in the list with a marker page.
   1162      1.1    pooka 			 */
   1163      1.1    pooka 
   1164      1.1    pooka 			if (by_list) {
   1165      1.1    pooka 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1166  1.6.2.2     yamt 				    listq.queue);
   1167      1.1    pooka 			}
   1168      1.2       ad 			mutex_exit(slock);
   1169      1.1    pooka 			error = GOP_WRITE(vp, pgs, npages, flags);
   1170      1.2       ad 			mutex_enter(slock);
   1171      1.1    pooka 			if (by_list) {
   1172  1.6.2.2     yamt 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1173  1.6.2.2     yamt 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1174      1.1    pooka 			}
   1175      1.1    pooka 			if (error) {
   1176      1.1    pooka 				break;
   1177      1.1    pooka 			}
   1178      1.1    pooka 			if (by_list) {
   1179      1.1    pooka 				continue;
   1180      1.1    pooka 			}
   1181      1.1    pooka 		}
   1182      1.1    pooka 
   1183      1.1    pooka 		/*
   1184      1.1    pooka 		 * find the next page and continue if there was no error.
   1185      1.1    pooka 		 */
   1186      1.1    pooka 
   1187      1.1    pooka 		if (by_list) {
   1188      1.1    pooka 			if (nextpg) {
   1189      1.1    pooka 				pg = nextpg;
   1190      1.1    pooka 				nextpg = NULL;
   1191      1.1    pooka 			} else {
   1192  1.6.2.2     yamt 				pg = TAILQ_NEXT(pg, listq.queue);
   1193      1.1    pooka 			}
   1194      1.1    pooka 		} else {
   1195      1.1    pooka 			off += (npages - nback) << PAGE_SHIFT;
   1196      1.1    pooka 			if (off < endoff) {
   1197      1.1    pooka 				pg = uvm_pagelookup(uobj, off);
   1198      1.1    pooka 			}
   1199      1.1    pooka 		}
   1200      1.1    pooka 	}
   1201      1.1    pooka 	if (by_list) {
   1202  1.6.2.2     yamt 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
   1203      1.1    pooka 	}
   1204      1.1    pooka 
   1205      1.1    pooka 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1206      1.1    pooka 	    (vp->v_type != VBLK ||
   1207      1.1    pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1208      1.1    pooka 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1209      1.1    pooka 	}
   1210      1.1    pooka 
   1211      1.1    pooka 	/*
   1212      1.1    pooka 	 * if we're cleaning and there was nothing to clean,
   1213      1.1    pooka 	 * take us off the syncer list.  if we started any i/o
   1214      1.1    pooka 	 * and we're doing sync i/o, wait for all writes to finish.
   1215      1.1    pooka 	 */
   1216      1.1    pooka 
   1217      1.1    pooka 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1218      1.1    pooka 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1219      1.5     yamt #if defined(DEBUG)
   1220  1.6.2.2     yamt 		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
   1221  1.6.2.4     yamt 			if ((pg->flags & PG_MARKER) != 0) {
   1222  1.6.2.4     yamt 				continue;
   1223  1.6.2.4     yamt 			}
   1224      1.5     yamt 			if ((pg->flags & PG_CLEAN) == 0) {
   1225      1.5     yamt 				printf("%s: %p: !CLEAN\n", __func__, pg);
   1226      1.5     yamt 			}
   1227      1.5     yamt 			if (pmap_is_modified(pg)) {
   1228      1.5     yamt 				printf("%s: %p: modified\n", __func__, pg);
   1229      1.5     yamt 			}
   1230      1.5     yamt 		}
   1231      1.5     yamt #endif /* defined(DEBUG) */
   1232      1.1    pooka 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1233      1.1    pooka 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1234      1.1    pooka 			vn_syncer_remove_from_worklist(vp);
   1235      1.1    pooka 	}
   1236      1.1    pooka 
   1237      1.1    pooka #if !defined(DEBUG)
   1238      1.1    pooka skip_scan:
   1239      1.1    pooka #endif /* !defined(DEBUG) */
   1240      1.2       ad 
   1241      1.2       ad 	/* Wait for output to complete. */
   1242      1.2       ad 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1243      1.2       ad 		while (vp->v_numoutput != 0)
   1244      1.2       ad 			cv_wait(&vp->v_cv, slock);
   1245      1.1    pooka 	}
   1246      1.4     yamt 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1247      1.2       ad 	mutex_exit(slock);
   1248      1.1    pooka 
   1249      1.4     yamt 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1250      1.4     yamt 		/*
    1251      1.4     yamt 		 * in the case of PGO_RECLAIM, make sure the vnode ends up clean.
   1252      1.4     yamt 		 * retrying is not a big deal because, in many cases,
   1253      1.4     yamt 		 * uobj->uo_npages is already 0 here.
   1254      1.4     yamt 		 */
   1255      1.4     yamt 		mutex_enter(slock);
   1256      1.4     yamt 		goto retry;
   1257      1.4     yamt 	}
   1258      1.4     yamt 
   1259  1.6.2.2     yamt 	if (has_trans) {
   1260  1.6.2.2     yamt 		if (need_wapbl)
   1261  1.6.2.2     yamt 			WAPBL_END(vp->v_mount);
   1262      1.6  hannken 		fstrans_done(vp->v_mount);
   1263  1.6.2.2     yamt 	}
   1264      1.6  hannken 
   1265      1.1    pooka 	return (error);
   1266      1.1    pooka }
   1267      1.1    pooka 
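                           /*
                            * genfs_gop_write: the genfs implementation of GOP_WRITE, used by
                            * genfs_putpages() above to push out one cluster of (already busied)
                            * pages.  Map the pages into kernel virtual memory with
                            * uvm_pagermapin() and start the write via genfs_do_io().
                            */
                           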
   1268      1.1    pooka int
   1269      1.1    pooka genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1270      1.1    pooka {
   1271      1.1    pooka 	off_t off;
   1272      1.1    pooka 	vaddr_t kva;
   1273      1.1    pooka 	size_t len;
   1274      1.1    pooka 	int error;
   1275      1.1    pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1276      1.1    pooka 
   1277      1.1    pooka 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1278      1.1    pooka 	    vp, pgs, npages, flags);
   1279      1.1    pooka 
   1280      1.1    pooka 	off = pgs[0]->offset;
   1281      1.1    pooka 	kva = uvm_pagermapin(pgs, npages,
   1282      1.1    pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1283      1.1    pooka 	len = npages << PAGE_SHIFT;
   1284      1.1    pooka 
   1285      1.1    pooka 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1286      1.1    pooka 			    uvm_aio_biodone);
   1287      1.1    pooka 
   1288      1.1    pooka 	return error;
   1289      1.1    pooka }
   1290      1.1    pooka 
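                           /*
                            * genfs_gop_write_rwmap: same as genfs_gop_write, except that the
                            * pages are mapped with UVMPAGER_MAPIN_READ rather than
                            * UVMPAGER_MAPIN_WRITE, i.e. requesting the writable flavour of the
                            * pager mapping, presumably for filesystems that need to modify the
                            * data while it is being written out.
                            */
                           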
   1291  1.6.2.1     yamt int
   1292  1.6.2.1     yamt genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1293  1.6.2.1     yamt {
   1294  1.6.2.1     yamt 	off_t off;
   1295  1.6.2.1     yamt 	vaddr_t kva;
   1296  1.6.2.1     yamt 	size_t len;
   1297  1.6.2.1     yamt 	int error;
   1298  1.6.2.1     yamt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1299  1.6.2.1     yamt 
   1300  1.6.2.1     yamt 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1301  1.6.2.1     yamt 	    vp, pgs, npages, flags);
   1302  1.6.2.1     yamt 
   1303  1.6.2.1     yamt 	off = pgs[0]->offset;
   1304  1.6.2.1     yamt 	kva = uvm_pagermapin(pgs, npages,
   1305  1.6.2.1     yamt 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1306  1.6.2.1     yamt 	len = npages << PAGE_SHIFT;
   1307  1.6.2.1     yamt 
   1308  1.6.2.1     yamt 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1309  1.6.2.1     yamt 			    uvm_aio_biodone);
   1310  1.6.2.1     yamt 
   1311  1.6.2.1     yamt 	return error;
   1312  1.6.2.1     yamt }
   1313  1.6.2.1     yamt 
   1314      1.1    pooka /*
   1315      1.1    pooka  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1316      1.1    pooka  * and mapped into kernel memory.  Here we just look up the underlying
   1317      1.1    pooka  * device block addresses and call the strategy routine.
   1318      1.1    pooka  */
   1319      1.1    pooka 
   1320      1.1    pooka static int
   1321      1.1    pooka genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1322      1.1    pooka     enum uio_rw rw, void (*iodone)(struct buf *))
   1323      1.1    pooka {
   1324  1.6.2.3     yamt 	int s, error;
   1325      1.1    pooka 	int fs_bshift, dev_bshift;
   1326      1.1    pooka 	off_t eof, offset, startoffset;
   1327      1.1    pooka 	size_t bytes, iobytes, skipbytes;
   1328      1.1    pooka 	struct buf *mbp, *bp;
   1329  1.6.2.3     yamt 	const bool async = (flags & PGO_SYNCIO) == 0;
   1330  1.6.2.3     yamt 	const bool iowrite = rw == UIO_WRITE;
   1331  1.6.2.3     yamt 	const int brw = iowrite ? B_WRITE : B_READ;
   1332      1.1    pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1333      1.1    pooka 
   1334      1.1    pooka 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1335      1.1    pooka 	    vp, kva, len, flags);
   1336      1.1    pooka 
   1337      1.1    pooka 	KASSERT(vp->v_size <= vp->v_writesize);
   1338      1.1    pooka 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1339      1.1    pooka 	if (vp->v_type != VBLK) {
   1340      1.1    pooka 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1341      1.1    pooka 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1342      1.1    pooka 	} else {
   1343      1.1    pooka 		fs_bshift = DEV_BSHIFT;
   1344      1.1    pooka 		dev_bshift = DEV_BSHIFT;
   1345      1.1    pooka 	}
   1346      1.1    pooka 	error = 0;
   1347      1.1    pooka 	startoffset = off;
   1348      1.1    pooka 	bytes = MIN(len, eof - startoffset);
   1349      1.1    pooka 	skipbytes = 0;
   1350      1.1    pooka 	KASSERT(bytes != 0);
   1351      1.1    pooka 
   1352  1.6.2.3     yamt 	if (iowrite) {
   1353      1.2       ad 		mutex_enter(&vp->v_interlock);
   1354      1.1    pooka 		vp->v_numoutput += 2;
   1355      1.2       ad 		mutex_exit(&vp->v_interlock);
   1356      1.1    pooka 	}
   1357      1.2       ad 	mbp = getiobuf(vp, true);
   1358      1.1    pooka 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1359      1.1    pooka 	    vp, mbp, vp->v_numoutput, bytes);
   1360      1.1    pooka 	mbp->b_bufsize = len;
   1361      1.1    pooka 	mbp->b_data = (void *)kva;
   1362      1.1    pooka 	mbp->b_resid = mbp->b_bcount = bytes;
   1363      1.2       ad 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1364      1.2       ad 	if (async) {
   1365      1.2       ad 		mbp->b_flags = brw | B_ASYNC;
   1366      1.2       ad 		mbp->b_iodone = iodone;
   1367      1.2       ad 	} else {
   1368      1.2       ad 		mbp->b_flags = brw;
   1369      1.2       ad 		mbp->b_iodone = NULL;
   1370      1.2       ad 	}
   1371      1.1    pooka 	if (curlwp == uvm.pagedaemon_lwp)
   1372      1.1    pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1373      1.1    pooka 	else if (async)
   1374      1.1    pooka 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1375      1.1    pooka 	else
   1376      1.1    pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1377      1.1    pooka 
   1378      1.1    pooka 	bp = NULL;
   1379      1.1    pooka 	for (offset = startoffset;
   1380      1.1    pooka 	    bytes > 0;
   1381      1.1    pooka 	    offset += iobytes, bytes -= iobytes) {
   1382  1.6.2.3     yamt 		int run;
   1383  1.6.2.3     yamt 		daddr_t lbn, blkno;
   1384  1.6.2.3     yamt 		struct vnode *devvp;
   1385  1.6.2.3     yamt 
   1386  1.6.2.3     yamt 		/*
    1387  1.6.2.3     yamt 		 * bmap the file to find out the blkno to do this i/o at and
    1388  1.6.2.3     yamt 		 * how much we can do in one i/o.  if bmap returns an error,
   1389  1.6.2.3     yamt 		 * skip the rest of the top-level i/o.
   1390  1.6.2.3     yamt 		 */
   1391  1.6.2.3     yamt 
   1392      1.1    pooka 		lbn = offset >> fs_bshift;
   1393      1.1    pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1394      1.1    pooka 		if (error) {
   1395  1.6.2.3     yamt 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
   1396  1.6.2.3     yamt 			    lbn,error,0,0);
   1397      1.1    pooka 			skipbytes += bytes;
   1398      1.1    pooka 			bytes = 0;
   1399  1.6.2.3     yamt 			goto loopdone;
   1400      1.1    pooka 		}
   1401      1.1    pooka 
   1402  1.6.2.3     yamt 		/*
    1403  1.6.2.3     yamt 		 * see how much we can transfer with this i/o.
    1404  1.6.2.3     yamt 		 * limit it to the end of the contiguous run of
    1405  1.6.2.3     yamt 		 * blocks that bmap reported for this lbn.
   1406  1.6.2.3     yamt 		 */
   1407  1.6.2.3     yamt 
   1408      1.1    pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1409      1.1    pooka 		    bytes);
   1410  1.6.2.3     yamt 
   1411  1.6.2.3     yamt 		/*
    1412  1.6.2.3     yamt 		 * if this block isn't allocated, there is nothing to
    1413  1.6.2.3     yamt 		 * push out for a write; for a read, zero the part of
    1414  1.6.2.3     yamt 		 * the mapped buffer covering it instead of reading it.
   1415  1.6.2.3     yamt 		 */
   1416  1.6.2.3     yamt 
   1417      1.1    pooka 		if (blkno == (daddr_t)-1) {
   1418  1.6.2.3     yamt 			if (!iowrite) {
   1419      1.1    pooka 				memset((char *)kva + (offset - startoffset), 0,
   1420  1.6.2.3     yamt 				    iobytes);
   1421      1.1    pooka 			}
   1422      1.1    pooka 			skipbytes += iobytes;
   1423      1.1    pooka 			continue;
   1424      1.1    pooka 		}
   1425      1.1    pooka 
   1426  1.6.2.3     yamt 		/*
   1427  1.6.2.3     yamt 		 * allocate a sub-buf for this piece of the i/o
   1428  1.6.2.3     yamt 		 * (or just use mbp if there's only 1 piece),
   1429  1.6.2.3     yamt 		 * and start it going.
   1430  1.6.2.3     yamt 		 */
   1431  1.6.2.3     yamt 
   1432      1.1    pooka 		if (offset == startoffset && iobytes == bytes) {
   1433      1.1    pooka 			bp = mbp;
   1434      1.1    pooka 		} else {
   1435      1.1    pooka 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1436      1.1    pooka 			    vp, bp, vp->v_numoutput, 0);
   1437      1.2       ad 			bp = getiobuf(vp, true);
   1438      1.1    pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1439      1.1    pooka 		}
   1440      1.1    pooka 		bp->b_lblkno = 0;
   1441      1.1    pooka 
   1442      1.1    pooka 		/* adjust physical blkno for partial blocks */
   1443      1.1    pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1444      1.1    pooka 		    dev_bshift);
   1445  1.6.2.3     yamt 
   1446      1.1    pooka 		UVMHIST_LOG(ubchist,
   1447  1.6.2.3     yamt 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1448  1.6.2.3     yamt 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1449      1.1    pooka 
   1450      1.1    pooka 		VOP_STRATEGY(devvp, bp);
   1451      1.1    pooka 	}
   1452  1.6.2.3     yamt 
   1453  1.6.2.3     yamt loopdone:
   1454      1.1    pooka 	if (skipbytes) {
   1455      1.1    pooka 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1456      1.1    pooka 	}
   1457      1.1    pooka 	nestiobuf_done(mbp, skipbytes, error);
   1458      1.1    pooka 	if (async) {
   1459      1.1    pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1460      1.1    pooka 		return (0);
   1461      1.1    pooka 	}
   1462      1.1    pooka 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1463      1.1    pooka 	error = biowait(mbp);
   1464      1.1    pooka 	s = splbio();
   1465      1.1    pooka 	(*iodone)(mbp);
   1466      1.1    pooka 	splx(s);
   1467      1.1    pooka 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1468      1.1    pooka 	return (error);
   1469      1.1    pooka }
   1470      1.1    pooka 
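                           /*
                            * genfs_compat_getpages: getpages emulation for filesystems that
                            * cannot use the bmap-based genfs paging path.  Look the pages up
                            * with uvn_findpages(), map them with uvm_pagermapin(), and fill
                            * each PG_FAKE page by reading it with VOP_READ() one page at a
                            * time, zeroing any short read.
                            */
                           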
   1471      1.1    pooka int
   1472      1.1    pooka genfs_compat_getpages(void *v)
   1473      1.1    pooka {
   1474      1.1    pooka 	struct vop_getpages_args /* {
   1475      1.1    pooka 		struct vnode *a_vp;
   1476      1.1    pooka 		voff_t a_offset;
   1477      1.1    pooka 		struct vm_page **a_m;
   1478      1.1    pooka 		int *a_count;
   1479      1.1    pooka 		int a_centeridx;
   1480      1.1    pooka 		vm_prot_t a_access_type;
   1481      1.1    pooka 		int a_advice;
   1482      1.1    pooka 		int a_flags;
   1483      1.1    pooka 	} */ *ap = v;
   1484      1.1    pooka 
   1485      1.1    pooka 	off_t origoffset;
   1486      1.1    pooka 	struct vnode *vp = ap->a_vp;
   1487      1.1    pooka 	struct uvm_object *uobj = &vp->v_uobj;
   1488      1.1    pooka 	struct vm_page *pg, **pgs;
   1489      1.1    pooka 	vaddr_t kva;
   1490      1.1    pooka 	int i, error, orignpages, npages;
   1491      1.1    pooka 	struct iovec iov;
   1492      1.1    pooka 	struct uio uio;
   1493      1.1    pooka 	kauth_cred_t cred = curlwp->l_cred;
   1494  1.6.2.3     yamt 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1495      1.1    pooka 
   1496      1.1    pooka 	error = 0;
   1497      1.1    pooka 	origoffset = ap->a_offset;
   1498      1.1    pooka 	orignpages = *ap->a_count;
   1499      1.1    pooka 	pgs = ap->a_m;
   1500      1.1    pooka 
   1501      1.1    pooka 	if (ap->a_flags & PGO_LOCKED) {
   1502      1.1    pooka 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1503  1.6.2.3     yamt 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1504      1.1    pooka 
   1505  1.6.2.4     yamt 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1506  1.6.2.4     yamt 		if (error == 0 && memwrite) {
   1507  1.6.2.4     yamt 			genfs_markdirty(vp);
   1508  1.6.2.4     yamt 		}
   1509  1.6.2.4     yamt 		return error;
   1510      1.1    pooka 	}
   1511      1.1    pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1512      1.2       ad 		mutex_exit(&uobj->vmobjlock);
   1513  1.6.2.4     yamt 		return EINVAL;
   1514      1.1    pooka 	}
   1515      1.1    pooka 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1516      1.2       ad 		mutex_exit(&uobj->vmobjlock);
   1517      1.1    pooka 		return 0;
   1518      1.1    pooka 	}
   1519      1.1    pooka 	npages = orignpages;
   1520      1.1    pooka 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1521      1.2       ad 	mutex_exit(&uobj->vmobjlock);
   1522      1.1    pooka 	kva = uvm_pagermapin(pgs, npages,
   1523      1.1    pooka 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1524      1.1    pooka 	for (i = 0; i < npages; i++) {
   1525      1.1    pooka 		pg = pgs[i];
   1526      1.1    pooka 		if ((pg->flags & PG_FAKE) == 0) {
   1527      1.1    pooka 			continue;
   1528      1.1    pooka 		}
   1529      1.1    pooka 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1530      1.1    pooka 		iov.iov_len = PAGE_SIZE;
   1531      1.1    pooka 		uio.uio_iov = &iov;
   1532      1.1    pooka 		uio.uio_iovcnt = 1;
   1533      1.1    pooka 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1534      1.1    pooka 		uio.uio_rw = UIO_READ;
   1535      1.1    pooka 		uio.uio_resid = PAGE_SIZE;
   1536      1.1    pooka 		UIO_SETUP_SYSSPACE(&uio);
   1537      1.1    pooka 		/* XXX vn_lock */
   1538      1.1    pooka 		error = VOP_READ(vp, &uio, 0, cred);
   1539      1.1    pooka 		if (error) {
   1540      1.1    pooka 			break;
   1541      1.1    pooka 		}
   1542      1.1    pooka 		if (uio.uio_resid) {
   1543      1.1    pooka 			memset(iov.iov_base, 0, uio.uio_resid);
   1544      1.1    pooka 		}
   1545      1.1    pooka 	}
   1546      1.1    pooka 	uvm_pagermapout(kva, npages);
   1547      1.2       ad 	mutex_enter(&uobj->vmobjlock);
   1548      1.2       ad 	mutex_enter(&uvm_pageqlock);
   1549      1.1    pooka 	for (i = 0; i < npages; i++) {
   1550      1.1    pooka 		pg = pgs[i];
   1551      1.1    pooka 		if (error && (pg->flags & PG_FAKE) != 0) {
   1552      1.1    pooka 			pg->flags |= PG_RELEASED;
   1553      1.1    pooka 		} else {
   1554      1.1    pooka 			pmap_clear_modify(pg);
   1555      1.1    pooka 			uvm_pageactivate(pg);
   1556      1.1    pooka 		}
   1557      1.1    pooka 	}
   1558      1.1    pooka 	if (error) {
   1559      1.1    pooka 		uvm_page_unbusy(pgs, npages);
   1560      1.1    pooka 	}
   1561      1.2       ad 	mutex_exit(&uvm_pageqlock);
   1562  1.6.2.4     yamt 	if (error == 0 && memwrite) {
   1563  1.6.2.4     yamt 		genfs_markdirty(vp);
   1564  1.6.2.4     yamt 	}
   1565      1.2       ad 	mutex_exit(&uobj->vmobjlock);
   1566  1.6.2.4     yamt 	return error;
   1567      1.1    pooka }
   1568      1.1    pooka 
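                           /*
                            * genfs_compat_gop_write: GOP_WRITE counterpart of the above.
                            * Write the pages with one synchronous VOP_WRITE() from a pager
                            * mapping, then construct a buf describing the transfer and hand
                            * it to uvm_aio_aiodone() so that the pages are unbusied and the
                            * pageout is accounted for in the usual way.
                            */
                           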
   1569      1.1    pooka int
   1570      1.1    pooka genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1571      1.1    pooka     int flags)
   1572      1.1    pooka {
   1573      1.1    pooka 	off_t offset;
   1574      1.1    pooka 	struct iovec iov;
   1575      1.1    pooka 	struct uio uio;
   1576      1.1    pooka 	kauth_cred_t cred = curlwp->l_cred;
   1577      1.1    pooka 	struct buf *bp;
   1578      1.1    pooka 	vaddr_t kva;
   1579      1.2       ad 	int error;
   1580      1.1    pooka 
   1581      1.1    pooka 	offset = pgs[0]->offset;
   1582      1.1    pooka 	kva = uvm_pagermapin(pgs, npages,
   1583      1.1    pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1584      1.1    pooka 
   1585      1.1    pooka 	iov.iov_base = (void *)kva;
   1586      1.1    pooka 	iov.iov_len = npages << PAGE_SHIFT;
   1587      1.1    pooka 	uio.uio_iov = &iov;
   1588      1.1    pooka 	uio.uio_iovcnt = 1;
   1589      1.1    pooka 	uio.uio_offset = offset;
   1590      1.1    pooka 	uio.uio_rw = UIO_WRITE;
   1591      1.1    pooka 	uio.uio_resid = npages << PAGE_SHIFT;
   1592      1.1    pooka 	UIO_SETUP_SYSSPACE(&uio);
   1593      1.1    pooka 	/* XXX vn_lock */
   1594      1.1    pooka 	error = VOP_WRITE(vp, &uio, 0, cred);
   1595      1.1    pooka 
   1596      1.2       ad 	mutex_enter(&vp->v_interlock);
   1597      1.2       ad 	vp->v_numoutput++;
   1598      1.2       ad 	mutex_exit(&vp->v_interlock);
   1599      1.1    pooka 
   1600      1.2       ad 	bp = getiobuf(vp, true);
   1601      1.2       ad 	bp->b_cflags = BC_BUSY | BC_AGE;
   1602      1.1    pooka 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1603      1.1    pooka 	bp->b_data = (char *)kva;
   1604      1.1    pooka 	bp->b_bcount = npages << PAGE_SHIFT;
   1605      1.1    pooka 	bp->b_bufsize = npages << PAGE_SHIFT;
   1606      1.1    pooka 	bp->b_resid = 0;
   1607      1.1    pooka 	bp->b_error = error;
   1608      1.1    pooka 	uvm_aio_aiodone(bp);
   1609      1.1    pooka 	return (error);
   1610      1.1    pooka }
   1611      1.1    pooka 
   1612      1.1    pooka /*
   1613      1.1    pooka  * Process a uio using direct I/O.  If we reach a part of the request
   1614      1.1    pooka  * which cannot be processed in this fashion for some reason, just return.
    1615      1.1    pooka  * The caller must then handle at least the next part of the request
    1616      1.1    pooka  * using buffered I/O before trying direct I/O again.
   1617      1.1    pooka  */
   1618      1.1    pooka 
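                           /*
                            * Illustrative use (a sketch only, not taken from any particular
                            * caller): a filesystem's read/write routine that honours IO_DIRECT
                            * would typically do
                            *
                            *	if ((ioflag & IO_DIRECT) != 0)
                            *		genfs_directio(vp, uio, ioflag);
                            *
                            * and then continue with its normal buffered path for whatever part
                            * of the uio genfs_directio() left unprocessed.
                            */
                           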
   1619      1.1    pooka void
   1620      1.1    pooka genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1621      1.1    pooka {
   1622      1.1    pooka 	struct vmspace *vs;
   1623      1.1    pooka 	struct iovec *iov;
   1624      1.1    pooka 	vaddr_t va;
   1625      1.1    pooka 	size_t len;
   1626      1.1    pooka 	const int mask = DEV_BSIZE - 1;
   1627      1.1    pooka 	int error;
   1628  1.6.2.2     yamt 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1629  1.6.2.2     yamt 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1630      1.1    pooka 
   1631      1.1    pooka 	/*
   1632      1.1    pooka 	 * We only support direct I/O to user space for now.
   1633      1.1    pooka 	 */
   1634      1.1    pooka 
   1635      1.1    pooka 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1636      1.1    pooka 		return;
   1637      1.1    pooka 	}
   1638      1.1    pooka 
   1639      1.1    pooka 	/*
   1640      1.1    pooka 	 * If the vnode is mapped, we would need to get the getpages lock
    1641      1.1    pooka 	 * to stabilize the bmap, but then we would get into trouble while
   1642      1.1    pooka 	 * locking the pages if the pages belong to this same vnode (or a
   1643      1.1    pooka 	 * multi-vnode cascade to the same effect).  Just fall back to
   1644      1.1    pooka 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1645      1.1    pooka 	 */
   1646      1.1    pooka 
   1647      1.1    pooka 	if (vp->v_vflag & VV_MAPPED) {
   1648      1.1    pooka 		return;
   1649      1.1    pooka 	}
   1650      1.1    pooka 
   1651  1.6.2.2     yamt 	if (need_wapbl) {
   1652  1.6.2.2     yamt 		error = WAPBL_BEGIN(vp->v_mount);
   1653  1.6.2.2     yamt 		if (error)
   1654  1.6.2.2     yamt 			return;
   1655  1.6.2.2     yamt 	}
   1656  1.6.2.2     yamt 
   1657      1.1    pooka 	/*
   1658      1.1    pooka 	 * Do as much of the uio as possible with direct I/O.
   1659      1.1    pooka 	 */
   1660      1.1    pooka 
   1661      1.1    pooka 	vs = uio->uio_vmspace;
   1662      1.1    pooka 	while (uio->uio_resid) {
   1663      1.1    pooka 		iov = uio->uio_iov;
   1664      1.1    pooka 		if (iov->iov_len == 0) {
   1665      1.1    pooka 			uio->uio_iov++;
   1666      1.1    pooka 			uio->uio_iovcnt--;
   1667      1.1    pooka 			continue;
   1668      1.1    pooka 		}
   1669      1.1    pooka 		va = (vaddr_t)iov->iov_base;
   1670      1.1    pooka 		len = MIN(iov->iov_len, genfs_maxdio);
   1671      1.1    pooka 		len &= ~mask;
   1672      1.1    pooka 
   1673      1.1    pooka 		/*
   1674      1.1    pooka 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1675      1.1    pooka 		 * the current EOF, then fall back to buffered I/O.
   1676      1.1    pooka 		 */
   1677      1.1    pooka 
   1678      1.1    pooka 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1679  1.6.2.2     yamt 			break;
   1680      1.1    pooka 		}
   1681      1.1    pooka 
   1682      1.1    pooka 		/*
   1683      1.1    pooka 		 * Check alignment.  The file offset must be at least
   1684      1.1    pooka 		 * sector-aligned.  The exact constraint on memory alignment
   1685      1.1    pooka 		 * is very hardware-dependent, but requiring sector-aligned
   1686      1.1    pooka 		 * addresses there too is safe.
   1687      1.1    pooka 		 */
   1688      1.1    pooka 
   1689      1.1    pooka 		if (uio->uio_offset & mask || va & mask) {
   1690  1.6.2.2     yamt 			break;
   1691      1.1    pooka 		}
   1692      1.1    pooka 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1693      1.1    pooka 					  uio->uio_rw);
   1694      1.1    pooka 		if (error) {
   1695      1.1    pooka 			break;
   1696      1.1    pooka 		}
   1697      1.1    pooka 		iov->iov_base = (char *)iov->iov_base + len;
   1698      1.1    pooka 		iov->iov_len -= len;
   1699      1.1    pooka 		uio->uio_offset += len;
   1700      1.1    pooka 		uio->uio_resid -= len;
   1701      1.1    pooka 	}
   1702  1.6.2.2     yamt 
   1703  1.6.2.2     yamt 	if (need_wapbl)
   1704  1.6.2.2     yamt 		WAPBL_END(vp->v_mount);
   1705      1.1    pooka }
   1706      1.1    pooka 
   1707      1.1    pooka /*
   1708      1.1    pooka  * Iodone routine for direct I/O.  We don't do much here since the request is
   1709      1.1    pooka  * always synchronous, so the caller will do most of the work after biowait().
   1710      1.1    pooka  */
   1711      1.1    pooka 
   1712      1.1    pooka static void
   1713      1.1    pooka genfs_dio_iodone(struct buf *bp)
   1714      1.1    pooka {
   1715      1.1    pooka 
   1716      1.1    pooka 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1717      1.2       ad 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1718      1.2       ad 		mutex_enter(bp->b_objlock);
   1719      1.1    pooka 		vwakeup(bp);
   1720      1.2       ad 		mutex_exit(bp->b_objlock);
   1721      1.1    pooka 	}
   1722      1.1    pooka 	putiobuf(bp);
   1723      1.1    pooka }
   1724      1.1    pooka 
   1725      1.1    pooka /*
   1726      1.1    pooka  * Process one chunk of a direct I/O request.
   1727      1.1    pooka  */
   1728      1.1    pooka 
   1729      1.1    pooka static int
   1730      1.1    pooka genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1731      1.1    pooka     off_t off, enum uio_rw rw)
   1732      1.1    pooka {
   1733      1.1    pooka 	struct vm_map *map;
   1734      1.1    pooka 	struct pmap *upm, *kpm;
   1735      1.1    pooka 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1736      1.1    pooka 	off_t spoff, epoff;
   1737      1.1    pooka 	vaddr_t kva, puva;
   1738      1.1    pooka 	paddr_t pa;
   1739      1.1    pooka 	vm_prot_t prot;
   1740      1.1    pooka 	int error, rv, poff, koff;
   1741  1.6.2.2     yamt 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1742      1.1    pooka 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1743      1.1    pooka 
   1744      1.1    pooka 	/*
   1745      1.1    pooka 	 * For writes, verify that this range of the file already has fully
   1746      1.1    pooka 	 * allocated backing store.  If there are any holes, just punt and
   1747      1.1    pooka 	 * make the caller take the buffered write path.
   1748      1.1    pooka 	 */
   1749      1.1    pooka 
   1750      1.1    pooka 	if (rw == UIO_WRITE) {
   1751      1.1    pooka 		daddr_t lbn, elbn, blkno;
   1752      1.1    pooka 		int bsize, bshift, run;
   1753      1.1    pooka 
   1754      1.1    pooka 		bshift = vp->v_mount->mnt_fs_bshift;
   1755      1.1    pooka 		bsize = 1 << bshift;
   1756      1.1    pooka 		lbn = off >> bshift;
   1757      1.1    pooka 		elbn = (off + len + bsize - 1) >> bshift;
   1758      1.1    pooka 		while (lbn < elbn) {
   1759      1.1    pooka 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1760      1.1    pooka 			if (error) {
   1761      1.1    pooka 				return error;
   1762      1.1    pooka 			}
   1763      1.1    pooka 			if (blkno == (daddr_t)-1) {
   1764      1.1    pooka 				return ENOSPC;
   1765      1.1    pooka 			}
   1766      1.1    pooka 			lbn += 1 + run;
   1767      1.1    pooka 		}
   1768      1.1    pooka 	}
   1769      1.1    pooka 
   1770      1.1    pooka 	/*
   1771      1.1    pooka 	 * Flush any cached pages for parts of the file that we're about to
   1772      1.1    pooka 	 * access.  If we're writing, invalidate pages as well.
   1773      1.1    pooka 	 */
   1774      1.1    pooka 
   1775      1.1    pooka 	spoff = trunc_page(off);
   1776      1.1    pooka 	epoff = round_page(off + len);
   1777      1.2       ad 	mutex_enter(&vp->v_interlock);
   1778      1.1    pooka 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1779      1.1    pooka 	if (error) {
   1780      1.1    pooka 		return error;
   1781      1.1    pooka 	}
   1782      1.1    pooka 
   1783      1.1    pooka 	/*
   1784      1.1    pooka 	 * Wire the user pages and remap them into kernel memory.
   1785      1.1    pooka 	 */
   1786      1.1    pooka 
   1787      1.1    pooka 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1788      1.1    pooka 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1789      1.1    pooka 	if (error) {
   1790      1.1    pooka 		return error;
   1791      1.1    pooka 	}
   1792      1.1    pooka 
   1793      1.1    pooka 	map = &vs->vm_map;
   1794      1.1    pooka 	upm = vm_map_pmap(map);
   1795      1.1    pooka 	kpm = vm_map_pmap(kernel_map);
   1796      1.1    pooka 	kva = uvm_km_alloc(kernel_map, klen, 0,
   1797      1.1    pooka 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   1798      1.1    pooka 	puva = trunc_page(uva);
   1799      1.1    pooka 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1800      1.1    pooka 		rv = pmap_extract(upm, puva + poff, &pa);
   1801      1.1    pooka 		KASSERT(rv);
   1802      1.1    pooka 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   1803      1.1    pooka 	}
   1804      1.1    pooka 	pmap_update(kpm);
   1805      1.1    pooka 
   1806      1.1    pooka 	/*
   1807      1.1    pooka 	 * Do the I/O.
   1808      1.1    pooka 	 */
   1809      1.1    pooka 
   1810      1.1    pooka 	koff = uva - trunc_page(uva);
   1811      1.1    pooka 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1812      1.1    pooka 			    genfs_dio_iodone);
   1813      1.1    pooka 
   1814      1.1    pooka 	/*
   1815      1.1    pooka 	 * Tear down the kernel mapping.
   1816      1.1    pooka 	 */
   1817      1.1    pooka 
   1818      1.1    pooka 	pmap_remove(kpm, kva, kva + klen);
   1819      1.1    pooka 	pmap_update(kpm);
   1820      1.1    pooka 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1821      1.1    pooka 
   1822      1.1    pooka 	/*
   1823      1.1    pooka 	 * Unwire the user pages.
   1824      1.1    pooka 	 */
   1825      1.1    pooka 
   1826      1.1    pooka 	uvm_vsunlock(vs, (void *)uva, len);
   1827      1.1    pooka 	return error;
   1828      1.1    pooka }
   1829      1.2       ad 