genfs_io.c revision 1.68.6.1
      1  1.68.6.1    bouyer /*	$NetBSD: genfs_io.c,v 1.68.6.1 2017/06/04 20:35:01 bouyer Exp $	*/
      2       1.1     pooka 
      3       1.1     pooka /*
      4       1.1     pooka  * Copyright (c) 1982, 1986, 1989, 1993
      5       1.1     pooka  *	The Regents of the University of California.  All rights reserved.
      6       1.1     pooka  *
      7       1.1     pooka  * Redistribution and use in source and binary forms, with or without
      8       1.1     pooka  * modification, are permitted provided that the following conditions
      9       1.1     pooka  * are met:
     10       1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     11       1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     12       1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     13       1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     14       1.1     pooka  *    documentation and/or other materials provided with the distribution.
     15       1.1     pooka  * 3. Neither the name of the University nor the names of its contributors
     16       1.1     pooka  *    may be used to endorse or promote products derived from this software
     17       1.1     pooka  *    without specific prior written permission.
     18       1.1     pooka  *
     19       1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20       1.1     pooka  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21       1.1     pooka  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22       1.1     pooka  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23       1.1     pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24       1.1     pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25       1.1     pooka  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26       1.1     pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27       1.1     pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28       1.1     pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29       1.1     pooka  * SUCH DAMAGE.
     30       1.1     pooka  *
     31       1.1     pooka  */
     32       1.1     pooka 
     33       1.1     pooka #include <sys/cdefs.h>
     34  1.68.6.1    bouyer __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.68.6.1 2017/06/04 20:35:01 bouyer Exp $");
     35       1.1     pooka 
     36       1.1     pooka #include <sys/param.h>
     37       1.1     pooka #include <sys/systm.h>
     38       1.1     pooka #include <sys/proc.h>
     39       1.1     pooka #include <sys/kernel.h>
     40       1.1     pooka #include <sys/mount.h>
     41       1.1     pooka #include <sys/vnode.h>
     42       1.1     pooka #include <sys/kmem.h>
     43       1.1     pooka #include <sys/kauth.h>
     44       1.1     pooka #include <sys/fstrans.h>
     45      1.15     pooka #include <sys/buf.h>
     46       1.1     pooka 
     47       1.1     pooka #include <miscfs/genfs/genfs.h>
     48       1.1     pooka #include <miscfs/genfs/genfs_node.h>
     49       1.1     pooka #include <miscfs/specfs/specdev.h>
     50       1.1     pooka 
     51       1.1     pooka #include <uvm/uvm.h>
     52       1.1     pooka #include <uvm/uvm_pager.h>
     53       1.1     pooka 
     54       1.1     pooka static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     55       1.1     pooka     off_t, enum uio_rw);
     56       1.1     pooka static void genfs_dio_iodone(struct buf *);
     57       1.1     pooka 
     58      1.59  riastrad static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
     59      1.59  riastrad     off_t, bool, bool, bool, bool);
     60       1.1     pooka static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     61       1.1     pooka     void (*)(struct buf *));
     62      1.55      yamt static void genfs_rel_pages(struct vm_page **, unsigned int);
     63      1.38       chs static void genfs_markdirty(struct vnode *);
     64       1.1     pooka 
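                             /* Upper bound on the size of each chunk of a direct I/O transfer. */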
     65       1.1     pooka int genfs_maxdio = MAXPHYS;
     66       1.1     pooka 
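                             /*
                              * genfs_rel_pages: unbusy the given pages.  Pages still marked PG_FAKE
                              * (never filled with valid data) are flagged PG_RELEASED so that
                              * uvm_page_unbusy() frees them.
                              */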
     67      1.38       chs static void
     68      1.55      yamt genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
     69       1.1     pooka {
     70      1.55      yamt 	unsigned int i;
     71       1.1     pooka 
     72       1.1     pooka 	for (i = 0; i < npages; i++) {
     73       1.1     pooka 		struct vm_page *pg = pgs[i];
     74       1.1     pooka 
     75       1.1     pooka 		if (pg == NULL || pg == PGO_DONTCARE)
     76       1.1     pooka 			continue;
     77      1.55      yamt 		KASSERT(uvm_page_locked_p(pg));
     78       1.1     pooka 		if (pg->flags & PG_FAKE) {
     79       1.1     pooka 			pg->flags |= PG_RELEASED;
     80       1.1     pooka 		}
     81       1.1     pooka 	}
     82       1.2        ad 	mutex_enter(&uvm_pageqlock);
     83       1.1     pooka 	uvm_page_unbusy(pgs, npages);
     84       1.2        ad 	mutex_exit(&uvm_pageqlock);
     85       1.1     pooka }
     86       1.1     pooka 
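                             /*
                              * genfs_markdirty: note that the vnode has been modified: bump the
                              * dirty generation count and make sure the vnode is on the syncer
                              * worklist.
                              */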
     87      1.38       chs static void
     88      1.38       chs genfs_markdirty(struct vnode *vp)
     89      1.38       chs {
     90      1.38       chs 	struct genfs_node * const gp = VTOG(vp);
     91      1.38       chs 
     92      1.49     rmind 	KASSERT(mutex_owned(vp->v_interlock));
     93      1.38       chs 	gp->g_dirtygen++;
     94      1.38       chs 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
     95      1.38       chs 		vn_syncer_add_to_worklist(vp, filedelay);
     96      1.38       chs 	}
     97      1.38       chs 	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
     98      1.38       chs 		vp->v_iflag |= VI_WRMAPDIRTY;
     99      1.38       chs 	}
    100      1.38       chs }
    101      1.38       chs 
    102       1.1     pooka /*
    103       1.1     pooka  * generic VM getpages routine.
    104       1.1     pooka  * Return PG_BUSY pages for the given range,
    105       1.1     pooka  * reading from backing store if necessary.
    106       1.1     pooka  */
    107       1.1     pooka 
    108       1.1     pooka int
    109       1.1     pooka genfs_getpages(void *v)
    110       1.1     pooka {
    111       1.1     pooka 	struct vop_getpages_args /* {
    112       1.1     pooka 		struct vnode *a_vp;
    113       1.1     pooka 		voff_t a_offset;
    114       1.1     pooka 		struct vm_page **a_m;
    115       1.1     pooka 		int *a_count;
    116       1.1     pooka 		int a_centeridx;
    117       1.1     pooka 		vm_prot_t a_access_type;
    118       1.1     pooka 		int a_advice;
    119       1.1     pooka 		int a_flags;
    120      1.22  uebayasi 	} */ * const ap = v;
    121       1.1     pooka 
    122      1.24  uebayasi 	off_t diskeof, memeof;
    123      1.31  uebayasi 	int i, error, npages;
    124      1.10      yamt 	const int flags = ap->a_flags;
    125      1.22  uebayasi 	struct vnode * const vp = ap->a_vp;
    126      1.22  uebayasi 	struct uvm_object * const uobj = &vp->v_uobj;
    127      1.10      yamt 	const bool async = (flags & PGO_SYNCIO) == 0;
    128      1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    129      1.10      yamt 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    130      1.35  uebayasi 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    131      1.40       chs 	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
    132      1.64   hannken 	bool holds_wapbl = false;
    133      1.64   hannken 	struct mount *trans_mount = NULL;
    134       1.1     pooka 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    135       1.1     pooka 
    136       1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    137       1.1     pooka 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    138       1.1     pooka 
    139       1.1     pooka 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    140       1.1     pooka 	    vp->v_type == VLNK || vp->v_type == VBLK);
    141       1.1     pooka 
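                             	/* restart point: retried if the file is truncated while we set up. */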
    142       1.1     pooka startover:
    143       1.1     pooka 	error = 0;
    144      1.27  uebayasi 	const voff_t origvsize = vp->v_size;
    145      1.27  uebayasi 	const off_t origoffset = ap->a_offset;
    146      1.29  uebayasi 	const int orignpages = *ap->a_count;
    147      1.33  uebayasi 
    148       1.1     pooka 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    149       1.1     pooka 	if (flags & PGO_PASTEOF) {
    150      1.24  uebayasi 		off_t newsize;
    151       1.1     pooka #if defined(DIAGNOSTIC)
    152       1.1     pooka 		off_t writeeof;
    153       1.1     pooka #endif /* defined(DIAGNOSTIC) */
    154       1.1     pooka 
    155       1.1     pooka 		newsize = MAX(origvsize,
    156       1.1     pooka 		    origoffset + (orignpages << PAGE_SHIFT));
    157       1.1     pooka 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    158       1.1     pooka #if defined(DIAGNOSTIC)
    159       1.1     pooka 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    160       1.1     pooka 		if (newsize > round_page(writeeof)) {
    161      1.39     pooka 			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
    162      1.39     pooka 			    __func__, newsize, round_page(writeeof));
    163       1.1     pooka 		}
    164       1.1     pooka #endif /* defined(DIAGNOSTIC) */
    165       1.1     pooka 	} else {
    166       1.1     pooka 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    167       1.1     pooka 	}
     168       1.1     pooka 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    169       1.1     pooka 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    170       1.1     pooka 	KASSERT(orignpages > 0);
    171       1.1     pooka 
    172       1.1     pooka 	/*
    173       1.1     pooka 	 * Bounds-check the request.
    174       1.1     pooka 	 */
    175       1.1     pooka 
    176       1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    177       1.1     pooka 		if ((flags & PGO_LOCKED) == 0) {
    178      1.49     rmind 			mutex_exit(uobj->vmobjlock);
    179       1.1     pooka 		}
    180       1.1     pooka 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    181       1.1     pooka 		    origoffset, *ap->a_count, memeof,0);
    182       1.1     pooka 		error = EINVAL;
    183       1.1     pooka 		goto out_err;
    184       1.1     pooka 	}
    185       1.1     pooka 
    186       1.1     pooka 	/* uobj is locked */
    187       1.1     pooka 
    188       1.1     pooka 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    189       1.1     pooka 	    (vp->v_type != VBLK ||
    190       1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    191       1.1     pooka 		int updflags = 0;
    192       1.1     pooka 
    193       1.1     pooka 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    194       1.1     pooka 			updflags = GOP_UPDATE_ACCESSED;
    195       1.1     pooka 		}
    196      1.35  uebayasi 		if (memwrite) {
    197       1.1     pooka 			updflags |= GOP_UPDATE_MODIFIED;
    198       1.1     pooka 		}
    199       1.1     pooka 		if (updflags != 0) {
    200       1.1     pooka 			GOP_MARKUPDATE(vp, updflags);
    201       1.1     pooka 		}
    202       1.1     pooka 	}
    203       1.1     pooka 
    204       1.1     pooka 	/*
    205       1.1     pooka 	 * For PGO_LOCKED requests, just return whatever's in memory.
    206       1.1     pooka 	 */
    207       1.1     pooka 
    208       1.1     pooka 	if (flags & PGO_LOCKED) {
    209       1.1     pooka 		int nfound;
    210      1.31  uebayasi 		struct vm_page *pg;
    211       1.1     pooka 
    212      1.40       chs 		KASSERT(!glocked);
    213       1.1     pooka 		npages = *ap->a_count;
    214       1.1     pooka #if defined(DEBUG)
    215       1.1     pooka 		for (i = 0; i < npages; i++) {
    216       1.1     pooka 			pg = ap->a_m[i];
    217       1.1     pooka 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    218       1.1     pooka 		}
    219       1.1     pooka #endif /* defined(DEBUG) */
    220       1.1     pooka 		nfound = uvn_findpages(uobj, origoffset, &npages,
    221      1.35  uebayasi 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
    222       1.1     pooka 		KASSERT(npages == *ap->a_count);
    223       1.1     pooka 		if (nfound == 0) {
    224       1.1     pooka 			error = EBUSY;
    225       1.1     pooka 			goto out_err;
    226       1.1     pooka 		}
    227      1.23  uebayasi 		if (!genfs_node_rdtrylock(vp)) {
    228       1.1     pooka 			genfs_rel_pages(ap->a_m, npages);
    229       1.1     pooka 
    230       1.1     pooka 			/*
    231       1.1     pooka 			 * restore the array.
    232       1.1     pooka 			 */
    233       1.1     pooka 
    234       1.1     pooka 			for (i = 0; i < npages; i++) {
    235       1.1     pooka 				pg = ap->a_m[i];
    236       1.1     pooka 
    237      1.41  uebayasi 				if (pg != NULL && pg != PGO_DONTCARE) {
    238       1.1     pooka 					ap->a_m[i] = NULL;
    239       1.1     pooka 				}
    240      1.46  uebayasi 				KASSERT(ap->a_m[i] == NULL ||
    241      1.46  uebayasi 				    ap->a_m[i] == PGO_DONTCARE);
    242       1.1     pooka 			}
    243       1.1     pooka 		} else {
    244      1.23  uebayasi 			genfs_node_unlock(vp);
    245       1.1     pooka 		}
    246       1.1     pooka 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    247      1.38       chs 		if (error == 0 && memwrite) {
    248      1.38       chs 			genfs_markdirty(vp);
    249      1.38       chs 		}
    250       1.1     pooka 		goto out_err;
    251       1.1     pooka 	}
    252      1.49     rmind 	mutex_exit(uobj->vmobjlock);
    253       1.1     pooka 
    254       1.1     pooka 	/*
    255       1.1     pooka 	 * find the requested pages and make some simple checks.
    256       1.1     pooka 	 * leave space in the page array for a whole block.
    257       1.1     pooka 	 */
    258       1.1     pooka 
    259      1.27  uebayasi 	const int fs_bshift = (vp->v_type != VBLK) ?
    260      1.27  uebayasi 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    261      1.27  uebayasi 	const int fs_bsize = 1 << fs_bshift;
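                             /* helpers for rounding offsets to filesystem block boundaries */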
    262      1.30  uebayasi #define	blk_mask	(fs_bsize - 1)
    263      1.30  uebayasi #define	trunc_blk(x)	((x) & ~blk_mask)
    264      1.30  uebayasi #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
    265       1.1     pooka 
    266      1.29  uebayasi 	const int orignmempages = MIN(orignpages,
    267       1.1     pooka 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    268      1.29  uebayasi 	npages = orignmempages;
    269      1.30  uebayasi 	const off_t startoffset = trunc_blk(origoffset);
    270      1.30  uebayasi 	const off_t endoffset = MIN(
    271      1.30  uebayasi 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    272      1.30  uebayasi 	    round_page(memeof));
    273      1.31  uebayasi 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    274       1.1     pooka 
    275      1.33  uebayasi 	const int pgs_size = sizeof(struct vm_page *) *
    276       1.1     pooka 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    277      1.33  uebayasi 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    278      1.31  uebayasi 
    279       1.1     pooka 	if (pgs_size > sizeof(pgs_onstack)) {
    280       1.1     pooka 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    281       1.1     pooka 		if (pgs == NULL) {
    282       1.1     pooka 			pgs = pgs_onstack;
    283       1.1     pooka 			error = ENOMEM;
    284      1.32  uebayasi 			goto out_err;
    285       1.1     pooka 		}
    286       1.1     pooka 	} else {
    287      1.14  christos 		pgs = pgs_onstack;
    288      1.14  christos 		(void)memset(pgs, 0, pgs_size);
    289       1.1     pooka 	}
    290      1.14  christos 
    291       1.1     pooka 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    292       1.1     pooka 	    ridx, npages, startoffset, endoffset);
    293       1.1     pooka 
    294      1.64   hannken 	if (trans_mount == NULL) {
    295      1.64   hannken 		trans_mount = vp->v_mount;
    296  1.68.6.1    bouyer 		fstrans_start(trans_mount);
    297      1.64   hannken 		/*
    298      1.64   hannken 		 * check if this vnode is still valid.
    299      1.64   hannken 		 */
    300      1.64   hannken 		mutex_enter(vp->v_interlock);
    301      1.64   hannken 		error = vdead_check(vp, 0);
    302      1.64   hannken 		mutex_exit(vp->v_interlock);
    303      1.64   hannken 		if (error)
    304      1.64   hannken 			goto out_err_free;
    305      1.42   hannken 		/*
    306      1.42   hannken 		 * XXX: This assumes that we come here only via
    307      1.42   hannken 		 * the mmio path
    308      1.42   hannken 		 */
    309      1.64   hannken 		if (blockalloc && vp->v_mount->mnt_wapbl) {
    310      1.64   hannken 			error = WAPBL_BEGIN(trans_mount);
    311      1.64   hannken 			if (error)
    312      1.42   hannken 				goto out_err_free;
    313      1.64   hannken 			holds_wapbl = true;
    314      1.42   hannken 		}
    315       1.1     pooka 	}
    316       1.1     pooka 
    317       1.1     pooka 	/*
    318       1.1     pooka 	 * hold g_glock to prevent a race with truncate.
    319       1.1     pooka 	 *
    320       1.1     pooka 	 * check if our idea of v_size is still valid.
    321       1.1     pooka 	 */
    322       1.1     pooka 
    323      1.40       chs 	KASSERT(!glocked || genfs_node_wrlocked(vp));
    324      1.40       chs 	if (!glocked) {
    325      1.40       chs 		if (blockalloc) {
    326      1.40       chs 			genfs_node_wrlock(vp);
    327      1.40       chs 		} else {
    328      1.40       chs 			genfs_node_rdlock(vp);
    329      1.40       chs 		}
    330       1.1     pooka 	}
    331      1.49     rmind 	mutex_enter(uobj->vmobjlock);
    332       1.1     pooka 	if (vp->v_size < origvsize) {
    333      1.40       chs 		if (!glocked) {
    334      1.40       chs 			genfs_node_unlock(vp);
    335      1.40       chs 		}
    336       1.1     pooka 		if (pgs != pgs_onstack)
    337       1.1     pooka 			kmem_free(pgs, pgs_size);
    338       1.1     pooka 		goto startover;
    339       1.1     pooka 	}
    340       1.1     pooka 
    341       1.1     pooka 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    342      1.29  uebayasi 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    343      1.40       chs 		if (!glocked) {
    344      1.40       chs 			genfs_node_unlock(vp);
    345      1.40       chs 		}
    346       1.1     pooka 		KASSERT(async != 0);
    347      1.29  uebayasi 		genfs_rel_pages(&pgs[ridx], orignmempages);
    348      1.49     rmind 		mutex_exit(uobj->vmobjlock);
    349       1.1     pooka 		error = EBUSY;
    350      1.33  uebayasi 		goto out_err_free;
    351       1.1     pooka 	}
    352       1.1     pooka 
    353       1.1     pooka 	/*
    354       1.1     pooka 	 * if the pages are already resident, just return them.
    355       1.1     pooka 	 */
    356       1.1     pooka 
    357       1.1     pooka 	for (i = 0; i < npages; i++) {
    358      1.31  uebayasi 		struct vm_page *pg = pgs[ridx + i];
    359       1.1     pooka 
    360      1.31  uebayasi 		if ((pg->flags & PG_FAKE) ||
    361      1.31  uebayasi 		    (blockalloc && (pg->flags & PG_RDONLY))) {
    362       1.1     pooka 			break;
    363       1.1     pooka 		}
    364       1.1     pooka 	}
    365       1.1     pooka 	if (i == npages) {
    366      1.40       chs 		if (!glocked) {
    367      1.40       chs 			genfs_node_unlock(vp);
    368      1.40       chs 		}
    369       1.1     pooka 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    370       1.1     pooka 		npages += ridx;
    371       1.1     pooka 		goto out;
    372       1.1     pooka 	}
    373       1.1     pooka 
    374       1.1     pooka 	/*
    375       1.1     pooka 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    376       1.1     pooka 	 */
    377       1.1     pooka 
    378       1.1     pooka 	if (overwrite) {
    379      1.40       chs 		if (!glocked) {
    380      1.40       chs 			genfs_node_unlock(vp);
    381      1.40       chs 		}
    382       1.1     pooka 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    383       1.1     pooka 
    384       1.1     pooka 		for (i = 0; i < npages; i++) {
    385      1.31  uebayasi 			struct vm_page *pg = pgs[ridx + i];
    386       1.1     pooka 
    387      1.31  uebayasi 			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
    388       1.1     pooka 		}
    389       1.1     pooka 		npages += ridx;
    390       1.1     pooka 		goto out;
    391       1.1     pooka 	}
    392       1.1     pooka 
    393       1.1     pooka 	/*
    394       1.1     pooka 	 * the page wasn't resident and we're not overwriting,
    395       1.1     pooka 	 * so we're going to have to do some i/o.
    396       1.1     pooka 	 * find any additional pages needed to cover the expanded range.
    397       1.1     pooka 	 */
    398       1.1     pooka 
    399       1.1     pooka 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    400      1.29  uebayasi 	if (startoffset != origoffset || npages != orignmempages) {
    401      1.31  uebayasi 		int npgs;
    402       1.1     pooka 
    403       1.1     pooka 		/*
    404       1.1     pooka 		 * we need to avoid deadlocks caused by locking
    405       1.1     pooka 		 * additional pages at lower offsets than pages we
    406       1.1     pooka 		 * already have locked.  unlock them all and start over.
    407       1.1     pooka 		 */
    408       1.1     pooka 
    409      1.29  uebayasi 		genfs_rel_pages(&pgs[ridx], orignmempages);
    410       1.1     pooka 		memset(pgs, 0, pgs_size);
    411       1.1     pooka 
    412       1.1     pooka 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    413       1.1     pooka 		    startoffset, endoffset, 0,0);
    414       1.1     pooka 		npgs = npages;
    415       1.1     pooka 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    416       1.1     pooka 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    417      1.40       chs 			if (!glocked) {
    418      1.40       chs 				genfs_node_unlock(vp);
    419      1.40       chs 			}
    420       1.1     pooka 			KASSERT(async != 0);
    421       1.1     pooka 			genfs_rel_pages(pgs, npages);
    422      1.49     rmind 			mutex_exit(uobj->vmobjlock);
    423       1.1     pooka 			error = EBUSY;
    424      1.33  uebayasi 			goto out_err_free;
    425       1.1     pooka 		}
    426       1.1     pooka 	}
    427      1.34  uebayasi 
    428      1.49     rmind 	mutex_exit(uobj->vmobjlock);
    429      1.59  riastrad 	error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
    430      1.59  riastrad 	    async, memwrite, blockalloc, glocked);
    431      1.59  riastrad 	if (!glocked) {
    432      1.59  riastrad 		genfs_node_unlock(vp);
    433      1.59  riastrad 	}
    434      1.67  riastrad 	if (error == 0 && async)
    435      1.67  riastrad 		goto out_err_free;
    436      1.59  riastrad 	mutex_enter(uobj->vmobjlock);
    437      1.59  riastrad 
    438      1.59  riastrad 	/*
    439      1.59  riastrad 	 * we're almost done!  release the pages...
    440      1.59  riastrad 	 * for errors, we free the pages.
    441      1.59  riastrad 	 * otherwise we activate them and mark them as valid and clean.
    442      1.59  riastrad 	 * also, unbusy pages that were not actually requested.
    443      1.59  riastrad 	 */
    444      1.59  riastrad 
    445      1.59  riastrad 	if (error) {
    446      1.59  riastrad 		genfs_rel_pages(pgs, npages);
    447      1.59  riastrad 		mutex_exit(uobj->vmobjlock);
    448      1.59  riastrad 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    449      1.59  riastrad 		goto out_err_free;
    450      1.59  riastrad 	}
    451      1.59  riastrad 
    452      1.59  riastrad out:
    453      1.59  riastrad 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    454      1.59  riastrad 	error = 0;
    455      1.59  riastrad 	mutex_enter(&uvm_pageqlock);
    456      1.59  riastrad 	for (i = 0; i < npages; i++) {
    457      1.59  riastrad 		struct vm_page *pg = pgs[i];
    458      1.59  riastrad 		if (pg == NULL) {
    459      1.59  riastrad 			continue;
    460      1.59  riastrad 		}
    461      1.59  riastrad 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    462      1.59  riastrad 		    pg, pg->flags, 0,0);
    463      1.59  riastrad 		if (pg->flags & PG_FAKE && !overwrite) {
    464      1.59  riastrad 			pg->flags &= ~(PG_FAKE);
    465      1.59  riastrad 			pmap_clear_modify(pgs[i]);
    466      1.59  riastrad 		}
    467      1.59  riastrad 		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    468      1.59  riastrad 		if (i < ridx || i >= ridx + orignmempages || async) {
    469      1.59  riastrad 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    470      1.59  riastrad 			    pg, pg->offset,0,0);
    471      1.59  riastrad 			if (pg->flags & PG_WANTED) {
    472      1.59  riastrad 				wakeup(pg);
    473      1.59  riastrad 			}
    474      1.59  riastrad 			if (pg->flags & PG_FAKE) {
    475      1.59  riastrad 				KASSERT(overwrite);
    476      1.59  riastrad 				uvm_pagezero(pg);
    477      1.59  riastrad 			}
    478      1.59  riastrad 			if (pg->flags & PG_RELEASED) {
    479      1.59  riastrad 				uvm_pagefree(pg);
    480      1.59  riastrad 				continue;
    481      1.59  riastrad 			}
    482      1.59  riastrad 			uvm_pageenqueue(pg);
    483      1.59  riastrad 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    484      1.59  riastrad 			UVM_PAGE_OWN(pg, NULL);
    485      1.59  riastrad 		}
    486      1.59  riastrad 	}
    487      1.59  riastrad 	mutex_exit(&uvm_pageqlock);
    488      1.59  riastrad 	if (memwrite) {
    489      1.59  riastrad 		genfs_markdirty(vp);
    490      1.59  riastrad 	}
    491      1.59  riastrad 	mutex_exit(uobj->vmobjlock);
    492      1.59  riastrad 	if (ap->a_m != NULL) {
    493      1.59  riastrad 		memcpy(ap->a_m, &pgs[ridx],
    494      1.59  riastrad 		    orignmempages * sizeof(struct vm_page *));
    495      1.59  riastrad 	}
    496       1.1     pooka 
    497      1.59  riastrad out_err_free:
    498      1.59  riastrad 	if (pgs != NULL && pgs != pgs_onstack)
    499      1.59  riastrad 		kmem_free(pgs, pgs_size);
    500      1.59  riastrad out_err:
    501      1.64   hannken 	if (trans_mount != NULL) {
    502      1.64   hannken 		if (holds_wapbl)
    503      1.64   hannken 			WAPBL_END(trans_mount);
    504      1.64   hannken 		fstrans_done(trans_mount);
    505      1.59  riastrad 	}
    506      1.59  riastrad 	return error;
    507      1.59  riastrad }
    508      1.59  riastrad 
    509      1.59  riastrad /*
    510      1.59  riastrad  * genfs_getpages_read: Read the pages in with VOP_BMAP/VOP_STRATEGY.
    511      1.68  dholland  *
    512      1.68  dholland  * "glocked" (which is currently not actually used) tells us not whether
    513      1.68  dholland  * the genfs_node is locked on entry (it always is) but whether it was
    514      1.68  dholland  * locked on entry to genfs_getpages.
    515      1.59  riastrad  */
    516      1.59  riastrad static int
    517      1.59  riastrad genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
    518      1.59  riastrad     off_t startoffset, off_t diskeof,
    519      1.59  riastrad     bool async, bool memwrite, bool blockalloc, bool glocked)
    520      1.59  riastrad {
    521      1.59  riastrad 	struct uvm_object * const uobj = &vp->v_uobj;
    522      1.59  riastrad 	const int fs_bshift = (vp->v_type != VBLK) ?
    523      1.59  riastrad 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    524      1.59  riastrad 	const int dev_bshift = (vp->v_type != VBLK) ?
    525      1.59  riastrad 	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
    526      1.59  riastrad 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    527      1.34  uebayasi 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    528      1.34  uebayasi 	vaddr_t kva;
    529      1.34  uebayasi 	struct buf *bp, *mbp;
    530      1.34  uebayasi 	bool sawhole = false;
    531      1.59  riastrad 	int i;
    532      1.59  riastrad 	int error = 0;
    533      1.34  uebayasi 
    534      1.60     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
    535      1.60     skrll 
    536       1.1     pooka 	/*
    537       1.1     pooka 	 * read the desired page(s).
    538       1.1     pooka 	 */
    539       1.1     pooka 
    540       1.1     pooka 	totalbytes = npages << PAGE_SHIFT;
    541       1.1     pooka 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    542       1.1     pooka 	tailbytes = totalbytes - bytes;
    543       1.1     pooka 	skipbytes = 0;
    544       1.1     pooka 
    545       1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
    546      1.55      yamt 	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
    547      1.59  riastrad 	if (kva == 0)
    548      1.59  riastrad 		return EBUSY;
    549       1.1     pooka 
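                             	/* set up the master buffer for the whole read. */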
    550       1.2        ad 	mbp = getiobuf(vp, true);
    551       1.1     pooka 	mbp->b_bufsize = totalbytes;
    552       1.1     pooka 	mbp->b_data = (void *)kva;
    553       1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
    554       1.2        ad 	mbp->b_cflags = BC_BUSY;
    555       1.2        ad 	if (async) {
    556       1.2        ad 		mbp->b_flags = B_READ | B_ASYNC;
    557       1.2        ad 		mbp->b_iodone = uvm_aio_biodone;
    558       1.2        ad 	} else {
    559       1.2        ad 		mbp->b_flags = B_READ;
    560       1.2        ad 		mbp->b_iodone = NULL;
    561      1.43  uebayasi 	}
    562       1.1     pooka 	if (async)
    563       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    564       1.1     pooka 	else
    565       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    566       1.1     pooka 
    567       1.1     pooka 	/*
    568       1.1     pooka 	 * if EOF is in the middle of the range, zero the part past EOF.
    569       1.1     pooka 	 * skip over pages which are not PG_FAKE since in that case they have
    570       1.1     pooka 	 * valid data that we need to preserve.
    571       1.1     pooka 	 */
    572       1.1     pooka 
    573       1.1     pooka 	tailstart = bytes;
    574       1.1     pooka 	while (tailbytes > 0) {
    575       1.1     pooka 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    576       1.1     pooka 
    577       1.1     pooka 		KASSERT(len <= tailbytes);
    578       1.1     pooka 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    579       1.1     pooka 			memset((void *)(kva + tailstart), 0, len);
    580       1.1     pooka 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    581       1.1     pooka 			    kva, tailstart, len, 0);
    582       1.1     pooka 		}
    583       1.1     pooka 		tailstart += len;
    584       1.1     pooka 		tailbytes -= len;
    585       1.1     pooka 	}
    586       1.1     pooka 
    587       1.1     pooka 	/*
    588       1.1     pooka 	 * now loop over the pages, reading as needed.
    589       1.1     pooka 	 */
    590       1.1     pooka 
    591       1.1     pooka 	bp = NULL;
    592      1.28  uebayasi 	off_t offset;
    593      1.28  uebayasi 	for (offset = startoffset;
    594       1.1     pooka 	    bytes > 0;
    595       1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
    596      1.30  uebayasi 		int run;
    597      1.25  uebayasi 		daddr_t lbn, blkno;
    598      1.24  uebayasi 		int pidx;
    599      1.26  uebayasi 		struct vnode *devvp;
    600       1.1     pooka 
    601       1.1     pooka 		/*
    602       1.1     pooka 		 * skip pages which don't need to be read.
    603       1.1     pooka 		 */
    604       1.1     pooka 
    605       1.1     pooka 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    606       1.1     pooka 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    607       1.1     pooka 			size_t b;
    608       1.1     pooka 
    609       1.1     pooka 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    610       1.1     pooka 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    611       1.1     pooka 				sawhole = true;
    612       1.1     pooka 			}
    613       1.1     pooka 			b = MIN(PAGE_SIZE, bytes);
    614       1.1     pooka 			offset += b;
    615       1.1     pooka 			bytes -= b;
    616       1.1     pooka 			skipbytes += b;
    617       1.1     pooka 			pidx++;
    618       1.1     pooka 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    619       1.1     pooka 			    offset, 0,0,0);
    620       1.1     pooka 			if (bytes == 0) {
    621       1.1     pooka 				goto loopdone;
    622       1.1     pooka 			}
    623       1.1     pooka 		}
    624       1.1     pooka 
    625       1.1     pooka 		/*
    626       1.1     pooka 		 * bmap the file to find out the blkno to read from and
    627       1.1     pooka 		 * how much we can read in one i/o.  if bmap returns an error,
    628       1.1     pooka 		 * skip the rest of the top-level i/o.
    629       1.1     pooka 		 */
    630       1.1     pooka 
    631       1.1     pooka 		lbn = offset >> fs_bshift;
    632       1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    633       1.1     pooka 		if (error) {
    634       1.1     pooka 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    635      1.36  uebayasi 			    lbn,error,0,0);
    636       1.1     pooka 			skipbytes += bytes;
    637      1.36  uebayasi 			bytes = 0;
    638       1.1     pooka 			goto loopdone;
    639       1.1     pooka 		}
    640       1.1     pooka 
    641       1.1     pooka 		/*
    642       1.1     pooka 		 * see how many pages can be read with this i/o.
    643       1.1     pooka 		 * reduce the i/o size if necessary to avoid
    644       1.1     pooka 		 * overwriting pages with valid data.
    645       1.1     pooka 		 */
    646       1.1     pooka 
    647       1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    648       1.1     pooka 		    bytes);
    649       1.1     pooka 		if (offset + iobytes > round_page(offset)) {
    650      1.24  uebayasi 			int pcount;
    651      1.24  uebayasi 
    652       1.1     pooka 			pcount = 1;
    653       1.1     pooka 			while (pidx + pcount < npages &&
    654       1.1     pooka 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    655       1.1     pooka 				pcount++;
    656       1.1     pooka 			}
    657       1.1     pooka 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    658       1.1     pooka 			    (offset - trunc_page(offset)));
    659       1.1     pooka 		}
    660       1.1     pooka 
    661       1.1     pooka 		/*
    662       1.1     pooka 		 * if this block isn't allocated, zero it instead of
    663       1.1     pooka 		 * reading it.  unless we are going to allocate blocks,
    664       1.1     pooka 		 * mark the pages we zeroed PG_RDONLY.
    665       1.1     pooka 		 */
    666       1.1     pooka 
    667      1.36  uebayasi 		if (blkno == (daddr_t)-1) {
    668       1.1     pooka 			int holepages = (round_page(offset + iobytes) -
    669       1.1     pooka 			    trunc_page(offset)) >> PAGE_SHIFT;
    670       1.1     pooka 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    671       1.1     pooka 
    672       1.1     pooka 			sawhole = true;
    673       1.1     pooka 			memset((char *)kva + (offset - startoffset), 0,
    674       1.1     pooka 			    iobytes);
    675       1.1     pooka 			skipbytes += iobytes;
    676       1.1     pooka 
    677      1.49     rmind 			mutex_enter(uobj->vmobjlock);
    678       1.1     pooka 			for (i = 0; i < holepages; i++) {
    679      1.35  uebayasi 				if (memwrite) {
    680       1.1     pooka 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    681       1.1     pooka 				}
    682       1.1     pooka 				if (!blockalloc) {
    683       1.1     pooka 					pgs[pidx + i]->flags |= PG_RDONLY;
    684       1.1     pooka 				}
    685       1.1     pooka 			}
    686      1.49     rmind 			mutex_exit(uobj->vmobjlock);
    687       1.1     pooka 			continue;
    688       1.1     pooka 		}
    689       1.1     pooka 
    690       1.1     pooka 		/*
    691       1.1     pooka 		 * allocate a sub-buf for this piece of the i/o
    692       1.1     pooka 		 * (or just use mbp if there's only 1 piece),
    693       1.1     pooka 		 * and start it going.
    694       1.1     pooka 		 */
    695       1.1     pooka 
    696       1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
    697       1.1     pooka 			bp = mbp;
    698       1.1     pooka 		} else {
    699      1.36  uebayasi 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
    700      1.36  uebayasi 			    vp, bp, vp->v_numoutput, 0);
    701       1.2        ad 			bp = getiobuf(vp, true);
    702       1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    703       1.1     pooka 		}
    704       1.1     pooka 		bp->b_lblkno = 0;
    705       1.1     pooka 
    706       1.1     pooka 		/* adjust physical blkno for partial blocks */
    707       1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    708       1.1     pooka 		    dev_bshift);
    709       1.1     pooka 
    710       1.1     pooka 		UVMHIST_LOG(ubchist,
    711       1.1     pooka 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    712      1.36  uebayasi 		    bp, offset, bp->b_bcount, bp->b_blkno);
    713       1.1     pooka 
    714       1.1     pooka 		VOP_STRATEGY(devvp, bp);
    715       1.1     pooka 	}
    716       1.1     pooka 
    717       1.1     pooka loopdone:
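                             	/* account for the bytes we skipped reading. */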
    718       1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
    719       1.1     pooka 	if (async) {
    720       1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    721      1.59  riastrad 		return 0;
    722       1.1     pooka 	}
    723       1.1     pooka 	if (bp != NULL) {
    724       1.1     pooka 		error = biowait(mbp);
    725       1.1     pooka 	}
    726       1.1     pooka 
    727      1.19     rmind 	/* Remove the mapping (make KVA available as soon as possible) */
    728      1.19     rmind 	uvm_pagermapout(kva, npages);
    729      1.19     rmind 
    730       1.1     pooka 	/*
     731       1.1     pooka 	 * if we encountered a hole then we have to do a little more work.
    732       1.1     pooka 	 * for read faults, we marked the page PG_RDONLY so that future
    733       1.1     pooka 	 * write accesses to the page will fault again.
    734       1.1     pooka 	 * for write faults, we must make sure that the backing store for
    735       1.1     pooka 	 * the page is completely allocated while the pages are locked.
    736       1.1     pooka 	 */
    737       1.1     pooka 
    738       1.1     pooka 	if (!error && sawhole && blockalloc) {
    739      1.42   hannken 		error = GOP_ALLOC(vp, startoffset,
    740      1.42   hannken 		    npages << PAGE_SHIFT, 0, cred);
    741       1.1     pooka 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    742       1.1     pooka 		    startoffset, npages << PAGE_SHIFT, error,0);
    743       1.1     pooka 		if (!error) {
    744      1.49     rmind 			mutex_enter(uobj->vmobjlock);
    745       1.1     pooka 			for (i = 0; i < npages; i++) {
    746      1.31  uebayasi 				struct vm_page *pg = pgs[i];
    747      1.31  uebayasi 
    748      1.31  uebayasi 				if (pg == NULL) {
    749       1.1     pooka 					continue;
    750       1.1     pooka 				}
    751      1.31  uebayasi 				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
    752       1.1     pooka 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    753      1.31  uebayasi 				    pg,0,0,0);
    754       1.1     pooka 			}
    755      1.49     rmind 			mutex_exit(uobj->vmobjlock);
    756       1.1     pooka 		}
    757       1.1     pooka 	}
    758      1.18     rmind 
    759      1.18     rmind 	putiobuf(mbp);
    760      1.38       chs 	return error;
    761       1.1     pooka }
    762       1.1     pooka 
    763       1.1     pooka /*
    764       1.1     pooka  * generic VM putpages routine.
    765       1.1     pooka  * Write the given range of pages to backing store.
    766       1.1     pooka  *
    767       1.1     pooka  * => "offhi == 0" means flush all pages at or after "offlo".
    768       1.1     pooka  * => object should be locked by caller.  we return with the
    769       1.1     pooka  *      object unlocked.
    770       1.1     pooka  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    771       1.1     pooka  *	thus, a caller might want to unlock higher level resources
    772       1.1     pooka  *	(e.g. vm_map) before calling flush.
    773       1.1     pooka  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
    774       1.1     pooka  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
    775       1.1     pooka  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
    776       1.1     pooka  *	that new pages are inserted on the tail end of the list.   thus,
    777       1.1     pooka  *	we can make a complete pass through the object in one go by starting
    778       1.1     pooka  *	at the head and working towards the tail (new pages are put in
    779       1.1     pooka  *	front of us).
    780       1.1     pooka  * => NOTE: we are allowed to lock the page queues, so the caller
    781       1.1     pooka  *	must not be holding the page queue lock.
    782       1.1     pooka  *
    783       1.1     pooka  * note on "cleaning" object and PG_BUSY pages:
    784       1.1     pooka  *	this routine is holding the lock on the object.   the only time
    785       1.1     pooka  *	that it can run into a PG_BUSY page that it does not own is if
    786       1.1     pooka  *	some other process has started I/O on the page (e.g. either
    787       1.1     pooka  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
    788       1.1     pooka  *	in, then it can not be dirty (!PG_CLEAN) because no one has
    789       1.1     pooka  *	had a chance to modify it yet.    if the PG_BUSY page is being
    790       1.1     pooka  *	paged out then it means that someone else has already started
    791       1.1     pooka  *	cleaning the page for us (how nice!).    in this case, if we
    792       1.1     pooka  *	have syncio specified, then after we make our pass through the
    793       1.1     pooka  *	object we need to wait for the other PG_BUSY pages to clear
    794       1.1     pooka  *	off (i.e. we need to do an iosync).   also note that once a
    795       1.1     pooka  *	page is PG_BUSY it must stay in its object until it is un-busyed.
    796       1.1     pooka  *
    797       1.1     pooka  * note on page traversal:
    798       1.1     pooka  *	we can traverse the pages in an object either by going down the
    799       1.1     pooka  *	linked list in "uobj->memq", or we can go over the address range
     800       1.1     pooka  *	page by page, doing hash table lookups for each address.    depending
    801       1.1     pooka  *	on how many pages are in the object it may be cheaper to do one
    802       1.1     pooka  *	or the other.   we set "by_list" to true if we are using memq.
    803       1.1     pooka  *	if the cost of a hash lookup was equal to the cost of the list
    804       1.1     pooka  *	traversal we could compare the number of pages in the start->stop
    805       1.1     pooka  *	range to the total number of pages in the object.   however, it
    806       1.1     pooka  *	seems that a hash table lookup is more expensive than the linked
    807       1.1     pooka  *	list traversal, so we multiply the number of pages in the
    808       1.1     pooka  *	range by an estimate of the relatively higher cost of the hash lookup.
    809       1.1     pooka  */
    810       1.1     pooka 
    811       1.1     pooka int
    812       1.1     pooka genfs_putpages(void *v)
    813       1.1     pooka {
    814       1.1     pooka 	struct vop_putpages_args /* {
    815       1.1     pooka 		struct vnode *a_vp;
    816       1.1     pooka 		voff_t a_offlo;
    817       1.1     pooka 		voff_t a_offhi;
    818       1.1     pooka 		int a_flags;
    819      1.22  uebayasi 	} */ * const ap = v;
    820       1.1     pooka 
    821       1.1     pooka 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
    822       1.1     pooka 	    ap->a_flags, NULL);
    823       1.1     pooka }
    824       1.1     pooka 
    825       1.1     pooka int
    826       1.4      yamt genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    827       1.4      yamt     int origflags, struct vm_page **busypg)
    828       1.1     pooka {
    829      1.22  uebayasi 	struct uvm_object * const uobj = &vp->v_uobj;
    830      1.49     rmind 	kmutex_t * const slock = uobj->vmobjlock;
    831       1.1     pooka 	off_t off;
    832       1.2        ad 	int i, error, npages, nback;
    833       1.1     pooka 	int freeflag;
    834      1.63  christos 	/*
     835      1.63  christos 	 * This array is larger than it should be so that its size is constant.
    836      1.63  christos 	 * The right size is MAXPAGES.
    837      1.63  christos 	 */
    838      1.63  christos 	struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
    839      1.63  christos #define MAXPAGES (MAXPHYS / PAGE_SIZE)
    840      1.63  christos 	struct vm_page *pg, *nextpg, *tpg, curmp, endmp;
    841       1.1     pooka 	bool wasclean, by_list, needs_clean, yld;
    842       1.4      yamt 	bool async = (origflags & PGO_SYNCIO) == 0;
    843       1.1     pooka 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
    844      1.22  uebayasi 	struct lwp * const l = curlwp ? curlwp : &lwp0;
    845      1.22  uebayasi 	struct genfs_node * const gp = VTOG(vp);
    846      1.65   hannken 	struct mount *trans_mp;
    847       1.4      yamt 	int flags;
    848       1.1     pooka 	int dirtygen;
    849       1.4      yamt 	bool modified;
    850      1.65   hannken 	bool holds_wapbl;
    851       1.1     pooka 	bool cleanall;
    852       1.4      yamt 	bool onworklst;
    853       1.1     pooka 
    854       1.1     pooka 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
    855       1.1     pooka 
    856       1.4      yamt 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
    857       1.1     pooka 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
    858       1.1     pooka 	KASSERT(startoff < endoff || endoff == 0);
    859       1.1     pooka 
    860       1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
    861       1.1     pooka 	    vp, uobj->uo_npages, startoff, endoff - startoff);
    862       1.1     pooka 
    863      1.65   hannken 	trans_mp = NULL;
    864      1.65   hannken 	holds_wapbl = false;
    865       1.6   hannken 
    866       1.4      yamt retry:
    867       1.4      yamt 	modified = false;
    868       1.4      yamt 	flags = origflags;
    869       1.1     pooka 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
    870       1.1     pooka 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
    871       1.1     pooka 	if (uobj->uo_npages == 0) {
    872       1.1     pooka 		if (vp->v_iflag & VI_ONWORKLST) {
    873       1.1     pooka 			vp->v_iflag &= ~VI_WRMAPDIRTY;
    874       1.1     pooka 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
    875       1.1     pooka 				vn_syncer_remove_from_worklist(vp);
    876       1.1     pooka 		}
    877      1.65   hannken 		if (trans_mp) {
    878      1.65   hannken 			if (holds_wapbl)
    879      1.65   hannken 				WAPBL_END(trans_mp);
    880      1.65   hannken 			fstrans_done(trans_mp);
    881      1.12   hannken 		}
    882       1.2        ad 		mutex_exit(slock);
    883       1.1     pooka 		return (0);
    884       1.1     pooka 	}
    885       1.1     pooka 
    886       1.1     pooka 	/*
    887       1.1     pooka 	 * the vnode has pages, set up to process the request.
    888       1.1     pooka 	 */
    889       1.1     pooka 
    890      1.65   hannken 	if (trans_mp == NULL && (flags & PGO_CLEANIT) != 0) {
    891       1.1     pooka 		if (pagedaemon) {
    892      1.65   hannken 			/* Pagedaemon must not sleep here. */
    893      1.65   hannken 			trans_mp = vp->v_mount;
    894  1.68.6.1    bouyer 			error = fstrans_start_nowait(trans_mp);
    895      1.12   hannken 			if (error) {
    896      1.65   hannken 				mutex_exit(slock);
    897      1.12   hannken 				return error;
    898      1.12   hannken 			}
    899      1.65   hannken 		} else {
    900      1.65   hannken 			/*
     901      1.65   hannken 			 * Cannot use vdead_check() here as this operation
    902      1.65   hannken 			 * usually gets used from VOP_RECLAIM().  Test for
    903      1.65   hannken 			 * change of v_mount instead and retry on change.
    904      1.65   hannken 			 */
    905      1.65   hannken 			mutex_exit(slock);
    906      1.65   hannken 			trans_mp = vp->v_mount;
    907  1.68.6.1    bouyer 			fstrans_start(trans_mp);
    908      1.65   hannken 			if (vp->v_mount != trans_mp) {
    909      1.65   hannken 				fstrans_done(trans_mp);
    910      1.65   hannken 				trans_mp = NULL;
    911      1.65   hannken 			} else {
    912      1.65   hannken 				holds_wapbl = (trans_mp->mnt_wapbl &&
    913      1.65   hannken 				    (origflags & PGO_JOURNALLOCKED) == 0);
    914      1.65   hannken 				if (holds_wapbl) {
    915      1.65   hannken 					error = WAPBL_BEGIN(trans_mp);
    916      1.65   hannken 					if (error) {
    917      1.65   hannken 						fstrans_done(trans_mp);
    918      1.65   hannken 						return error;
    919      1.65   hannken 					}
    920      1.65   hannken 				}
    921      1.65   hannken 			}
    922      1.65   hannken 			mutex_enter(slock);
    923      1.65   hannken 			goto retry;
    924      1.12   hannken 		}
    925       1.1     pooka 	}
    926       1.1     pooka 
    927       1.1     pooka 	error = 0;
    928       1.1     pooka 	wasclean = (vp->v_numoutput == 0);
    929       1.1     pooka 	off = startoff;
    930       1.1     pooka 	if (endoff == 0 || flags & PGO_ALLPAGES) {
    931       1.1     pooka 		endoff = trunc_page(LLONG_MAX);
    932       1.1     pooka 	}
    933       1.1     pooka 	by_list = (uobj->uo_npages <=
    934      1.17      yamt 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
    935       1.1     pooka 
    936       1.1     pooka 	/*
    937       1.1     pooka 	 * if this vnode is known not to have dirty pages,
    938       1.1     pooka 	 * don't bother to clean it out.
    939       1.1     pooka 	 */
    940       1.1     pooka 
    941       1.1     pooka 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    942      1.48      matt #if !defined(DEBUG)
    943       1.1     pooka 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
    944       1.1     pooka 			goto skip_scan;
    945       1.1     pooka 		}
    946      1.48      matt #endif /* !defined(DEBUG) */
    947       1.1     pooka 		flags &= ~PGO_CLEANIT;
    948       1.1     pooka 	}
    949       1.1     pooka 
    950       1.1     pooka 	/*
    951       1.1     pooka 	 * start the loop.  when scanning by list, hold the last page
    952       1.1     pooka 	 * in the list before we start.  pages allocated after we start
    953       1.1     pooka 	 * will be added to the end of the list, so we can stop at the
    954       1.1     pooka 	 * current last page.
    955       1.1     pooka 	 */
    956       1.1     pooka 
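                             	/*
                             	 * cleanall is set when we are cleaning the whole file, which may
                             	 * let us take the vnode off the syncer worklist when we finish.
                             	 */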
    957       1.1     pooka 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
    958       1.1     pooka 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
    959       1.1     pooka 	    (vp->v_iflag & VI_ONWORKLST) != 0;
    960       1.1     pooka 	dirtygen = gp->g_dirtygen;
    961       1.1     pooka 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
    962       1.1     pooka 	if (by_list) {
    963      1.37   hannken 		curmp.flags = PG_MARKER;
    964      1.37   hannken 		endmp.flags = PG_MARKER;
    965       1.1     pooka 		pg = TAILQ_FIRST(&uobj->memq);
    966       1.8        ad 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
    967       1.1     pooka 	} else {
    968       1.1     pooka 		pg = uvm_pagelookup(uobj, off);
    969       1.1     pooka 	}
    970       1.1     pooka 	nextpg = NULL;
    971       1.1     pooka 	while (by_list || off < endoff) {
    972       1.1     pooka 
    973       1.1     pooka 		/*
    974       1.1     pooka 		 * if the current page is not interesting, move on to the next.
    975       1.1     pooka 		 */
    976       1.1     pooka 
    977      1.37   hannken 		KASSERT(pg == NULL || pg->uobject == uobj ||
    978      1.37   hannken 		    (pg->flags & PG_MARKER) != 0);
    979       1.1     pooka 		KASSERT(pg == NULL ||
    980       1.1     pooka 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
    981      1.37   hannken 		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
    982       1.1     pooka 		if (by_list) {
    983       1.1     pooka 			if (pg == &endmp) {
    984       1.1     pooka 				break;
    985       1.1     pooka 			}
    986      1.37   hannken 			if (pg->flags & PG_MARKER) {
    987      1.37   hannken 				pg = TAILQ_NEXT(pg, listq.queue);
    988      1.37   hannken 				continue;
    989      1.37   hannken 			}
    990       1.1     pooka 			if (pg->offset < startoff || pg->offset >= endoff ||
    991       1.1     pooka 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
    992       1.1     pooka 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
    993       1.1     pooka 					wasclean = false;
    994       1.1     pooka 				}
    995       1.8        ad 				pg = TAILQ_NEXT(pg, listq.queue);
    996       1.1     pooka 				continue;
    997       1.1     pooka 			}
    998       1.1     pooka 			off = pg->offset;
    999       1.1     pooka 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1000       1.1     pooka 			if (pg != NULL) {
   1001       1.1     pooka 				wasclean = false;
   1002       1.1     pooka 			}
   1003       1.1     pooka 			off += PAGE_SIZE;
   1004       1.1     pooka 			if (off < endoff) {
   1005       1.1     pooka 				pg = uvm_pagelookup(uobj, off);
   1006       1.1     pooka 			}
   1007       1.1     pooka 			continue;
   1008       1.1     pooka 		}
   1009       1.1     pooka 
   1010       1.1     pooka 		/*
   1011       1.1     pooka 		 * if the current page needs to be cleaned and it's busy,
   1012       1.1     pooka 		 * wait for it to become unbusy.
   1013       1.1     pooka 		 */
   1014       1.1     pooka 
   1015       1.1     pooka 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1016       1.1     pooka 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1017       1.1     pooka 		if (pg->flags & PG_BUSY || yld) {
   1018       1.1     pooka 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1019       1.1     pooka 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1020       1.1     pooka 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1021       1.1     pooka 				error = EDEADLK;
   1022       1.1     pooka 				if (busypg != NULL)
   1023       1.1     pooka 					*busypg = pg;
   1024       1.1     pooka 				break;
   1025       1.1     pooka 			}
   1026       1.1     pooka 			if (pagedaemon) {
   1027       1.1     pooka 				/*
   1028       1.1     pooka 				 * someone has taken the page while we
   1029       1.1     pooka 				 * dropped the lock for fstrans_start.
   1030       1.1     pooka 				 */
   1031       1.1     pooka 				break;
   1032       1.1     pooka 			}
   1033       1.1     pooka 			if (by_list) {
   1034       1.8        ad 				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
   1035       1.1     pooka 				UVMHIST_LOG(ubchist, "curmp next %p",
   1036       1.8        ad 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1037       1.1     pooka 			}
   1038       1.1     pooka 			if (yld) {
   1039       1.2        ad 				mutex_exit(slock);
   1040       1.1     pooka 				preempt();
   1041       1.2        ad 				mutex_enter(slock);
   1042       1.1     pooka 			} else {
   1043       1.1     pooka 				pg->flags |= PG_WANTED;
   1044       1.1     pooka 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1045       1.2        ad 				mutex_enter(slock);
   1046       1.1     pooka 			}
   1047       1.1     pooka 			if (by_list) {
   1048       1.1     pooka 				UVMHIST_LOG(ubchist, "after next %p",
   1049       1.8        ad 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1050       1.8        ad 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1051       1.8        ad 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1052       1.1     pooka 			} else {
   1053       1.1     pooka 				pg = uvm_pagelookup(uobj, off);
   1054       1.1     pooka 			}
   1055       1.1     pooka 			continue;
   1056       1.1     pooka 		}
   1057       1.1     pooka 
   1058       1.1     pooka 		/*
   1059       1.1     pooka 		 * if we're freeing, remove all mappings of the page now.
    1060       1.1     pooka 		 * if we're cleaning, check whether the page needs to be cleaned.
   1061       1.1     pooka 		 */
   1062       1.1     pooka 
   1063       1.1     pooka 		if (flags & PGO_FREE) {
   1064       1.1     pooka 			pmap_page_protect(pg, VM_PROT_NONE);
   1065       1.1     pooka 		} else if (flags & PGO_CLEANIT) {
   1066       1.1     pooka 
   1067       1.1     pooka 			/*
    1068       1.1     pooka 			 * if we still have some hope of getting this vnode off
    1069       1.1     pooka 			 * the syncer queue, write-protect the page.
   1070       1.1     pooka 			 */
   1071       1.1     pooka 
   1072       1.1     pooka 			if (cleanall && wasclean &&
   1073       1.1     pooka 			    gp->g_dirtygen == dirtygen) {
   1074       1.1     pooka 
   1075       1.1     pooka 				/*
   1076       1.1     pooka 				 * uobj pages get wired only by uvm_fault
   1077       1.1     pooka 				 * where uobj is locked.
   1078       1.1     pooka 				 */
   1079       1.1     pooka 
   1080       1.1     pooka 				if (pg->wire_count == 0) {
   1081       1.1     pooka 					pmap_page_protect(pg,
   1082       1.1     pooka 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1083       1.1     pooka 				} else {
   1084       1.1     pooka 					cleanall = false;
   1085       1.1     pooka 				}
   1086       1.1     pooka 			}
   1087       1.1     pooka 		}
   1088       1.1     pooka 
   1089       1.1     pooka 		if (flags & PGO_CLEANIT) {
   1090       1.1     pooka 			needs_clean = pmap_clear_modify(pg) ||
   1091       1.1     pooka 			    (pg->flags & PG_CLEAN) == 0;
   1092       1.1     pooka 			pg->flags |= PG_CLEAN;
   1093       1.1     pooka 		} else {
   1094       1.1     pooka 			needs_clean = false;
   1095       1.1     pooka 		}
   1096       1.1     pooka 
   1097       1.1     pooka 		/*
   1098       1.1     pooka 		 * if we're cleaning, build a cluster.
   1099       1.1     pooka 		 * the cluster will consist of pages which are currently dirty,
   1100       1.1     pooka 		 * but they will be returned to us marked clean.
   1101       1.1     pooka 		 * if not cleaning, just operate on the one page.
   1102       1.1     pooka 		 */
   1103       1.1     pooka 
   1104       1.1     pooka 		if (needs_clean) {
   1105       1.1     pooka 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1106       1.1     pooka 			wasclean = false;
   1107       1.1     pooka 			memset(pgs, 0, sizeof(pgs));
   1108       1.1     pooka 			pg->flags |= PG_BUSY;
   1109       1.1     pooka 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1110       1.1     pooka 
   1111       1.1     pooka 			/*
   1112       1.1     pooka 			 * first look backward.
   1113       1.1     pooka 			 */
   1114       1.1     pooka 
   1115      1.62  christos 			npages = MIN(MAXPAGES >> 1, off >> PAGE_SHIFT);
   1116       1.1     pooka 			nback = npages;
   1117       1.1     pooka 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1118       1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
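                             			/*
                             			 * the backward pages found above occupy the last
                             			 * nback slots of pgs[]; move them to the front of
                             			 * the array and clear the slots they vacated.
                             			 */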
   1119       1.1     pooka 			if (nback) {
   1120       1.1     pooka 				memmove(&pgs[0], &pgs[npages - nback],
   1121       1.1     pooka 				    nback * sizeof(pgs[0]));
   1122       1.1     pooka 				if (npages - nback < nback)
   1123       1.1     pooka 					memset(&pgs[nback], 0,
   1124       1.1     pooka 					    (npages - nback) * sizeof(pgs[0]));
   1125       1.1     pooka 				else
   1126       1.1     pooka 					memset(&pgs[npages - nback], 0,
   1127       1.1     pooka 					    nback * sizeof(pgs[0]));
   1128       1.1     pooka 			}
   1129       1.1     pooka 
   1130       1.1     pooka 			/*
   1131       1.1     pooka 			 * then plug in our page of interest.
   1132       1.1     pooka 			 */
   1133       1.1     pooka 
   1134       1.1     pooka 			pgs[nback] = pg;
   1135       1.1     pooka 
   1136       1.1     pooka 			/*
   1137       1.1     pooka 			 * then look forward to fill in the remaining space in
   1138       1.1     pooka 			 * the array of pages.
   1139       1.1     pooka 			 */
   1140       1.1     pooka 
   1141      1.62  christos 			npages = MAXPAGES - nback - 1;
   1142       1.1     pooka 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1143       1.1     pooka 			    &pgs[nback + 1],
   1144       1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1145       1.1     pooka 			npages += nback + 1;
   1146       1.1     pooka 		} else {
   1147       1.1     pooka 			pgs[0] = pg;
   1148       1.1     pooka 			npages = 1;
   1149       1.1     pooka 			nback = 0;
   1150       1.1     pooka 		}
   1151       1.1     pooka 
   1152       1.1     pooka 		/*
   1153       1.1     pooka 		 * apply FREE or DEACTIVATE options if requested.
   1154       1.1     pooka 		 */
   1155       1.1     pooka 
   1156       1.1     pooka 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1157       1.2        ad 			mutex_enter(&uvm_pageqlock);
   1158       1.1     pooka 		}
   1159       1.1     pooka 		for (i = 0; i < npages; i++) {
   1160       1.1     pooka 			tpg = pgs[i];
   1161       1.1     pooka 			KASSERT(tpg->uobject == uobj);
   1162       1.8        ad 			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
   1163       1.1     pooka 				pg = tpg;
   1164       1.1     pooka 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1165       1.1     pooka 				continue;
   1166       1.1     pooka 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1167       1.1     pooka 				uvm_pagedeactivate(tpg);
   1168       1.1     pooka 			} else if (flags & PGO_FREE) {
   1169       1.1     pooka 				pmap_page_protect(tpg, VM_PROT_NONE);
   1170       1.1     pooka 				if (tpg->flags & PG_BUSY) {
   1171       1.1     pooka 					tpg->flags |= freeflag;
   1172       1.1     pooka 					if (pagedaemon) {
   1173       1.2        ad 						uvm_pageout_start(1);
   1174       1.1     pooka 						uvm_pagedequeue(tpg);
   1175       1.1     pooka 					}
   1176       1.1     pooka 				} else {
   1177       1.1     pooka 
   1178       1.1     pooka 					/*
   1179       1.1     pooka 					 * ``page is not busy''
   1180       1.1     pooka 					 * implies that npages is 1
   1181       1.1     pooka 					 * and needs_clean is false.
   1182       1.1     pooka 					 */
   1183       1.1     pooka 
   1184       1.8        ad 					nextpg = TAILQ_NEXT(tpg, listq.queue);
   1185       1.1     pooka 					uvm_pagefree(tpg);
   1186       1.1     pooka 					if (pagedaemon)
   1187       1.1     pooka 						uvmexp.pdfreed++;
   1188       1.1     pooka 				}
   1189       1.1     pooka 			}
   1190       1.1     pooka 		}
   1191       1.1     pooka 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1192       1.2        ad 			mutex_exit(&uvm_pageqlock);
   1193       1.1     pooka 		}
   1194       1.1     pooka 		if (needs_clean) {
   1195       1.1     pooka 			modified = true;
   1196       1.1     pooka 
   1197       1.1     pooka 			/*
   1198       1.1     pooka 			 * start the i/o.  if we're traversing by list,
   1199       1.1     pooka 			 * keep our place in the list with a marker page.
   1200       1.1     pooka 			 */
   1201       1.1     pooka 
   1202       1.1     pooka 			if (by_list) {
   1203       1.1     pooka 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1204       1.8        ad 				    listq.queue);
   1205       1.1     pooka 			}
   1206       1.2        ad 			mutex_exit(slock);
   1207       1.1     pooka 			error = GOP_WRITE(vp, pgs, npages, flags);
   1208       1.2        ad 			mutex_enter(slock);
   1209       1.1     pooka 			if (by_list) {
   1210       1.8        ad 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1211       1.8        ad 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1212       1.1     pooka 			}
   1213       1.1     pooka 			if (error) {
   1214       1.1     pooka 				break;
   1215       1.1     pooka 			}
   1216       1.1     pooka 			if (by_list) {
   1217       1.1     pooka 				continue;
   1218       1.1     pooka 			}
   1219       1.1     pooka 		}
   1220       1.1     pooka 
   1221       1.1     pooka 		/*
   1222       1.1     pooka 		 * find the next page and continue if there was no error.
   1223       1.1     pooka 		 */
   1224       1.1     pooka 
   1225       1.1     pooka 		if (by_list) {
   1226       1.1     pooka 			if (nextpg) {
   1227       1.1     pooka 				pg = nextpg;
   1228       1.1     pooka 				nextpg = NULL;
   1229       1.1     pooka 			} else {
   1230       1.8        ad 				pg = TAILQ_NEXT(pg, listq.queue);
   1231       1.1     pooka 			}
   1232       1.1     pooka 		} else {
   1233       1.1     pooka 			off += (npages - nback) << PAGE_SHIFT;
   1234       1.1     pooka 			if (off < endoff) {
   1235       1.1     pooka 				pg = uvm_pagelookup(uobj, off);
   1236       1.1     pooka 			}
   1237       1.1     pooka 		}
   1238       1.1     pooka 	}
   1239       1.1     pooka 	if (by_list) {
   1240       1.8        ad 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
   1241       1.1     pooka 	}
   1242       1.1     pooka 
   1243       1.1     pooka 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1244       1.1     pooka 	    (vp->v_type != VBLK ||
   1245       1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1246       1.1     pooka 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1247       1.1     pooka 	}
   1248       1.1     pooka 
   1249       1.1     pooka 	/*
   1250       1.1     pooka 	 * if we're cleaning and there was nothing to clean,
   1251       1.1     pooka 	 * take us off the syncer list.  if we started any i/o
   1252       1.1     pooka 	 * and we're doing sync i/o, wait for all writes to finish.
   1253       1.1     pooka 	 */
   1254       1.1     pooka 
   1255       1.1     pooka 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1256       1.1     pooka 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1257       1.5      yamt #if defined(DEBUG)
   1258       1.8        ad 		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
   1259      1.45   hannken 			if ((pg->flags & (PG_FAKE | PG_MARKER)) != 0) {
   1260      1.37   hannken 				continue;
   1261      1.37   hannken 			}
   1262       1.5      yamt 			if ((pg->flags & PG_CLEAN) == 0) {
   1263       1.5      yamt 				printf("%s: %p: !CLEAN\n", __func__, pg);
   1264       1.5      yamt 			}
   1265       1.5      yamt 			if (pmap_is_modified(pg)) {
   1266       1.5      yamt 				printf("%s: %p: modified\n", __func__, pg);
   1267       1.5      yamt 			}
   1268       1.5      yamt 		}
   1269       1.5      yamt #endif /* defined(DEBUG) */
   1270       1.1     pooka 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1271       1.1     pooka 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1272       1.1     pooka 			vn_syncer_remove_from_worklist(vp);
   1273       1.1     pooka 	}
   1274       1.1     pooka 
   1275       1.1     pooka #if !defined(DEBUG)
   1276       1.1     pooka skip_scan:
   1277       1.1     pooka #endif /* !defined(DEBUG) */
   1278       1.2        ad 
   1279       1.2        ad 	/* Wait for output to complete. */
   1280       1.2        ad 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1281       1.2        ad 		while (vp->v_numoutput != 0)
   1282       1.2        ad 			cv_wait(&vp->v_cv, slock);
   1283       1.1     pooka 	}
   1284       1.4      yamt 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1285       1.2        ad 	mutex_exit(slock);
   1286       1.1     pooka 
   1287       1.4      yamt 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1288       1.4      yamt 		/*
    1289       1.4      yamt 		 * in the case of PGO_RECLAIM, make sure the vnode ends up clean.
   1290       1.4      yamt 		 * retrying is not a big deal because, in many cases,
   1291       1.4      yamt 		 * uobj->uo_npages is already 0 here.
   1292       1.4      yamt 		 */
   1293       1.4      yamt 		mutex_enter(slock);
   1294       1.4      yamt 		goto retry;
   1295       1.4      yamt 	}
   1296       1.4      yamt 
   1297      1.65   hannken 	if (trans_mp) {
   1298      1.65   hannken 		if (holds_wapbl)
   1299      1.65   hannken 			WAPBL_END(trans_mp);
   1300      1.65   hannken 		fstrans_done(trans_mp);
   1301      1.12   hannken 	}
   1302       1.6   hannken 
   1303       1.1     pooka 	return (error);
   1304       1.1     pooka }
   1305       1.1     pooka 
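                             /*
                              * genfs_gop_write: map the pages into kernel memory and write them
                              * out to the underlying device via genfs_do_io().
                              */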
   1306       1.1     pooka int
   1307       1.1     pooka genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1308       1.1     pooka {
   1309       1.1     pooka 	off_t off;
   1310       1.1     pooka 	vaddr_t kva;
   1311       1.1     pooka 	size_t len;
   1312       1.1     pooka 	int error;
   1313       1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1314       1.1     pooka 
   1315       1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1316       1.1     pooka 	    vp, pgs, npages, flags);
   1317       1.1     pooka 
   1318       1.1     pooka 	off = pgs[0]->offset;
   1319       1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1320       1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1321       1.1     pooka 	len = npages << PAGE_SHIFT;
   1322       1.1     pooka 
   1323       1.1     pooka 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1324       1.1     pooka 			    uvm_aio_biodone);
   1325       1.1     pooka 
   1326       1.1     pooka 	return error;
   1327       1.1     pooka }
   1328       1.1     pooka 
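                             /*
                              * genfs_gop_write_rwmap: like genfs_gop_write(), except that the pager
                              * mapping is established with UVMPAGER_MAPIN_READ instead of
                              * UVMPAGER_MAPIN_WRITE, so the page contents remain writable through
                              * the kernel mapping and can still be modified while the write is in
                              * progress.
                              */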
   1329       1.7   reinoud int
   1330       1.7   reinoud genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1331       1.7   reinoud {
   1332       1.7   reinoud 	off_t off;
   1333       1.7   reinoud 	vaddr_t kva;
   1334       1.7   reinoud 	size_t len;
   1335       1.7   reinoud 	int error;
   1336       1.7   reinoud 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1337       1.7   reinoud 
   1338       1.7   reinoud 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1339       1.7   reinoud 	    vp, pgs, npages, flags);
   1340       1.7   reinoud 
   1341       1.7   reinoud 	off = pgs[0]->offset;
   1342       1.7   reinoud 	kva = uvm_pagermapin(pgs, npages,
   1343       1.7   reinoud 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1344       1.7   reinoud 	len = npages << PAGE_SHIFT;
   1345       1.7   reinoud 
   1346       1.7   reinoud 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1347       1.7   reinoud 			    uvm_aio_biodone);
   1348       1.7   reinoud 
   1349       1.7   reinoud 	return error;
   1350       1.7   reinoud }
   1351       1.7   reinoud 
   1352       1.1     pooka /*
   1353       1.1     pooka  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1354       1.1     pooka  * and mapped into kernel memory.  Here we just look up the underlying
   1355       1.1     pooka  * device block addresses and call the strategy routine.
   1356       1.1     pooka  */
   1357       1.1     pooka 
   1358       1.1     pooka static int
   1359       1.1     pooka genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1360       1.1     pooka     enum uio_rw rw, void (*iodone)(struct buf *))
   1361       1.1     pooka {
   1362      1.36  uebayasi 	int s, error;
   1363       1.1     pooka 	int fs_bshift, dev_bshift;
   1364       1.1     pooka 	off_t eof, offset, startoffset;
   1365       1.1     pooka 	size_t bytes, iobytes, skipbytes;
   1366       1.1     pooka 	struct buf *mbp, *bp;
   1367      1.35  uebayasi 	const bool async = (flags & PGO_SYNCIO) == 0;
    1368      1.54       chs 	const bool lazy = (flags & PGO_LAZY) != 0;
   1369      1.35  uebayasi 	const bool iowrite = rw == UIO_WRITE;
   1370      1.35  uebayasi 	const int brw = iowrite ? B_WRITE : B_READ;
   1371       1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1372       1.1     pooka 
   1373       1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1374       1.1     pooka 	    vp, kva, len, flags);
   1375       1.1     pooka 
   1376       1.1     pooka 	KASSERT(vp->v_size <= vp->v_writesize);
   1377       1.1     pooka 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1378       1.1     pooka 	if (vp->v_type != VBLK) {
   1379       1.1     pooka 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1380       1.1     pooka 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1381       1.1     pooka 	} else {
   1382       1.1     pooka 		fs_bshift = DEV_BSHIFT;
   1383       1.1     pooka 		dev_bshift = DEV_BSHIFT;
   1384       1.1     pooka 	}
   1385       1.1     pooka 	error = 0;
   1386       1.1     pooka 	startoffset = off;
   1387       1.1     pooka 	bytes = MIN(len, eof - startoffset);
   1388       1.1     pooka 	skipbytes = 0;
   1389       1.1     pooka 	KASSERT(bytes != 0);
   1390       1.1     pooka 
   1391      1.35  uebayasi 	if (iowrite) {
   1392      1.49     rmind 		mutex_enter(vp->v_interlock);
   1393       1.1     pooka 		vp->v_numoutput += 2;
   1394      1.49     rmind 		mutex_exit(vp->v_interlock);
   1395       1.1     pooka 	}
   1396       1.2        ad 	mbp = getiobuf(vp, true);
   1397       1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1398       1.1     pooka 	    vp, mbp, vp->v_numoutput, bytes);
   1399       1.1     pooka 	mbp->b_bufsize = len;
   1400       1.1     pooka 	mbp->b_data = (void *)kva;
   1401       1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
   1402       1.2        ad 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1403       1.2        ad 	if (async) {
   1404       1.2        ad 		mbp->b_flags = brw | B_ASYNC;
   1405       1.2        ad 		mbp->b_iodone = iodone;
   1406       1.2        ad 	} else {
   1407       1.2        ad 		mbp->b_flags = brw;
   1408       1.2        ad 		mbp->b_iodone = NULL;
   1409       1.2        ad 	}
   1410       1.1     pooka 	if (curlwp == uvm.pagedaemon_lwp)
   1411       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1412      1.54       chs 	else if (async || lazy)
   1413       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1414       1.1     pooka 	else
   1415       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1416       1.1     pooka 
   1417       1.1     pooka 	bp = NULL;
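                             	/*
                             	 * split the request into device-contiguous chunks and issue one
                             	 * nested buf per chunk; mbp collects the result of the whole i/o.
                             	 */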
   1418       1.1     pooka 	for (offset = startoffset;
   1419       1.1     pooka 	    bytes > 0;
   1420       1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
   1421      1.36  uebayasi 		int run;
   1422      1.36  uebayasi 		daddr_t lbn, blkno;
   1423      1.36  uebayasi 		struct vnode *devvp;
   1424      1.36  uebayasi 
   1425      1.36  uebayasi 		/*
   1426      1.36  uebayasi 		 * bmap the file to find out the blkno to read from and
   1427      1.36  uebayasi 		 * how much we can read in one i/o.  if bmap returns an error,
   1428      1.36  uebayasi 		 * skip the rest of the top-level i/o.
   1429      1.36  uebayasi 		 */
   1430      1.36  uebayasi 
   1431       1.1     pooka 		lbn = offset >> fs_bshift;
   1432       1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1433       1.1     pooka 		if (error) {
   1434      1.36  uebayasi 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
   1435      1.36  uebayasi 			    lbn,error,0,0);
   1436       1.1     pooka 			skipbytes += bytes;
   1437       1.1     pooka 			bytes = 0;
   1438      1.36  uebayasi 			goto loopdone;
   1439       1.1     pooka 		}
   1440       1.1     pooka 
   1441      1.36  uebayasi 		/*
    1442      1.36  uebayasi 		 * see how much can be transferred in this i/o:
    1443      1.36  uebayasi 		 * no more than the remaining bytes, and no further than
    1444      1.36  uebayasi 		 * the end of the contiguous run of blocks found by bmap.
   1445      1.36  uebayasi 		 */
   1446      1.36  uebayasi 
   1447       1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1448       1.1     pooka 		    bytes);
   1449      1.36  uebayasi 
   1450      1.36  uebayasi 		/*
    1451      1.36  uebayasi 		 * if this block isn't allocated, skip it.
    1452      1.36  uebayasi 		 * for reads, zero the corresponding part of the buffer
    1453      1.36  uebayasi 		 * instead of reading from the device.
   1454      1.36  uebayasi 		 */
   1455      1.36  uebayasi 
   1456       1.1     pooka 		if (blkno == (daddr_t)-1) {
   1457      1.35  uebayasi 			if (!iowrite) {
   1458       1.1     pooka 				memset((char *)kva + (offset - startoffset), 0,
   1459      1.36  uebayasi 				    iobytes);
   1460       1.1     pooka 			}
   1461       1.1     pooka 			skipbytes += iobytes;
   1462       1.1     pooka 			continue;
   1463       1.1     pooka 		}
   1464       1.1     pooka 
   1465      1.36  uebayasi 		/*
   1466      1.36  uebayasi 		 * allocate a sub-buf for this piece of the i/o
   1467      1.36  uebayasi 		 * (or just use mbp if there's only 1 piece),
   1468      1.36  uebayasi 		 * and start it going.
   1469      1.36  uebayasi 		 */
   1470      1.36  uebayasi 
   1471       1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
   1472       1.1     pooka 			bp = mbp;
   1473       1.1     pooka 		} else {
   1474       1.1     pooka 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1475       1.1     pooka 			    vp, bp, vp->v_numoutput, 0);
   1476       1.2        ad 			bp = getiobuf(vp, true);
   1477       1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1478       1.1     pooka 		}
   1479       1.1     pooka 		bp->b_lblkno = 0;
   1480       1.1     pooka 
   1481       1.1     pooka 		/* adjust physical blkno for partial blocks */
   1482       1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1483       1.1     pooka 		    dev_bshift);
   1484      1.36  uebayasi 
   1485       1.1     pooka 		UVMHIST_LOG(ubchist,
   1486      1.36  uebayasi 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1487      1.36  uebayasi 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1488       1.1     pooka 
   1489       1.1     pooka 		VOP_STRATEGY(devvp, bp);
   1490       1.1     pooka 	}
   1491      1.36  uebayasi 
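                             	/*
                             	 * all pieces have been issued or skipped.  account for the
                             	 * skipped bytes; for async i/o we are done, otherwise wait for
                             	 * the i/o to complete and run the completion routine ourselves.
                             	 */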
   1492      1.36  uebayasi loopdone:
   1493       1.1     pooka 	if (skipbytes) {
   1494       1.1     pooka 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1495       1.1     pooka 	}
   1496       1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
   1497       1.1     pooka 	if (async) {
   1498       1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1499       1.1     pooka 		return (0);
   1500       1.1     pooka 	}
   1501       1.1     pooka 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1502       1.1     pooka 	error = biowait(mbp);
   1503       1.1     pooka 	s = splbio();
   1504       1.1     pooka 	(*iodone)(mbp);
   1505       1.1     pooka 	splx(s);
   1506       1.1     pooka 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1507       1.1     pooka 	return (error);
   1508       1.1     pooka }
   1509       1.1     pooka 
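                             /*
                              * genfs_compat_getpages: getpages implementation for file systems
                              * without native getpages support.  pages that are not already valid
                              * (PG_FAKE) are filled in one page at a time using VOP_READ().
                              */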
   1510       1.1     pooka int
   1511       1.1     pooka genfs_compat_getpages(void *v)
   1512       1.1     pooka {
   1513       1.1     pooka 	struct vop_getpages_args /* {
   1514       1.1     pooka 		struct vnode *a_vp;
   1515       1.1     pooka 		voff_t a_offset;
   1516       1.1     pooka 		struct vm_page **a_m;
   1517       1.1     pooka 		int *a_count;
   1518       1.1     pooka 		int a_centeridx;
   1519       1.1     pooka 		vm_prot_t a_access_type;
   1520       1.1     pooka 		int a_advice;
   1521       1.1     pooka 		int a_flags;
   1522       1.1     pooka 	} */ *ap = v;
   1523       1.1     pooka 
   1524       1.1     pooka 	off_t origoffset;
   1525       1.1     pooka 	struct vnode *vp = ap->a_vp;
   1526       1.1     pooka 	struct uvm_object *uobj = &vp->v_uobj;
   1527       1.1     pooka 	struct vm_page *pg, **pgs;
   1528       1.1     pooka 	vaddr_t kva;
   1529       1.1     pooka 	int i, error, orignpages, npages;
   1530       1.1     pooka 	struct iovec iov;
   1531       1.1     pooka 	struct uio uio;
   1532       1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1533      1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1534       1.1     pooka 
   1535       1.1     pooka 	error = 0;
   1536       1.1     pooka 	origoffset = ap->a_offset;
   1537       1.1     pooka 	orignpages = *ap->a_count;
   1538       1.1     pooka 	pgs = ap->a_m;
   1539       1.1     pooka 
   1540       1.1     pooka 	if (ap->a_flags & PGO_LOCKED) {
   1541       1.1     pooka 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1542      1.35  uebayasi 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1543       1.1     pooka 
   1544      1.38       chs 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1545      1.38       chs 		if (error == 0 && memwrite) {
   1546      1.38       chs 			genfs_markdirty(vp);
   1547      1.38       chs 		}
   1548      1.38       chs 		return error;
   1549       1.1     pooka 	}
   1550       1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1551      1.49     rmind 		mutex_exit(uobj->vmobjlock);
   1552      1.38       chs 		return EINVAL;
   1553       1.1     pooka 	}
   1554       1.1     pooka 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1555      1.49     rmind 		mutex_exit(uobj->vmobjlock);
   1556       1.1     pooka 		return 0;
   1557       1.1     pooka 	}
   1558       1.1     pooka 	npages = orignpages;
   1559       1.1     pooka 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1560      1.49     rmind 	mutex_exit(uobj->vmobjlock);
   1561       1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1562       1.1     pooka 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1563       1.1     pooka 	for (i = 0; i < npages; i++) {
   1564       1.1     pooka 		pg = pgs[i];
   1565       1.1     pooka 		if ((pg->flags & PG_FAKE) == 0) {
   1566       1.1     pooka 			continue;
   1567       1.1     pooka 		}
   1568       1.1     pooka 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1569       1.1     pooka 		iov.iov_len = PAGE_SIZE;
   1570       1.1     pooka 		uio.uio_iov = &iov;
   1571       1.1     pooka 		uio.uio_iovcnt = 1;
   1572       1.1     pooka 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1573       1.1     pooka 		uio.uio_rw = UIO_READ;
   1574       1.1     pooka 		uio.uio_resid = PAGE_SIZE;
   1575       1.1     pooka 		UIO_SETUP_SYSSPACE(&uio);
   1576       1.1     pooka 		/* XXX vn_lock */
   1577       1.1     pooka 		error = VOP_READ(vp, &uio, 0, cred);
   1578       1.1     pooka 		if (error) {
   1579       1.1     pooka 			break;
   1580       1.1     pooka 		}
   1581       1.1     pooka 		if (uio.uio_resid) {
   1582       1.1     pooka 			memset(iov.iov_base, 0, uio.uio_resid);
   1583       1.1     pooka 		}
   1584       1.1     pooka 	}
   1585       1.1     pooka 	uvm_pagermapout(kva, npages);
   1586      1.49     rmind 	mutex_enter(uobj->vmobjlock);
   1587       1.2        ad 	mutex_enter(&uvm_pageqlock);
   1588       1.1     pooka 	for (i = 0; i < npages; i++) {
   1589       1.1     pooka 		pg = pgs[i];
   1590       1.1     pooka 		if (error && (pg->flags & PG_FAKE) != 0) {
   1591       1.1     pooka 			pg->flags |= PG_RELEASED;
   1592       1.1     pooka 		} else {
   1593       1.1     pooka 			pmap_clear_modify(pg);
   1594       1.1     pooka 			uvm_pageactivate(pg);
   1595       1.1     pooka 		}
   1596       1.1     pooka 	}
   1597       1.1     pooka 	if (error) {
   1598       1.1     pooka 		uvm_page_unbusy(pgs, npages);
   1599       1.1     pooka 	}
   1600       1.2        ad 	mutex_exit(&uvm_pageqlock);
   1601      1.38       chs 	if (error == 0 && memwrite) {
   1602      1.38       chs 		genfs_markdirty(vp);
   1603      1.38       chs 	}
   1604      1.49     rmind 	mutex_exit(uobj->vmobjlock);
   1605      1.38       chs 	return error;
   1606       1.1     pooka }
   1607       1.1     pooka 
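                             /*
                              * genfs_compat_gop_write: GOP_WRITE for the compat path above.  the
                              * pages are written with VOP_WRITE(), and a buf is set up to carry
                              * the result and handed to uvm_aio_aiodone() for completion.
                              */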
   1608       1.1     pooka int
   1609       1.1     pooka genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1610       1.1     pooka     int flags)
   1611       1.1     pooka {
   1612       1.1     pooka 	off_t offset;
   1613       1.1     pooka 	struct iovec iov;
   1614       1.1     pooka 	struct uio uio;
   1615       1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1616       1.1     pooka 	struct buf *bp;
   1617       1.1     pooka 	vaddr_t kva;
   1618       1.2        ad 	int error;
   1619       1.1     pooka 
   1620       1.1     pooka 	offset = pgs[0]->offset;
   1621       1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1622       1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1623       1.1     pooka 
   1624       1.1     pooka 	iov.iov_base = (void *)kva;
   1625       1.1     pooka 	iov.iov_len = npages << PAGE_SHIFT;
   1626       1.1     pooka 	uio.uio_iov = &iov;
   1627       1.1     pooka 	uio.uio_iovcnt = 1;
   1628       1.1     pooka 	uio.uio_offset = offset;
   1629       1.1     pooka 	uio.uio_rw = UIO_WRITE;
   1630       1.1     pooka 	uio.uio_resid = npages << PAGE_SHIFT;
   1631       1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
   1632       1.1     pooka 	/* XXX vn_lock */
   1633       1.1     pooka 	error = VOP_WRITE(vp, &uio, 0, cred);
   1634       1.1     pooka 
   1635      1.49     rmind 	mutex_enter(vp->v_interlock);
   1636       1.2        ad 	vp->v_numoutput++;
   1637      1.49     rmind 	mutex_exit(vp->v_interlock);
   1638       1.1     pooka 
   1639       1.2        ad 	bp = getiobuf(vp, true);
   1640       1.2        ad 	bp->b_cflags = BC_BUSY | BC_AGE;
   1641       1.1     pooka 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1642       1.1     pooka 	bp->b_data = (char *)kva;
   1643       1.1     pooka 	bp->b_bcount = npages << PAGE_SHIFT;
   1644       1.1     pooka 	bp->b_bufsize = npages << PAGE_SHIFT;
   1645       1.1     pooka 	bp->b_resid = 0;
   1646       1.1     pooka 	bp->b_error = error;
   1647       1.1     pooka 	uvm_aio_aiodone(bp);
   1648       1.1     pooka 	return (error);
   1649       1.1     pooka }
   1650       1.1     pooka 
   1651       1.1     pooka /*
   1652       1.1     pooka  * Process a uio using direct I/O.  If we reach a part of the request
   1653       1.1     pooka  * which cannot be processed in this fashion for some reason, just return.
   1654       1.1     pooka  * The caller must handle some additional part of the request using
   1655       1.1     pooka  * buffered I/O before trying direct I/O again.
   1656       1.1     pooka  */
   1657       1.1     pooka 
   1658       1.1     pooka void
   1659       1.1     pooka genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1660       1.1     pooka {
   1661       1.1     pooka 	struct vmspace *vs;
   1662       1.1     pooka 	struct iovec *iov;
   1663       1.1     pooka 	vaddr_t va;
   1664       1.1     pooka 	size_t len;
   1665       1.1     pooka 	const int mask = DEV_BSIZE - 1;
   1666       1.1     pooka 	int error;
   1667      1.16     joerg 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1668      1.16     joerg 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1669       1.1     pooka 
   1670       1.1     pooka 	/*
   1671       1.1     pooka 	 * We only support direct I/O to user space for now.
   1672       1.1     pooka 	 */
   1673       1.1     pooka 
   1674       1.1     pooka 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1675       1.1     pooka 		return;
   1676       1.1     pooka 	}
   1677       1.1     pooka 
   1678       1.1     pooka 	/*
   1679       1.1     pooka 	 * If the vnode is mapped, we would need to get the getpages lock
   1680      1.53      yamt 	 * to stabilize the bmap, but then we would get into trouble while
   1681       1.1     pooka 	 * locking the pages if the pages belong to this same vnode (or a
   1682       1.1     pooka 	 * multi-vnode cascade to the same effect).  Just fall back to
   1683       1.1     pooka 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1684       1.1     pooka 	 */
   1685       1.1     pooka 
   1686       1.1     pooka 	if (vp->v_vflag & VV_MAPPED) {
   1687       1.1     pooka 		return;
   1688       1.1     pooka 	}
   1689       1.1     pooka 
   1690      1.16     joerg 	if (need_wapbl) {
   1691      1.13   hannken 		error = WAPBL_BEGIN(vp->v_mount);
   1692      1.13   hannken 		if (error)
   1693      1.13   hannken 			return;
   1694      1.13   hannken 	}
   1695      1.13   hannken 
   1696       1.1     pooka 	/*
   1697       1.1     pooka 	 * Do as much of the uio as possible with direct I/O.
   1698       1.1     pooka 	 */
   1699       1.1     pooka 
   1700       1.1     pooka 	vs = uio->uio_vmspace;
   1701       1.1     pooka 	while (uio->uio_resid) {
   1702       1.1     pooka 		iov = uio->uio_iov;
   1703       1.1     pooka 		if (iov->iov_len == 0) {
   1704       1.1     pooka 			uio->uio_iov++;
   1705       1.1     pooka 			uio->uio_iovcnt--;
   1706       1.1     pooka 			continue;
   1707       1.1     pooka 		}
   1708       1.1     pooka 		va = (vaddr_t)iov->iov_base;
   1709       1.1     pooka 		len = MIN(iov->iov_len, genfs_maxdio);
   1710       1.1     pooka 		len &= ~mask;
   1711       1.1     pooka 
   1712       1.1     pooka 		/*
   1713       1.1     pooka 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1714       1.1     pooka 		 * the current EOF, then fall back to buffered I/O.
   1715       1.1     pooka 		 */
   1716       1.1     pooka 
   1717       1.1     pooka 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1718      1.13   hannken 			break;
   1719       1.1     pooka 		}
   1720       1.1     pooka 
   1721       1.1     pooka 		/*
   1722       1.1     pooka 		 * Check alignment.  The file offset must be at least
   1723       1.1     pooka 		 * sector-aligned.  The exact constraint on memory alignment
   1724       1.1     pooka 		 * is very hardware-dependent, but requiring sector-aligned
   1725       1.1     pooka 		 * addresses there too is safe.
   1726       1.1     pooka 		 */
   1727       1.1     pooka 
   1728       1.1     pooka 		if (uio->uio_offset & mask || va & mask) {
   1729      1.13   hannken 			break;
   1730       1.1     pooka 		}
   1731       1.1     pooka 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1732       1.1     pooka 					  uio->uio_rw);
   1733       1.1     pooka 		if (error) {
   1734       1.1     pooka 			break;
   1735       1.1     pooka 		}
   1736       1.1     pooka 		iov->iov_base = (char *)iov->iov_base + len;
   1737       1.1     pooka 		iov->iov_len -= len;
   1738       1.1     pooka 		uio->uio_offset += len;
   1739       1.1     pooka 		uio->uio_resid -= len;
   1740       1.1     pooka 	}
   1741      1.13   hannken 
   1742      1.16     joerg 	if (need_wapbl)
   1743      1.13   hannken 		WAPBL_END(vp->v_mount);
   1744       1.1     pooka }
   1745       1.1     pooka 
   1746       1.1     pooka /*
   1747       1.1     pooka  * Iodone routine for direct I/O.  We don't do much here since the request is
   1748       1.1     pooka  * always synchronous, so the caller will do most of the work after biowait().
   1749       1.1     pooka  */
   1750       1.1     pooka 
   1751       1.1     pooka static void
   1752       1.1     pooka genfs_dio_iodone(struct buf *bp)
   1753       1.1     pooka {
   1754       1.1     pooka 
   1755       1.1     pooka 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1756       1.2        ad 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1757       1.2        ad 		mutex_enter(bp->b_objlock);
   1758       1.1     pooka 		vwakeup(bp);
   1759       1.2        ad 		mutex_exit(bp->b_objlock);
   1760       1.1     pooka 	}
   1761       1.1     pooka 	putiobuf(bp);
   1762       1.1     pooka }
   1763       1.1     pooka 
   1764       1.1     pooka /*
   1765       1.1     pooka  * Process one chunk of a direct I/O request.
   1766       1.1     pooka  */
   1767       1.1     pooka 
   1768       1.1     pooka static int
   1769       1.1     pooka genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1770       1.1     pooka     off_t off, enum uio_rw rw)
   1771       1.1     pooka {
   1772       1.1     pooka 	struct vm_map *map;
   1773      1.56    martin 	struct pmap *upm, *kpm __unused;
   1774       1.1     pooka 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1775       1.1     pooka 	off_t spoff, epoff;
   1776       1.1     pooka 	vaddr_t kva, puva;
   1777       1.1     pooka 	paddr_t pa;
   1778       1.1     pooka 	vm_prot_t prot;
   1779      1.58    martin 	int error, rv __diagused, poff, koff;
   1780      1.13   hannken 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1781       1.1     pooka 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1782       1.1     pooka 
   1783       1.1     pooka 	/*
   1784       1.1     pooka 	 * For writes, verify that this range of the file already has fully
   1785       1.1     pooka 	 * allocated backing store.  If there are any holes, just punt and
   1786       1.1     pooka 	 * make the caller take the buffered write path.
   1787       1.1     pooka 	 */
   1788       1.1     pooka 
   1789       1.1     pooka 	if (rw == UIO_WRITE) {
   1790       1.1     pooka 		daddr_t lbn, elbn, blkno;
   1791       1.1     pooka 		int bsize, bshift, run;
   1792       1.1     pooka 
   1793       1.1     pooka 		bshift = vp->v_mount->mnt_fs_bshift;
   1794       1.1     pooka 		bsize = 1 << bshift;
   1795       1.1     pooka 		lbn = off >> bshift;
   1796       1.1     pooka 		elbn = (off + len + bsize - 1) >> bshift;
   1797       1.1     pooka 		while (lbn < elbn) {
   1798       1.1     pooka 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1799       1.1     pooka 			if (error) {
   1800       1.1     pooka 				return error;
   1801       1.1     pooka 			}
   1802       1.1     pooka 			if (blkno == (daddr_t)-1) {
   1803       1.1     pooka 				return ENOSPC;
   1804       1.1     pooka 			}
   1805       1.1     pooka 			lbn += 1 + run;
   1806       1.1     pooka 		}
   1807       1.1     pooka 	}
   1808       1.1     pooka 
   1809       1.1     pooka 	/*
   1810       1.1     pooka 	 * Flush any cached pages for parts of the file that we're about to
   1811       1.1     pooka 	 * access.  If we're writing, invalidate pages as well.
   1812       1.1     pooka 	 */
   1813       1.1     pooka 
   1814       1.1     pooka 	spoff = trunc_page(off);
   1815       1.1     pooka 	epoff = round_page(off + len);
   1816      1.49     rmind 	mutex_enter(vp->v_interlock);
   1817       1.1     pooka 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1818       1.1     pooka 	if (error) {
   1819       1.1     pooka 		return error;
   1820       1.1     pooka 	}
   1821       1.1     pooka 
   1822       1.1     pooka 	/*
   1823       1.1     pooka 	 * Wire the user pages and remap them into kernel memory.
   1824       1.1     pooka 	 */
   1825       1.1     pooka 
   1826       1.1     pooka 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1827       1.1     pooka 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1828       1.1     pooka 	if (error) {
   1829       1.1     pooka 		return error;
   1830       1.1     pooka 	}
   1831       1.1     pooka 
   1832       1.1     pooka 	map = &vs->vm_map;
   1833       1.1     pooka 	upm = vm_map_pmap(map);
   1834       1.1     pooka 	kpm = vm_map_pmap(kernel_map);
   1835       1.1     pooka 	puva = trunc_page(uva);
   1836      1.51      matt 	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
   1837      1.51      matt 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
   1838       1.1     pooka 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1839       1.1     pooka 		rv = pmap_extract(upm, puva + poff, &pa);
   1840       1.1     pooka 		KASSERT(rv);
   1841      1.51      matt 		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
   1842       1.1     pooka 	}
   1843       1.1     pooka 	pmap_update(kpm);
   1844       1.1     pooka 
   1845       1.1     pooka 	/*
   1846       1.1     pooka 	 * Do the I/O.
   1847       1.1     pooka 	 */
   1848       1.1     pooka 
   1849       1.1     pooka 	koff = uva - trunc_page(uva);
   1850       1.1     pooka 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1851       1.1     pooka 			    genfs_dio_iodone);
   1852       1.1     pooka 
   1853       1.1     pooka 	/*
   1854       1.1     pooka 	 * Tear down the kernel mapping.
   1855       1.1     pooka 	 */
   1856       1.1     pooka 
   1857      1.51      matt 	pmap_kremove(kva, klen);
   1858       1.1     pooka 	pmap_update(kpm);
   1859       1.1     pooka 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1860       1.1     pooka 
   1861       1.1     pooka 	/*
   1862       1.1     pooka 	 * Unwire the user pages.
   1863       1.1     pooka 	 */
   1864       1.1     pooka 
   1865       1.1     pooka 	uvm_vsunlock(vs, (void *)uva, len);
   1866       1.1     pooka 	return error;
   1867       1.1     pooka }
   1868