      1  1.36.2.31  uebayasi /*	$NetBSD: genfs_io.c,v 1.36.2.40 2010/11/19 05:22:29 uebayasi Exp $	*/
      2        1.1     pooka 
      3        1.1     pooka /*
      4        1.1     pooka  * Copyright (c) 1982, 1986, 1989, 1993
      5        1.1     pooka  *	The Regents of the University of California.  All rights reserved.
      6        1.1     pooka  *
      7        1.1     pooka  * Redistribution and use in source and binary forms, with or without
      8        1.1     pooka  * modification, are permitted provided that the following conditions
      9        1.1     pooka  * are met:
     10        1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     11        1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     12        1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     13        1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     14        1.1     pooka  *    documentation and/or other materials provided with the distribution.
     15        1.1     pooka  * 3. Neither the name of the University nor the names of its contributors
     16        1.1     pooka  *    may be used to endorse or promote products derived from this software
     17        1.1     pooka  *    without specific prior written permission.
     18        1.1     pooka  *
     19        1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20        1.1     pooka  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21        1.1     pooka  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22        1.1     pooka  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23        1.1     pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24        1.1     pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25        1.1     pooka  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26        1.1     pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27        1.1     pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28        1.1     pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29        1.1     pooka  * SUCH DAMAGE.
     30        1.1     pooka  *
     31        1.1     pooka  */
     32        1.1     pooka 
     33        1.1     pooka #include <sys/cdefs.h>
     34  1.36.2.31  uebayasi __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.40 2010/11/19 05:22:29 uebayasi Exp $");
     35        1.1     pooka 
     36   1.36.2.1  uebayasi #include "opt_xip.h"
     37   1.36.2.1  uebayasi 
     38        1.1     pooka #include <sys/param.h>
     39        1.1     pooka #include <sys/systm.h>
     40        1.1     pooka #include <sys/proc.h>
     41        1.1     pooka #include <sys/kernel.h>
     42        1.1     pooka #include <sys/mount.h>
     43        1.1     pooka #include <sys/namei.h>
     44        1.1     pooka #include <sys/vnode.h>
     45        1.1     pooka #include <sys/fcntl.h>
     46        1.1     pooka #include <sys/kmem.h>
     47        1.1     pooka #include <sys/poll.h>
     48        1.1     pooka #include <sys/mman.h>
     49        1.1     pooka #include <sys/file.h>
     50        1.1     pooka #include <sys/kauth.h>
     51        1.1     pooka #include <sys/fstrans.h>
     52       1.15     pooka #include <sys/buf.h>
     53  1.36.2.11  uebayasi #include <sys/once.h>
     54        1.1     pooka 
     55        1.1     pooka #include <miscfs/genfs/genfs.h>
     56        1.1     pooka #include <miscfs/genfs/genfs_node.h>
     57        1.1     pooka #include <miscfs/specfs/specdev.h>
     58        1.1     pooka 
     59        1.1     pooka #include <uvm/uvm.h>
     60        1.1     pooka #include <uvm/uvm_pager.h>
     61        1.1     pooka 
     62   1.36.2.4  uebayasi #ifdef XIP
     63   1.36.2.1  uebayasi static int genfs_do_getpages_xip(void *);
     64  1.36.2.35  uebayasi static int genfs_do_getpages_xip_io(struct vnode *, voff_t, struct vm_page **,
     65  1.36.2.35  uebayasi     int *, int, vm_prot_t, int, int);
     66  1.36.2.24  uebayasi static int genfs_do_putpages_xip(struct vnode *, off_t, off_t, int,
     67  1.36.2.24  uebayasi     struct vm_page **);
     68   1.36.2.4  uebayasi #endif
     69        1.1     pooka static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     70        1.1     pooka     off_t, enum uio_rw);
     71        1.1     pooka static void genfs_dio_iodone(struct buf *);
     72        1.1     pooka 
     73        1.1     pooka static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     74        1.1     pooka     void (*)(struct buf *));
     75  1.36.2.21  uebayasi static void genfs_rel_pages(struct vm_page **, int);
     76  1.36.2.21  uebayasi static void genfs_markdirty(struct vnode *);
     77        1.1     pooka 
     78        1.1     pooka int genfs_maxdio = MAXPHYS;
     79        1.1     pooka 
     80  1.36.2.21  uebayasi static void
     81        1.1     pooka genfs_rel_pages(struct vm_page **pgs, int npages)
     82        1.1     pooka {
     83        1.1     pooka 	int i;
     84        1.1     pooka 
     85        1.1     pooka 	for (i = 0; i < npages; i++) {
     86        1.1     pooka 		struct vm_page *pg = pgs[i];
     87        1.1     pooka 
     88        1.1     pooka 		if (pg == NULL || pg == PGO_DONTCARE)
     89        1.1     pooka 			continue;
     90        1.1     pooka 		if (pg->flags & PG_FAKE) {
     91        1.1     pooka 			pg->flags |= PG_RELEASED;
     92        1.1     pooka 		}
     93        1.1     pooka 	}
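	/*
	 * uvm_page_unbusy() below frees the pages marked PG_RELEASED
	 * above (they were still PG_FAKE and so never held valid data)
	 * and merely unbusies and wakes any waiters on the rest.
	 */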
     94        1.2        ad 	mutex_enter(&uvm_pageqlock);
     95        1.1     pooka 	uvm_page_unbusy(pgs, npages);
     96        1.2        ad 	mutex_exit(&uvm_pageqlock);
     97        1.1     pooka }
     98        1.1     pooka 
     99  1.36.2.21  uebayasi static void
    100  1.36.2.21  uebayasi genfs_markdirty(struct vnode *vp)
    101  1.36.2.21  uebayasi {
    102  1.36.2.21  uebayasi 	struct genfs_node * const gp = VTOG(vp);
    103  1.36.2.21  uebayasi 
    104  1.36.2.21  uebayasi 	KASSERT(mutex_owned(&vp->v_interlock));
    105  1.36.2.21  uebayasi 	gp->g_dirtygen++;
    106  1.36.2.21  uebayasi 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    107  1.36.2.21  uebayasi 		vn_syncer_add_to_worklist(vp, filedelay);
    108  1.36.2.21  uebayasi 	}
    109  1.36.2.21  uebayasi 	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
    110  1.36.2.21  uebayasi 		vp->v_iflag |= VI_WRMAPDIRTY;
    111  1.36.2.21  uebayasi 	}
    112  1.36.2.21  uebayasi }
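
/*
 * Example (illustrative sketch, not compiled in): genfs_markdirty() is
 * called with the uvm object lock -- vp->v_interlock -- held, as the
 * KASSERT above enforces.  A hypothetical caller looks like:
 */
#if 0
	mutex_enter(&vp->v_interlock);
	genfs_markdirty(vp);
	mutex_exit(&vp->v_interlock);
#endif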
    113  1.36.2.21  uebayasi 
    114        1.1     pooka /*
    115        1.1     pooka  * generic VM getpages routine.
    116        1.1     pooka  * Return PG_BUSY pages for the given range,
    117        1.1     pooka  * reading from backing store if necessary.
    118        1.1     pooka  */
    119        1.1     pooka 
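/*
 * Illustrative sketch (not compiled in) of a typical call through the
 * vnode interface; "vp", "offset" and "pgs" are hypothetical names, and
 * the argument order follows vop_getpages_args below.  On success the
 * pages come back PG_BUSY and the object lock has been released.
 */
#if 0
	struct vm_page *pgs[1] = { NULL };
	int npages = 1;
	int error;

	mutex_enter(&vp->v_uobj.vmobjlock);
	error = VOP_GETPAGES(vp, trunc_page(offset), pgs, &npages,
	    0 /* centeridx */, VM_PROT_READ, 0 /* advice */, PGO_SYNCIO);
#endif
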
    120        1.1     pooka int
    121        1.1     pooka genfs_getpages(void *v)
    122        1.1     pooka {
    123   1.36.2.1  uebayasi 	struct vop_getpages_args /* {
    124   1.36.2.1  uebayasi 		struct vnode *a_vp;
    125   1.36.2.1  uebayasi 		voff_t a_offset;
    126   1.36.2.1  uebayasi 		struct vm_page **a_m;
    127   1.36.2.1  uebayasi 		int *a_count;
    128   1.36.2.1  uebayasi 		int a_centeridx;
    129   1.36.2.1  uebayasi 		vm_prot_t a_access_type;
    130   1.36.2.1  uebayasi 		int a_advice;
    131   1.36.2.1  uebayasi 		int a_flags;
    132   1.36.2.1  uebayasi 	} */ * const ap = v;
    133        1.1     pooka 
    134       1.24  uebayasi 	off_t diskeof, memeof;
    135       1.31  uebayasi 	int i, error, npages;
    136       1.10      yamt 	const int flags = ap->a_flags;
    137       1.22  uebayasi 	struct vnode * const vp = ap->a_vp;
    138       1.22  uebayasi 	struct uvm_object * const uobj = &vp->v_uobj;
    139       1.31  uebayasi 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    140       1.10      yamt 	const bool async = (flags & PGO_SYNCIO) == 0;
    141       1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    142        1.1     pooka 	bool has_trans = false;
    143       1.10      yamt 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    144       1.35  uebayasi 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    145  1.36.2.27  uebayasi 	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
    146        1.1     pooka 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    147        1.1     pooka 
    148        1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    149        1.1     pooka 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    150        1.1     pooka 
    151        1.1     pooka 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    152        1.1     pooka 	    vp->v_type == VLNK || vp->v_type == VBLK);
    153        1.1     pooka 
    154        1.1     pooka startover:
    155        1.1     pooka 	error = 0;
    156       1.27  uebayasi 	const voff_t origvsize = vp->v_size;
    157       1.27  uebayasi 	const off_t origoffset = ap->a_offset;
    158       1.29  uebayasi 	const int orignpages = *ap->a_count;
    159       1.33  uebayasi 
    160        1.1     pooka 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    161        1.1     pooka 	if (flags & PGO_PASTEOF) {
    162       1.24  uebayasi 		off_t newsize;
    163        1.1     pooka #if defined(DIAGNOSTIC)
    164        1.1     pooka 		off_t writeeof;
    165        1.1     pooka #endif /* defined(DIAGNOSTIC) */
    166        1.1     pooka 
    167        1.1     pooka 		newsize = MAX(origvsize,
    168        1.1     pooka 		    origoffset + (orignpages << PAGE_SHIFT));
    169        1.1     pooka 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    170        1.1     pooka #if defined(DIAGNOSTIC)
    171        1.1     pooka 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    172        1.1     pooka 		if (newsize > round_page(writeeof)) {
    173  1.36.2.27  uebayasi 			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
    174  1.36.2.27  uebayasi 			    __func__, newsize, round_page(writeeof));
    175        1.1     pooka 		}
    176        1.1     pooka #endif /* defined(DIAGNOSTIC) */
    177        1.1     pooka 	} else {
    178        1.1     pooka 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    179        1.1     pooka 	}
      180        1.1     pooka 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    181        1.1     pooka 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    182        1.1     pooka 	KASSERT(orignpages > 0);
    183        1.1     pooka 
    184        1.1     pooka 	/*
    185        1.1     pooka 	 * Bounds-check the request.
    186        1.1     pooka 	 */
    187        1.1     pooka 
    188        1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    189        1.1     pooka 		if ((flags & PGO_LOCKED) == 0) {
    190        1.2        ad 			mutex_exit(&uobj->vmobjlock);
    191        1.1     pooka 		}
    192        1.1     pooka 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    193        1.1     pooka 		    origoffset, *ap->a_count, memeof,0);
    194        1.1     pooka 		error = EINVAL;
    195        1.1     pooka 		goto out_err;
    196        1.1     pooka 	}
    197        1.1     pooka 
    198        1.1     pooka 	/* uobj is locked */
    199        1.1     pooka 
    200        1.1     pooka 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    201        1.1     pooka 	    (vp->v_type != VBLK ||
    202        1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    203        1.1     pooka 		int updflags = 0;
    204        1.1     pooka 
    205        1.1     pooka 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    206        1.1     pooka 			updflags = GOP_UPDATE_ACCESSED;
    207        1.1     pooka 		}
    208       1.35  uebayasi 		if (memwrite) {
    209        1.1     pooka 			updflags |= GOP_UPDATE_MODIFIED;
    210        1.1     pooka 		}
    211        1.1     pooka 		if (updflags != 0) {
    212        1.1     pooka 			GOP_MARKUPDATE(vp, updflags);
    213        1.1     pooka 		}
    214        1.1     pooka 	}
    215        1.1     pooka 
    216        1.1     pooka 	/*
    217        1.1     pooka 	 * For PGO_LOCKED requests, just return whatever's in memory.
    218        1.1     pooka 	 */
    219        1.1     pooka 
    220        1.1     pooka 	if (flags & PGO_LOCKED) {
    221  1.36.2.39  uebayasi #if 0
    222  1.36.2.39  uebayasi 		genfs_do_getpages_locked();
    223  1.36.2.39  uebayasi 	} else {
    224  1.36.2.39  uebayasi 		genfs_do_getpages_unlocked();
    225  1.36.2.39  uebayasi 	}
    226  1.36.2.39  uebayasi }
    227  1.36.2.39  uebayasi 
    228  1.36.2.39  uebayasi int
    229  1.36.2.39  uebayasi genfs_do_getpages_locked()
    230  1.36.2.39  uebayasi {
    231  1.36.2.39  uebayasi #endif
    232        1.1     pooka 		int nfound;
    233       1.31  uebayasi 		struct vm_page *pg;
    234        1.1     pooka 
    235  1.36.2.39  uebayasi #if 1
    236  1.36.2.39  uebayasi 		if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    237  1.36.2.39  uebayasi 			*ap->a_count = 0;
    238  1.36.2.39  uebayasi 			return 0;
    239  1.36.2.39  uebayasi 		}
    240  1.36.2.39  uebayasi #endif
    241  1.36.2.39  uebayasi 
    242  1.36.2.27  uebayasi 		KASSERT(!glocked);
    243        1.1     pooka 		npages = *ap->a_count;
    244        1.1     pooka #if defined(DEBUG)
    245        1.1     pooka 		for (i = 0; i < npages; i++) {
    246        1.1     pooka 			pg = ap->a_m[i];
    247        1.1     pooka 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    248        1.1     pooka 		}
    249        1.1     pooka #endif /* defined(DEBUG) */
    250        1.1     pooka 		nfound = uvn_findpages(uobj, origoffset, &npages,
    251       1.35  uebayasi 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
    252        1.1     pooka 		KASSERT(npages == *ap->a_count);
    253        1.1     pooka 		if (nfound == 0) {
    254        1.1     pooka 			error = EBUSY;
    255        1.1     pooka 			goto out_err;
    256        1.1     pooka 		}
    257       1.23  uebayasi 		if (!genfs_node_rdtrylock(vp)) {
    258        1.1     pooka 			genfs_rel_pages(ap->a_m, npages);
    259        1.1     pooka 
    260        1.1     pooka 			/*
    261        1.1     pooka 			 * restore the array.
    262        1.1     pooka 			 */
    263        1.1     pooka 
    264        1.1     pooka 			for (i = 0; i < npages; i++) {
    265        1.1     pooka 				pg = ap->a_m[i];
    266        1.1     pooka 
    267  1.36.2.30  uebayasi 				if (pg != NULL && pg != PGO_DONTCARE) {
    268        1.1     pooka 					ap->a_m[i] = NULL;
    269        1.1     pooka 				}
    270  1.36.2.30  uebayasi 				KASSERT(pg == NULL || pg == PGO_DONTCARE);
    271        1.1     pooka 			}
    272        1.1     pooka 		} else {
    273       1.23  uebayasi 			genfs_node_unlock(vp);
    274        1.1     pooka 		}
    275        1.1     pooka 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    276  1.36.2.21  uebayasi 		if (error == 0 && memwrite) {
    277  1.36.2.21  uebayasi 			genfs_markdirty(vp);
    278  1.36.2.21  uebayasi 		}
    279        1.1     pooka 		goto out_err;
    280        1.1     pooka 	}
    281        1.2        ad 	mutex_exit(&uobj->vmobjlock);
    282  1.36.2.39  uebayasi #if 0
    283  1.36.2.39  uebayasi }
    284  1.36.2.39  uebayasi 
    285  1.36.2.39  uebayasi int
    286  1.36.2.39  uebayasi genfs_do_getpages_unlocked()
    287  1.36.2.39  uebayasi {
    288  1.36.2.39  uebayasi #endif
    289  1.36.2.39  uebayasi #if 1
    290  1.36.2.39  uebayasi 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
    291  1.36.2.39  uebayasi 		return genfs_do_getpages_xip(v);
    292  1.36.2.39  uebayasi #endif
    293        1.1     pooka 	/*
    294        1.1     pooka 	 * find the requested pages and make some simple checks.
    295        1.1     pooka 	 * leave space in the page array for a whole block.
    296        1.1     pooka 	 */
    297        1.1     pooka 
    298       1.27  uebayasi 	const int fs_bshift = (vp->v_type != VBLK) ?
    299       1.27  uebayasi 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    300       1.27  uebayasi 	const int dev_bshift = (vp->v_type != VBLK) ?
    301       1.27  uebayasi 	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
    302       1.27  uebayasi 	const int fs_bsize = 1 << fs_bshift;
    303       1.30  uebayasi #define	blk_mask	(fs_bsize - 1)
    304       1.30  uebayasi #define	trunc_blk(x)	((x) & ~blk_mask)
    305       1.30  uebayasi #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
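	/*
	 * Example: with fs_bsize = 8192 (fs_bshift == 13), trunc_blk(12288)
	 * is 8192 and round_blk(12288) is 16384, so the request below is
	 * widened to cover whole filesystem blocks.
	 */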
    306        1.1     pooka 
    307       1.29  uebayasi 	const int orignmempages = MIN(orignpages,
    308        1.1     pooka 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    309       1.29  uebayasi 	npages = orignmempages;
    310       1.30  uebayasi 	const off_t startoffset = trunc_blk(origoffset);
    311       1.30  uebayasi 	const off_t endoffset = MIN(
    312       1.30  uebayasi 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    313       1.30  uebayasi 	    round_page(memeof));
    314       1.31  uebayasi 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    315        1.1     pooka 
    316       1.33  uebayasi 	const int pgs_size = sizeof(struct vm_page *) *
    317        1.1     pooka 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    318       1.33  uebayasi 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    319       1.31  uebayasi 
    320        1.1     pooka 	if (pgs_size > sizeof(pgs_onstack)) {
    321        1.1     pooka 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    322        1.1     pooka 		if (pgs == NULL) {
    323        1.1     pooka 			pgs = pgs_onstack;
    324        1.1     pooka 			error = ENOMEM;
    325       1.32  uebayasi 			goto out_err;
    326        1.1     pooka 		}
    327        1.1     pooka 	} else {
    328       1.14  christos 		pgs = pgs_onstack;
    329       1.14  christos 		(void)memset(pgs, 0, pgs_size);
    330        1.1     pooka 	}
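	/*
	 * The page pointer array lives on the stack while it fits in
	 * UBC_MAX_PAGES entries; larger requests fall back to kmem_zalloc(),
	 * using KM_NOSLEEP for async callers, which must not sleep and
	 * instead fail with ENOMEM.
	 */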
    331       1.14  christos 
    332        1.1     pooka 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    333        1.1     pooka 	    ridx, npages, startoffset, endoffset);
    334        1.1     pooka 
    335        1.1     pooka 	if (!has_trans) {
    336        1.1     pooka 		fstrans_start(vp->v_mount, FSTRANS_SHARED);
    337        1.1     pooka 		has_trans = true;
    338        1.1     pooka 	}
    339        1.1     pooka 
    340        1.1     pooka 	/*
    341        1.1     pooka 	 * hold g_glock to prevent a race with truncate.
    342        1.1     pooka 	 *
    343        1.1     pooka 	 * check if our idea of v_size is still valid.
    344        1.1     pooka 	 */
    345        1.1     pooka 
    346  1.36.2.27  uebayasi 	KASSERT(!glocked || genfs_node_wrlocked(vp));
    347  1.36.2.27  uebayasi 	if (!glocked) {
    348  1.36.2.27  uebayasi 		if (blockalloc) {
    349  1.36.2.27  uebayasi 			genfs_node_wrlock(vp);
    350  1.36.2.27  uebayasi 		} else {
    351  1.36.2.27  uebayasi 			genfs_node_rdlock(vp);
    352  1.36.2.27  uebayasi 		}
    353        1.1     pooka 	}
    354        1.2        ad 	mutex_enter(&uobj->vmobjlock);
    355        1.1     pooka 	if (vp->v_size < origvsize) {
    356  1.36.2.27  uebayasi 		if (!glocked) {
    357  1.36.2.27  uebayasi 			genfs_node_unlock(vp);
    358  1.36.2.27  uebayasi 		}
    359        1.1     pooka 		if (pgs != pgs_onstack)
    360        1.1     pooka 			kmem_free(pgs, pgs_size);
    361        1.1     pooka 		goto startover;
    362        1.1     pooka 	}
    363        1.1     pooka 
    364        1.1     pooka 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    365       1.29  uebayasi 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    366  1.36.2.27  uebayasi 		if (!glocked) {
    367  1.36.2.27  uebayasi 			genfs_node_unlock(vp);
    368  1.36.2.27  uebayasi 		}
    369        1.1     pooka 		KASSERT(async != 0);
    370       1.29  uebayasi 		genfs_rel_pages(&pgs[ridx], orignmempages);
    371        1.2        ad 		mutex_exit(&uobj->vmobjlock);
    372        1.1     pooka 		error = EBUSY;
    373       1.33  uebayasi 		goto out_err_free;
    374        1.1     pooka 	}
    375        1.1     pooka 
    376        1.1     pooka 	/*
    377        1.1     pooka 	 * if the pages are already resident, just return them.
    378        1.1     pooka 	 */
    379        1.1     pooka 
    380        1.1     pooka 	for (i = 0; i < npages; i++) {
    381       1.31  uebayasi 		struct vm_page *pg = pgs[ridx + i];
    382        1.1     pooka 
    383       1.31  uebayasi 		if ((pg->flags & PG_FAKE) ||
    384       1.31  uebayasi 		    (blockalloc && (pg->flags & PG_RDONLY))) {
    385        1.1     pooka 			break;
    386        1.1     pooka 		}
    387        1.1     pooka 	}
    388        1.1     pooka 	if (i == npages) {
    389  1.36.2.27  uebayasi 		if (!glocked) {
    390  1.36.2.27  uebayasi 			genfs_node_unlock(vp);
    391  1.36.2.27  uebayasi 		}
    392        1.1     pooka 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    393        1.1     pooka 		npages += ridx;
    394        1.1     pooka 		goto out;
    395        1.1     pooka 	}
    396        1.1     pooka 
    397        1.1     pooka 	/*
    398        1.1     pooka 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    399        1.1     pooka 	 */
    400        1.1     pooka 
    401        1.1     pooka 	if (overwrite) {
    402  1.36.2.39  uebayasi #if 0
    403  1.36.2.39  uebayasi 		genfs_do_getpages_overwrite();
    404  1.36.2.39  uebayasi 	} else {
    405  1.36.2.39  uebayasi 		genfs_do_getpages_io();
    406  1.36.2.39  uebayasi 	}
    407  1.36.2.39  uebayasi }
    408  1.36.2.39  uebayasi 
    409  1.36.2.39  uebayasi int
    410  1.36.2.39  uebayasi genfs_do_getpages_overwrite()
    411  1.36.2.39  uebayasi {
    412  1.36.2.39  uebayasi 	{
    413  1.36.2.39  uebayasi #endif
    414  1.36.2.27  uebayasi 		if (!glocked) {
    415  1.36.2.27  uebayasi 			genfs_node_unlock(vp);
    416  1.36.2.27  uebayasi 		}
    417        1.1     pooka 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    418        1.1     pooka 
    419        1.1     pooka 		for (i = 0; i < npages; i++) {
    420       1.31  uebayasi 			struct vm_page *pg = pgs[ridx + i];
    421        1.1     pooka 
    422       1.31  uebayasi 			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
    423        1.1     pooka 		}
    424        1.1     pooka 		npages += ridx;
    425        1.1     pooka 		goto out;
    426        1.1     pooka 	}
    427  1.36.2.39  uebayasi #if 0
    428  1.36.2.39  uebayasi }
    429        1.1     pooka 
    430  1.36.2.39  uebayasi int
    431  1.36.2.39  uebayasi genfs_do_getpages_io()
    432  1.36.2.39  uebayasi {
    433  1.36.2.39  uebayasi #endif
    434        1.1     pooka 	/*
    435        1.1     pooka 	 * the page wasn't resident and we're not overwriting,
    436        1.1     pooka 	 * so we're going to have to do some i/o.
    437        1.1     pooka 	 * find any additional pages needed to cover the expanded range.
    438        1.1     pooka 	 */
    439        1.1     pooka 
    440        1.1     pooka 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    441       1.29  uebayasi 	if (startoffset != origoffset || npages != orignmempages) {
    442       1.31  uebayasi 		int npgs;
    443        1.1     pooka 
    444        1.1     pooka 		/*
    445        1.1     pooka 		 * we need to avoid deadlocks caused by locking
    446        1.1     pooka 		 * additional pages at lower offsets than pages we
    447        1.1     pooka 		 * already have locked.  unlock them all and start over.
    448        1.1     pooka 		 */
    449        1.1     pooka 
    450       1.29  uebayasi 		genfs_rel_pages(&pgs[ridx], orignmempages);
    451        1.1     pooka 		memset(pgs, 0, pgs_size);
    452        1.1     pooka 
    453        1.1     pooka 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    454        1.1     pooka 		    startoffset, endoffset, 0,0);
    455        1.1     pooka 		npgs = npages;
    456        1.1     pooka 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    457        1.1     pooka 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    458  1.36.2.27  uebayasi 			if (!glocked) {
    459  1.36.2.27  uebayasi 				genfs_node_unlock(vp);
    460  1.36.2.27  uebayasi 			}
    461        1.1     pooka 			KASSERT(async != 0);
    462        1.1     pooka 			genfs_rel_pages(pgs, npages);
    463        1.2        ad 			mutex_exit(&uobj->vmobjlock);
    464        1.1     pooka 			error = EBUSY;
    465       1.33  uebayasi 			goto out_err_free;
    466        1.1     pooka 		}
    467        1.1     pooka 	}
    468       1.34  uebayasi 
    469        1.2        ad 	mutex_exit(&uobj->vmobjlock);
    470        1.1     pooka 
    471       1.34  uebayasi     {
    472       1.34  uebayasi 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    473       1.34  uebayasi 	vaddr_t kva;
    474       1.34  uebayasi 	struct buf *bp, *mbp;
    475       1.34  uebayasi 	bool sawhole = false;
    476       1.34  uebayasi 
    477        1.1     pooka 	/*
    478        1.1     pooka 	 * read the desired page(s).
    479        1.1     pooka 	 */
    480        1.1     pooka 
    481        1.1     pooka 	totalbytes = npages << PAGE_SHIFT;
    482        1.1     pooka 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    483        1.1     pooka 	tailbytes = totalbytes - bytes;
    484        1.1     pooka 	skipbytes = 0;
    485        1.1     pooka 
    486        1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
    487        1.1     pooka 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    488        1.1     pooka 
    489        1.2        ad 	mbp = getiobuf(vp, true);
    490        1.1     pooka 	mbp->b_bufsize = totalbytes;
    491        1.1     pooka 	mbp->b_data = (void *)kva;
    492        1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
    493        1.2        ad 	mbp->b_cflags = BC_BUSY;
    494        1.2        ad 	if (async) {
    495        1.2        ad 		mbp->b_flags = B_READ | B_ASYNC;
    496        1.2        ad 		mbp->b_iodone = uvm_aio_biodone;
    497        1.2        ad 	} else {
    498        1.2        ad 		mbp->b_flags = B_READ;
    499        1.2        ad 		mbp->b_iodone = NULL;
    500        1.2        ad 	}
    501        1.1     pooka 	if (async)
    502        1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    503        1.1     pooka 	else
    504        1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    505        1.1     pooka 
    506        1.1     pooka 	/*
    507        1.1     pooka 	 * if EOF is in the middle of the range, zero the part past EOF.
    508        1.1     pooka 	 * skip over pages which are not PG_FAKE since in that case they have
    509        1.1     pooka 	 * valid data that we need to preserve.
    510        1.1     pooka 	 */
    511        1.1     pooka 
    512        1.1     pooka 	tailstart = bytes;
    513        1.1     pooka 	while (tailbytes > 0) {
    514        1.1     pooka 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    515        1.1     pooka 
    516        1.1     pooka 		KASSERT(len <= tailbytes);
    517        1.1     pooka 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    518        1.1     pooka 			memset((void *)(kva + tailstart), 0, len);
    519        1.1     pooka 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    520        1.1     pooka 			    kva, tailstart, len, 0);
    521        1.1     pooka 		}
    522        1.1     pooka 		tailstart += len;
    523        1.1     pooka 		tailbytes -= len;
    524        1.1     pooka 	}
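	/*
	 * Example: if "bytes" ends 1000 bytes into a 4096-byte page, the
	 * first pass above zeroes the remaining 3096 bytes of that page
	 * (len = PAGE_SIZE - (tailstart & PAGE_MASK)), and later passes
	 * zero whole pages until tailbytes reaches zero.
	 */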
    525        1.1     pooka 
    526        1.1     pooka 	/*
    527        1.1     pooka 	 * now loop over the pages, reading as needed.
    528        1.1     pooka 	 */
    529        1.1     pooka 
    530        1.1     pooka 	bp = NULL;
    531       1.28  uebayasi 	off_t offset;
    532       1.28  uebayasi 	for (offset = startoffset;
    533        1.1     pooka 	    bytes > 0;
    534        1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
    535       1.30  uebayasi 		int run;
    536       1.25  uebayasi 		daddr_t lbn, blkno;
    537       1.24  uebayasi 		int pidx;
    538       1.26  uebayasi 		struct vnode *devvp;
    539        1.1     pooka 
    540        1.1     pooka 		/*
    541        1.1     pooka 		 * skip pages which don't need to be read.
    542        1.1     pooka 		 */
    543        1.1     pooka 
    544        1.1     pooka 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    545        1.1     pooka 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    546        1.1     pooka 			size_t b;
    547        1.1     pooka 
    548        1.1     pooka 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    549        1.1     pooka 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    550        1.1     pooka 				sawhole = true;
    551        1.1     pooka 			}
    552        1.1     pooka 			b = MIN(PAGE_SIZE, bytes);
    553        1.1     pooka 			offset += b;
    554        1.1     pooka 			bytes -= b;
    555        1.1     pooka 			skipbytes += b;
    556        1.1     pooka 			pidx++;
    557        1.1     pooka 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    558        1.1     pooka 			    offset, 0,0,0);
    559        1.1     pooka 			if (bytes == 0) {
    560        1.1     pooka 				goto loopdone;
    561        1.1     pooka 			}
    562        1.1     pooka 		}
    563        1.1     pooka 
    564        1.1     pooka 		/*
    565        1.1     pooka 		 * bmap the file to find out the blkno to read from and
    566        1.1     pooka 		 * how much we can read in one i/o.  if bmap returns an error,
    567        1.1     pooka 		 * skip the rest of the top-level i/o.
    568        1.1     pooka 		 */
    569        1.1     pooka 
    570        1.1     pooka 		lbn = offset >> fs_bshift;
    571        1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    572        1.1     pooka 		if (error) {
    573        1.1     pooka 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    574       1.36  uebayasi 			    lbn,error,0,0);
    575        1.1     pooka 			skipbytes += bytes;
    576       1.36  uebayasi 			bytes = 0;
    577        1.1     pooka 			goto loopdone;
    578        1.1     pooka 		}
    579        1.1     pooka 
    580        1.1     pooka 		/*
    581        1.1     pooka 		 * see how many pages can be read with this i/o.
    582        1.1     pooka 		 * reduce the i/o size if necessary to avoid
    583        1.1     pooka 		 * overwriting pages with valid data.
    584        1.1     pooka 		 */
    585        1.1     pooka 
    586        1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    587        1.1     pooka 		    bytes);
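		/*
		 * Example: VOP_BMAP() above mapped logical block "lbn" to
		 * device block "blkno" and reported "run" further contiguous
		 * blocks; with fs_bshift == 13, a run of 2 at lbn 3 lets one
		 * i/o extend through logical block 5, i.e. up to byte
		 * (3 + 1 + 2) << 13 of the file.
		 */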
    588        1.1     pooka 		if (offset + iobytes > round_page(offset)) {
    589       1.24  uebayasi 			int pcount;
    590       1.24  uebayasi 
    591        1.1     pooka 			pcount = 1;
    592        1.1     pooka 			while (pidx + pcount < npages &&
    593        1.1     pooka 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    594        1.1     pooka 				pcount++;
    595        1.1     pooka 			}
    596        1.1     pooka 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    597        1.1     pooka 			    (offset - trunc_page(offset)));
    598        1.1     pooka 		}
    599        1.1     pooka 
    600        1.1     pooka 		/*
    601        1.1     pooka 		 * if this block isn't allocated, zero it instead of
    602        1.1     pooka 		 * reading it.  unless we are going to allocate blocks,
    603        1.1     pooka 		 * mark the pages we zeroed PG_RDONLY.
    604        1.1     pooka 		 */
    605        1.1     pooka 
    606       1.36  uebayasi 		if (blkno == (daddr_t)-1) {
    607        1.1     pooka 			int holepages = (round_page(offset + iobytes) -
    608        1.1     pooka 			    trunc_page(offset)) >> PAGE_SHIFT;
    609        1.1     pooka 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    610        1.1     pooka 
    611        1.1     pooka 			sawhole = true;
    612        1.1     pooka 			memset((char *)kva + (offset - startoffset), 0,
    613        1.1     pooka 			    iobytes);
    614        1.1     pooka 			skipbytes += iobytes;
    615        1.1     pooka 
    616        1.1     pooka 			for (i = 0; i < holepages; i++) {
    617       1.35  uebayasi 				if (memwrite) {
    618        1.1     pooka 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    619        1.1     pooka 				}
    620        1.1     pooka 				if (!blockalloc) {
    621        1.1     pooka 					pgs[pidx + i]->flags |= PG_RDONLY;
    622        1.1     pooka 				}
    623        1.1     pooka 			}
    624        1.1     pooka 			continue;
    625        1.1     pooka 		}
    626        1.1     pooka 
    627        1.1     pooka 		/*
    628        1.1     pooka 		 * allocate a sub-buf for this piece of the i/o
    629        1.1     pooka 		 * (or just use mbp if there's only 1 piece),
    630        1.1     pooka 		 * and start it going.
    631        1.1     pooka 		 */
    632        1.1     pooka 
    633        1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
    634        1.1     pooka 			bp = mbp;
    635        1.1     pooka 		} else {
    636       1.36  uebayasi 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
    637       1.36  uebayasi 			    vp, bp, vp->v_numoutput, 0);
    638        1.2        ad 			bp = getiobuf(vp, true);
    639        1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    640        1.1     pooka 		}
    641        1.1     pooka 		bp->b_lblkno = 0;
    642        1.1     pooka 
    643        1.1     pooka 		/* adjust physical blkno for partial blocks */
    644        1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    645        1.1     pooka 		    dev_bshift);
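		/*
		 * Example: with fs_bshift == 13 and dev_bshift == 9, an i/o
		 * starting 0x1200 bytes into logical block "lbn" begins
		 * 0x1200 >> 9 == 9 device (512-byte) blocks past "blkno".
		 */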
    646        1.1     pooka 
    647        1.1     pooka 		UVMHIST_LOG(ubchist,
    648        1.1     pooka 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    649       1.36  uebayasi 		    bp, offset, bp->b_bcount, bp->b_blkno);
    650        1.1     pooka 
    651        1.1     pooka 		VOP_STRATEGY(devvp, bp);
    652        1.1     pooka 	}
    653        1.1     pooka 
    654        1.1     pooka loopdone:
    655        1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
    656        1.1     pooka 	if (async) {
    657        1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    658  1.36.2.27  uebayasi 		if (!glocked) {
    659  1.36.2.27  uebayasi 			genfs_node_unlock(vp);
    660  1.36.2.27  uebayasi 		}
    661        1.1     pooka 		error = 0;
    662       1.33  uebayasi 		goto out_err_free;
    663        1.1     pooka 	}
    664        1.1     pooka 	if (bp != NULL) {
    665        1.1     pooka 		error = biowait(mbp);
    666        1.1     pooka 	}
    667        1.1     pooka 
    668       1.19     rmind 	/* Remove the mapping (make KVA available as soon as possible) */
    669       1.19     rmind 	uvm_pagermapout(kva, npages);
    670       1.19     rmind 
    671        1.1     pooka 	/*
     672        1.1     pooka 	 * if we encountered a hole then we have to do a little more work.
    673        1.1     pooka 	 * for read faults, we marked the page PG_RDONLY so that future
    674        1.1     pooka 	 * write accesses to the page will fault again.
    675        1.1     pooka 	 * for write faults, we must make sure that the backing store for
    676        1.1     pooka 	 * the page is completely allocated while the pages are locked.
    677        1.1     pooka 	 */
    678        1.1     pooka 
    679        1.1     pooka 	if (!error && sawhole && blockalloc) {
    680        1.9    simonb 		/*
    681        1.9    simonb 		 * XXX: This assumes that we come here only via
    682        1.9    simonb 		 * the mmio path
    683        1.9    simonb 		 */
    684       1.11      yamt 		if (vp->v_mount->mnt_wapbl) {
    685        1.9    simonb 			error = WAPBL_BEGIN(vp->v_mount);
    686        1.9    simonb 		}
    687        1.9    simonb 
    688        1.9    simonb 		if (!error) {
    689        1.9    simonb 			error = GOP_ALLOC(vp, startoffset,
    690        1.9    simonb 			    npages << PAGE_SHIFT, 0, cred);
    691       1.11      yamt 			if (vp->v_mount->mnt_wapbl) {
    692        1.9    simonb 				WAPBL_END(vp->v_mount);
    693        1.9    simonb 			}
    694        1.9    simonb 		}
    695        1.9    simonb 
    696        1.1     pooka 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    697        1.1     pooka 		    startoffset, npages << PAGE_SHIFT, error,0);
    698        1.1     pooka 		if (!error) {
    699        1.1     pooka 			for (i = 0; i < npages; i++) {
    700       1.31  uebayasi 				struct vm_page *pg = pgs[i];
    701       1.31  uebayasi 
    702       1.31  uebayasi 				if (pg == NULL) {
    703        1.1     pooka 					continue;
    704        1.1     pooka 				}
    705       1.31  uebayasi 				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
    706        1.1     pooka 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    707       1.31  uebayasi 				    pg,0,0,0);
    708        1.1     pooka 			}
    709        1.1     pooka 		}
    710        1.1     pooka 	}
    711  1.36.2.27  uebayasi 	if (!glocked) {
    712  1.36.2.27  uebayasi 		genfs_node_unlock(vp);
    713  1.36.2.27  uebayasi 	}
    714       1.18     rmind 
    715       1.18     rmind 	putiobuf(mbp);
    716       1.34  uebayasi     }
    717       1.18     rmind 
    718        1.2        ad 	mutex_enter(&uobj->vmobjlock);
    719        1.1     pooka 
    720        1.1     pooka 	/*
    721        1.1     pooka 	 * we're almost done!  release the pages...
    722        1.1     pooka 	 * for errors, we free the pages.
    723        1.1     pooka 	 * otherwise we activate them and mark them as valid and clean.
    724        1.1     pooka 	 * also, unbusy pages that were not actually requested.
    725        1.1     pooka 	 */
    726        1.1     pooka 
    727        1.1     pooka 	if (error) {
    728        1.1     pooka 		for (i = 0; i < npages; i++) {
    729       1.31  uebayasi 			struct vm_page *pg = pgs[i];
    730       1.31  uebayasi 
    731       1.31  uebayasi 			if (pg == NULL) {
    732        1.1     pooka 				continue;
    733        1.1     pooka 			}
    734        1.1     pooka 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    735       1.31  uebayasi 			    pg, pg->flags, 0,0);
    736       1.31  uebayasi 			if (pg->flags & PG_FAKE) {
    737       1.31  uebayasi 				pg->flags |= PG_RELEASED;
    738        1.1     pooka 			}
    739        1.1     pooka 		}
    740        1.2        ad 		mutex_enter(&uvm_pageqlock);
    741        1.1     pooka 		uvm_page_unbusy(pgs, npages);
    742        1.2        ad 		mutex_exit(&uvm_pageqlock);
    743        1.2        ad 		mutex_exit(&uobj->vmobjlock);
    744        1.1     pooka 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    745       1.33  uebayasi 		goto out_err_free;
    746        1.1     pooka 	}
    747        1.1     pooka 
    748        1.1     pooka out:
    749        1.1     pooka 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    750        1.1     pooka 	error = 0;
    751        1.2        ad 	mutex_enter(&uvm_pageqlock);
    752        1.1     pooka 	for (i = 0; i < npages; i++) {
    753       1.31  uebayasi 		struct vm_page *pg = pgs[i];
    754        1.1     pooka 		if (pg == NULL) {
    755        1.1     pooka 			continue;
    756        1.1     pooka 		}
    757        1.1     pooka 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    758        1.1     pooka 		    pg, pg->flags, 0,0);
    759        1.1     pooka 		if (pg->flags & PG_FAKE && !overwrite) {
    760        1.1     pooka 			pg->flags &= ~(PG_FAKE);
    761        1.1     pooka 			pmap_clear_modify(pgs[i]);
    762        1.1     pooka 		}
    763       1.35  uebayasi 		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    764       1.29  uebayasi 		if (i < ridx || i >= ridx + orignmempages || async) {
    765        1.1     pooka 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    766        1.1     pooka 			    pg, pg->offset,0,0);
    767        1.1     pooka 			if (pg->flags & PG_WANTED) {
    768        1.1     pooka 				wakeup(pg);
    769        1.1     pooka 			}
    770        1.1     pooka 			if (pg->flags & PG_FAKE) {
    771        1.1     pooka 				KASSERT(overwrite);
    772        1.1     pooka 				uvm_pagezero(pg);
    773        1.1     pooka 			}
    774        1.1     pooka 			if (pg->flags & PG_RELEASED) {
    775        1.1     pooka 				uvm_pagefree(pg);
    776        1.1     pooka 				continue;
    777        1.1     pooka 			}
    778        1.1     pooka 			uvm_pageenqueue(pg);
    779        1.1     pooka 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    780        1.1     pooka 			UVM_PAGE_OWN(pg, NULL);
    781        1.1     pooka 		}
    782        1.1     pooka 	}
    783        1.2        ad 	mutex_exit(&uvm_pageqlock);
    784  1.36.2.21  uebayasi 	if (memwrite) {
    785  1.36.2.21  uebayasi 		genfs_markdirty(vp);
    786  1.36.2.21  uebayasi 	}
    787        1.2        ad 	mutex_exit(&uobj->vmobjlock);
    788        1.1     pooka 	if (ap->a_m != NULL) {
    789        1.1     pooka 		memcpy(ap->a_m, &pgs[ridx],
    790       1.29  uebayasi 		    orignmempages * sizeof(struct vm_page *));
    791        1.1     pooka 	}
    792        1.1     pooka 
    793       1.33  uebayasi out_err_free:
    794       1.14  christos 	if (pgs != NULL && pgs != pgs_onstack)
    795        1.1     pooka 		kmem_free(pgs, pgs_size);
    796       1.33  uebayasi out_err:
    797        1.1     pooka 	if (has_trans)
    798        1.1     pooka 		fstrans_done(vp->v_mount);
    799  1.36.2.21  uebayasi 	return error;
    800        1.1     pooka }
    801        1.1     pooka 
    802   1.36.2.1  uebayasi #ifdef XIP
    803   1.36.2.9  uebayasi /*
    804   1.36.2.9  uebayasi  * genfs_do_getpages_xip
     805   1.36.2.9  uebayasi  *      Return the "direct pages" of an XIP vnode.  The block addresses
     806   1.36.2.9  uebayasi  *      of the XIP vnode's pages are returned to the VM fault handler as
     807   1.36.2.9  uebayasi  *	the physical addresses that are actually mapped.
    808   1.36.2.9  uebayasi  */
    809   1.36.2.1  uebayasi static int
    810   1.36.2.1  uebayasi genfs_do_getpages_xip(void *v)
    811   1.36.2.1  uebayasi {
    812   1.36.2.1  uebayasi 	struct vop_getpages_args /* {
    813   1.36.2.1  uebayasi 		struct vnode *a_vp;
    814   1.36.2.1  uebayasi 		voff_t a_offset;
    815   1.36.2.1  uebayasi 		struct vm_page **a_m;
    816   1.36.2.1  uebayasi 		int *a_count;
    817   1.36.2.1  uebayasi 		int a_centeridx;
    818   1.36.2.1  uebayasi 		vm_prot_t a_access_type;
    819   1.36.2.1  uebayasi 		int a_advice;
    820   1.36.2.1  uebayasi 		int a_flags;
    821   1.36.2.1  uebayasi 	} */ * const ap = v;
    822   1.36.2.1  uebayasi 
    823  1.36.2.35  uebayasi 	UVMHIST_FUNC("genfs_do_getpages_xip"); UVMHIST_CALLED(ubchist);
    824  1.36.2.35  uebayasi 
    825  1.36.2.40  uebayasi 	if ((ap->a_flags & PGO_LOCKED) != 0) {
    826  1.36.2.40  uebayasi 		*ap->a_count = 0;
    827  1.36.2.35  uebayasi 		return 0;
    828  1.36.2.35  uebayasi 	} else
    829  1.36.2.35  uebayasi 		return genfs_do_getpages_xip_io(
    830  1.36.2.40  uebayasi 			ap->a_vp,
    831  1.36.2.40  uebayasi 			ap->a_offset,
    832  1.36.2.40  uebayasi 			ap->a_m,
    833  1.36.2.40  uebayasi 			ap->a_count,
    834  1.36.2.40  uebayasi 			ap->a_centeridx,
    835  1.36.2.40  uebayasi 			ap->a_access_type,
    836  1.36.2.40  uebayasi 			ap->a_advice,
    837  1.36.2.40  uebayasi 			ap->a_flags);
    838  1.36.2.35  uebayasi }
    839  1.36.2.35  uebayasi 
    840  1.36.2.35  uebayasi static int
    841  1.36.2.35  uebayasi genfs_do_getpages_xip_io(
    842  1.36.2.35  uebayasi 	struct vnode *vp,
    843  1.36.2.35  uebayasi 	voff_t offset,
    844  1.36.2.35  uebayasi 	struct vm_page **pps,
    845  1.36.2.35  uebayasi 	int *npagesp,
    846  1.36.2.35  uebayasi 	int centeridx,
    847  1.36.2.35  uebayasi 	vm_prot_t access_type,
    848  1.36.2.35  uebayasi 	int advice,
    849  1.36.2.35  uebayasi 	int flags)
    850  1.36.2.35  uebayasi {
    851   1.36.2.1  uebayasi 	struct uvm_object * const uobj = &vp->v_uobj;
    852   1.36.2.1  uebayasi 
    853   1.36.2.1  uebayasi 	int error;
    854   1.36.2.1  uebayasi 	off_t eof, sbkoff, ebkoff, off;
    855   1.36.2.1  uebayasi 	int npages;
    856   1.36.2.1  uebayasi 	int fs_bshift, fs_bsize, dev_bshift, dev_bsize;
    857   1.36.2.1  uebayasi 	int i;
    858   1.36.2.1  uebayasi 
    859  1.36.2.35  uebayasi 	UVMHIST_FUNC("genfs_do_getpages_xip_io"); UVMHIST_CALLED(ubchist);
    860   1.36.2.1  uebayasi 
    861   1.36.2.1  uebayasi 	GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_MEM);
    862   1.36.2.1  uebayasi 	npages = MIN(*npagesp, round_page(eof - offset) >> PAGE_SHIFT);
    863   1.36.2.1  uebayasi 
    864   1.36.2.1  uebayasi 	fs_bshift = vp->v_mount->mnt_fs_bshift;
    865   1.36.2.1  uebayasi 	fs_bsize = 1 << fs_bshift;
    866   1.36.2.1  uebayasi 	dev_bshift = vp->v_mount->mnt_dev_bshift;
    867   1.36.2.1  uebayasi 	dev_bsize = 1 << dev_bshift;
    868   1.36.2.1  uebayasi 
    869   1.36.2.1  uebayasi 	sbkoff = offset & ~(fs_bsize - 1);
    870  1.36.2.23  uebayasi 	ebkoff = ((offset + PAGE_SIZE * npages) + (fs_bsize - 1)) &
    871  1.36.2.23  uebayasi 	    ~(fs_bsize - 1);
    872   1.36.2.1  uebayasi 
    873  1.36.2.23  uebayasi 	UVMHIST_LOG(ubchist, "xip npages=%d sbkoff=%lx ebkoff=%lx",
    874  1.36.2.23  uebayasi 	    npages, (long)sbkoff, (long)ebkoff, 0);
    875   1.36.2.1  uebayasi 
    876   1.36.2.1  uebayasi 	off = offset;
    877  1.36.2.13  uebayasi 	for (i = 0; i < npages; i++) {
    878   1.36.2.1  uebayasi 		daddr_t lbn, blkno;
    879   1.36.2.1  uebayasi 		int run;
    880   1.36.2.1  uebayasi 		struct vnode *devvp;
    881   1.36.2.1  uebayasi 
    882   1.36.2.1  uebayasi 		lbn = (off & ~(fs_bsize - 1)) >> fs_bshift;
    883   1.36.2.1  uebayasi 
    884   1.36.2.1  uebayasi 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    885   1.36.2.1  uebayasi 		KASSERT(error == 0);
    886  1.36.2.23  uebayasi 		UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d",
    887  1.36.2.23  uebayasi 		    (long)lbn, (long)blkno, run, 0);
    888   1.36.2.1  uebayasi 
    889  1.36.2.12  uebayasi 		/*
    890  1.36.2.12  uebayasi 		 * XIP page metadata assignment
    891  1.36.2.12  uebayasi 		 * - Unallocated block is redirected to the dedicated zero'ed
    892  1.36.2.12  uebayasi 		 *   page.
    893  1.36.2.12  uebayasi 		 */
    894   1.36.2.1  uebayasi 		if (blkno < 0) {
    895  1.36.2.37  uebayasi 			panic("XIP hole is not supported yet!");
    896   1.36.2.1  uebayasi 		} else {
    897  1.36.2.32  uebayasi 			daddr_t blk_off, fs_off;
    898  1.36.2.13  uebayasi 
    899  1.36.2.32  uebayasi 			blk_off = blkno << dev_bshift;
    900  1.36.2.32  uebayasi 			fs_off = off - (lbn << fs_bshift);
    901  1.36.2.32  uebayasi 
    902  1.36.2.34  uebayasi 			pps[i] = uvn_findpage_xip(devvp, &vp->v_uobj,
    903  1.36.2.32  uebayasi 			    blk_off + fs_off);
    904  1.36.2.32  uebayasi 			KASSERT(pps[i] != NULL);
    905   1.36.2.1  uebayasi 		}
    906   1.36.2.1  uebayasi 
    907   1.36.2.1  uebayasi 		UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
    908   1.36.2.1  uebayasi 			i,
    909  1.36.2.22  uebayasi 			(long)pps[i]->phys_addr,
    910   1.36.2.1  uebayasi 			pps[i],
    911   1.36.2.1  uebayasi 			0);
    912   1.36.2.1  uebayasi 
    913   1.36.2.1  uebayasi 		off += PAGE_SIZE;
    914   1.36.2.1  uebayasi 	}
    915   1.36.2.1  uebayasi 
    916  1.36.2.26  uebayasi 	mutex_enter(&uobj->vmobjlock);
    917  1.36.2.13  uebayasi 
    918  1.36.2.13  uebayasi 	for (i = 0; i < npages; i++) {
    919  1.36.2.13  uebayasi 		struct vm_page *pg = pps[i];
    920  1.36.2.13  uebayasi 
    921  1.36.2.31  uebayasi 		KASSERT((pg->flags & PG_RDONLY) != 0);
    922  1.36.2.33  uebayasi 		KASSERT((pg->flags & PG_BUSY) == 0);
    923  1.36.2.33  uebayasi 		KASSERT((pg->flags & PG_CLEAN) != 0);
    924  1.36.2.33  uebayasi 		KASSERT((pg->flags & PG_DEVICE) != 0);
    925  1.36.2.33  uebayasi 		pg->flags |= PG_BUSY;
    926  1.36.2.33  uebayasi 		pg->flags &= ~PG_FAKE;
    927  1.36.2.33  uebayasi 		pg->uobject = &vp->v_uobj;
    928  1.36.2.13  uebayasi 	}
    929  1.36.2.13  uebayasi 
    930  1.36.2.36  uebayasi 	mutex_exit(&uobj->vmobjlock);
    931  1.36.2.13  uebayasi 
    932  1.36.2.13  uebayasi 	*npagesp = npages;
    933   1.36.2.1  uebayasi 
    934   1.36.2.1  uebayasi 	return 0;
    935   1.36.2.1  uebayasi }
    936   1.36.2.1  uebayasi #endif
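
/*
 * Illustrative sketch (not compiled in): the device-offset computation
 * used by genfs_do_getpages_xip_io() above.  For a page at file offset
 * "off", falling in logical block "lbn" mapped by VOP_BMAP() to device
 * block "blkno", the page handed back via uvn_findpage_xip() sits at
 * device offset
 *
 *	(blkno << dev_bshift) + (off - (lbn << fs_bshift))
 *
 * i.e. the block's byte address on the device plus the offset of the
 * page within the filesystem block.
 */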
    937   1.36.2.1  uebayasi 
    938        1.1     pooka /*
    939        1.1     pooka  * generic VM putpages routine.
    940        1.1     pooka  * Write the given range of pages to backing store.
    941        1.1     pooka  *
    942        1.1     pooka  * => "offhi == 0" means flush all pages at or after "offlo".
    943        1.1     pooka  * => object should be locked by caller.  we return with the
    944        1.1     pooka  *      object unlocked.
    945        1.1     pooka  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    946        1.1     pooka  *	thus, a caller might want to unlock higher level resources
    947        1.1     pooka  *	(e.g. vm_map) before calling flush.
    948        1.1     pooka  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
    949        1.1     pooka  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
    950        1.1     pooka  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
    951        1.1     pooka  *	that new pages are inserted on the tail end of the list.   thus,
    952        1.1     pooka  *	we can make a complete pass through the object in one go by starting
    953        1.1     pooka  *	at the head and working towards the tail (new pages are put in
    954        1.1     pooka  *	front of us).
    955        1.1     pooka  * => NOTE: we are allowed to lock the page queues, so the caller
    956        1.1     pooka  *	must not be holding the page queue lock.
    957        1.1     pooka  *
    958        1.1     pooka  * note on "cleaning" object and PG_BUSY pages:
    959        1.1     pooka  *	this routine is holding the lock on the object.   the only time
    960        1.1     pooka  *	that it can run into a PG_BUSY page that it does not own is if
    961        1.1     pooka  *	some other process has started I/O on the page (e.g. either
    962        1.1     pooka  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
    963        1.1     pooka  *	in, then it can not be dirty (!PG_CLEAN) because no one has
    964        1.1     pooka  *	had a chance to modify it yet.    if the PG_BUSY page is being
    965        1.1     pooka  *	paged out then it means that someone else has already started
    966        1.1     pooka  *	cleaning the page for us (how nice!).    in this case, if we
    967        1.1     pooka  *	have syncio specified, then after we make our pass through the
    968        1.1     pooka  *	object we need to wait for the other PG_BUSY pages to clear
    969        1.1     pooka  *	off (i.e. we need to do an iosync).   also note that once a
    970        1.1     pooka  *	page is PG_BUSY it must stay in its object until it is un-busyed.
    971        1.1     pooka  *
    972        1.1     pooka  * note on page traversal:
    973        1.1     pooka  *	we can traverse the pages in an object either by going down the
    974        1.1     pooka  *	linked list in "uobj->memq", or we can go over the address range
    975        1.1     pooka  *	by page doing hash table lookups for each address.    depending
      976        1.1     pooka  *	page by page, doing hash table lookups for each address.    depending
    977        1.1     pooka  *	or the other.   we set "by_list" to true if we are using memq.
    978        1.1     pooka  *	if the cost of a hash lookup was equal to the cost of the list
    979        1.1     pooka  *	traversal we could compare the number of pages in the start->stop
    980        1.1     pooka  *	range to the total number of pages in the object.   however, it
    981        1.1     pooka  *	seems that a hash table lookup is more expensive than the linked
    982        1.1     pooka  *	list traversal, so we multiply the number of pages in the
    983        1.1     pooka  *	range by an estimate of the relatively higher cost of the hash lookup.
    984        1.1     pooka  */
    985        1.1     pooka 
    986        1.1     pooka int
    987        1.1     pooka genfs_putpages(void *v)
    988        1.1     pooka {
    989        1.1     pooka 	struct vop_putpages_args /* {
    990        1.1     pooka 		struct vnode *a_vp;
    991        1.1     pooka 		voff_t a_offlo;
    992        1.1     pooka 		voff_t a_offhi;
    993        1.1     pooka 		int a_flags;
    994       1.22  uebayasi 	} */ * const ap = v;
    995        1.1     pooka 
    996  1.36.2.24  uebayasi #ifdef XIP
    997  1.36.2.24  uebayasi 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
    998  1.36.2.24  uebayasi 		return genfs_do_putpages_xip(ap->a_vp, ap->a_offlo, ap->a_offhi,
    999  1.36.2.24  uebayasi 		    ap->a_flags, NULL);
   1000  1.36.2.24  uebayasi 	else
   1001  1.36.2.24  uebayasi #endif
   1002        1.1     pooka 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
   1003        1.1     pooka 	    ap->a_flags, NULL);
   1004        1.1     pooka }
   1005        1.1     pooka 
   1006        1.1     pooka int
   1007        1.4      yamt genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
   1008        1.4      yamt     int origflags, struct vm_page **busypg)
   1009        1.1     pooka {
   1010       1.22  uebayasi 	struct uvm_object * const uobj = &vp->v_uobj;
   1011       1.22  uebayasi 	kmutex_t * const slock = &uobj->vmobjlock;
   1012        1.1     pooka 	off_t off;
   1013        1.1     pooka 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1014        1.1     pooka #define maxpages (MAXPHYS >> PAGE_SHIFT)
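	/* Example: with MAXPHYS == 64 KB and 4 KB pages, maxpages is 16. */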
   1015        1.2        ad 	int i, error, npages, nback;
   1016        1.1     pooka 	int freeflag;
   1017        1.1     pooka 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1018        1.1     pooka 	bool wasclean, by_list, needs_clean, yld;
   1019        1.4      yamt 	bool async = (origflags & PGO_SYNCIO) == 0;
   1020        1.1     pooka 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
   1021       1.22  uebayasi 	struct lwp * const l = curlwp ? curlwp : &lwp0;
   1022       1.22  uebayasi 	struct genfs_node * const gp = VTOG(vp);
   1023        1.4      yamt 	int flags;
   1024        1.1     pooka 	int dirtygen;
   1025        1.4      yamt 	bool modified;
   1026       1.12   hannken 	bool need_wapbl;
   1027        1.4      yamt 	bool has_trans;
   1028        1.1     pooka 	bool cleanall;
   1029        1.4      yamt 	bool onworklst;
   1030        1.1     pooka 
   1031        1.1     pooka 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1032        1.1     pooka 
   1033        1.4      yamt 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1034        1.1     pooka 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1035        1.1     pooka 	KASSERT(startoff < endoff || endoff == 0);
   1036        1.1     pooka 
   1037        1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1038        1.1     pooka 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1039        1.1     pooka 
   1040        1.6   hannken 	has_trans = false;
   1041       1.12   hannken 	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
   1042       1.12   hannken 	    (origflags & PGO_JOURNALLOCKED) == 0);
   1043        1.6   hannken 
   1044        1.4      yamt retry:
   1045        1.4      yamt 	modified = false;
   1046        1.4      yamt 	flags = origflags;
   1047        1.1     pooka 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
   1048        1.1     pooka 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
   1049        1.1     pooka 	if (uobj->uo_npages == 0) {
   1050        1.1     pooka 		if (vp->v_iflag & VI_ONWORKLST) {
   1051        1.1     pooka 			vp->v_iflag &= ~VI_WRMAPDIRTY;
   1052        1.1     pooka 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1053        1.1     pooka 				vn_syncer_remove_from_worklist(vp);
   1054        1.1     pooka 		}
   1055       1.12   hannken 		if (has_trans) {
   1056       1.12   hannken 			if (need_wapbl)
   1057       1.12   hannken 				WAPBL_END(vp->v_mount);
   1058        1.6   hannken 			fstrans_done(vp->v_mount);
   1059       1.12   hannken 		}
   1060        1.2        ad 		mutex_exit(slock);
   1061        1.1     pooka 		return (0);
   1062        1.1     pooka 	}
   1063        1.1     pooka 
   1064        1.1     pooka 	/*
   1065        1.1     pooka 	 * the vnode has pages, set up to process the request.
   1066        1.1     pooka 	 */
   1067        1.1     pooka 
   1068        1.6   hannken 	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
   1069        1.2        ad 		mutex_exit(slock);
   1070        1.1     pooka 		if (pagedaemon) {
   1071        1.1     pooka 			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
   1072        1.1     pooka 			if (error)
   1073        1.1     pooka 				return error;
   1074        1.1     pooka 		} else
   1075        1.1     pooka 			fstrans_start(vp->v_mount, FSTRANS_LAZY);
   1076       1.12   hannken 		if (need_wapbl) {
   1077       1.12   hannken 			error = WAPBL_BEGIN(vp->v_mount);
   1078       1.12   hannken 			if (error) {
   1079       1.12   hannken 				fstrans_done(vp->v_mount);
   1080       1.12   hannken 				return error;
   1081       1.12   hannken 			}
   1082       1.12   hannken 		}
   1083        1.1     pooka 		has_trans = true;
   1084        1.2        ad 		mutex_enter(slock);
   1085        1.6   hannken 		goto retry;
   1086        1.1     pooka 	}
   1087        1.1     pooka 
   1088        1.1     pooka 	error = 0;
   1089        1.1     pooka 	wasclean = (vp->v_numoutput == 0);
   1090        1.1     pooka 	off = startoff;
   1091        1.1     pooka 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1092        1.1     pooka 		endoff = trunc_page(LLONG_MAX);
   1093        1.1     pooka 	}
   1094        1.1     pooka 	by_list = (uobj->uo_npages <=
   1095       1.17      yamt 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
   1096        1.1     pooka 
   1097        1.1     pooka #if !defined(DEBUG)
   1098        1.1     pooka 	/*
   1099        1.1     pooka 	 * if this vnode is known not to have dirty pages,
   1100        1.1     pooka 	 * don't bother to clean it out.
   1101        1.1     pooka 	 */
   1102        1.1     pooka 
   1103        1.1     pooka 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
   1104        1.1     pooka 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1105        1.1     pooka 			goto skip_scan;
   1106        1.1     pooka 		}
   1107        1.1     pooka 		flags &= ~PGO_CLEANIT;
   1108        1.1     pooka 	}
   1109        1.1     pooka #endif /* !defined(DEBUG) */
   1110        1.1     pooka 
   1111        1.1     pooka 	/*
   1112        1.1     pooka 	 * start the loop.  when scanning by list, hold the last page
   1113        1.1     pooka 	 * in the list before we start.  pages allocated after we start
   1114        1.1     pooka 	 * will be added to the end of the list, so we can stop at the
   1115        1.1     pooka 	 * current last page.
   1116        1.1     pooka 	 */
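                              	/*
                              	 * (curmp and endmp below are dummy marker pages living on the
                              	 * stack; the PG_MARKER flag lets every memq walker, including
                              	 * this loop, recognize and skip them.)
                              	 */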
   1117        1.1     pooka 
   1118        1.1     pooka 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1119        1.1     pooka 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1120        1.1     pooka 	    (vp->v_iflag & VI_ONWORKLST) != 0;
   1121        1.1     pooka 	dirtygen = gp->g_dirtygen;
   1122        1.1     pooka 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1123        1.1     pooka 	if (by_list) {
   1124  1.36.2.21  uebayasi 		curmp.flags = PG_MARKER;
   1125  1.36.2.21  uebayasi 		endmp.flags = PG_MARKER;
   1126        1.1     pooka 		pg = TAILQ_FIRST(&uobj->memq);
   1127        1.8        ad 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
   1128        1.1     pooka 	} else {
   1129        1.1     pooka 		pg = uvm_pagelookup(uobj, off);
   1130        1.1     pooka 	}
   1131        1.1     pooka 	nextpg = NULL;
   1132        1.1     pooka 	while (by_list || off < endoff) {
   1133        1.1     pooka 
   1134        1.1     pooka 		/*
   1135        1.1     pooka 		 * if the current page is not interesting, move on to the next.
   1136        1.1     pooka 		 */
   1137        1.1     pooka 
   1138  1.36.2.21  uebayasi 		KASSERT(pg == NULL || pg->uobject == uobj ||
   1139  1.36.2.21  uebayasi 		    (pg->flags & PG_MARKER) != 0);
   1140        1.1     pooka 		KASSERT(pg == NULL ||
   1141        1.1     pooka 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1142  1.36.2.21  uebayasi 		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
   1143        1.1     pooka 		if (by_list) {
   1144        1.1     pooka 			if (pg == &endmp) {
   1145        1.1     pooka 				break;
   1146        1.1     pooka 			}
   1147  1.36.2.21  uebayasi 			if (pg->flags & PG_MARKER) {
   1148  1.36.2.21  uebayasi 				pg = TAILQ_NEXT(pg, listq.queue);
   1149  1.36.2.21  uebayasi 				continue;
   1150  1.36.2.21  uebayasi 			}
   1151        1.1     pooka 			if (pg->offset < startoff || pg->offset >= endoff ||
   1152        1.1     pooka 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1153        1.1     pooka 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1154        1.1     pooka 					wasclean = false;
   1155        1.1     pooka 				}
   1156        1.8        ad 				pg = TAILQ_NEXT(pg, listq.queue);
   1157        1.1     pooka 				continue;
   1158        1.1     pooka 			}
   1159        1.1     pooka 			off = pg->offset;
   1160        1.1     pooka 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1161        1.1     pooka 			if (pg != NULL) {
   1162        1.1     pooka 				wasclean = false;
   1163        1.1     pooka 			}
   1164        1.1     pooka 			off += PAGE_SIZE;
   1165        1.1     pooka 			if (off < endoff) {
   1166        1.1     pooka 				pg = uvm_pagelookup(uobj, off);
   1167        1.1     pooka 			}
   1168        1.1     pooka 			continue;
   1169        1.1     pooka 		}
   1170        1.1     pooka 
   1171        1.1     pooka 		/*
   1172        1.1     pooka 		 * if the current page needs to be cleaned and it's busy,
   1173        1.1     pooka 		 * wait for it to become unbusy.
   1174        1.1     pooka 		 */
   1175        1.1     pooka 
   1176        1.1     pooka 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1177        1.1     pooka 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1178        1.1     pooka 		if (pg->flags & PG_BUSY || yld) {
   1179        1.1     pooka 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1180        1.1     pooka 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1181        1.1     pooka 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1182        1.1     pooka 				error = EDEADLK;
   1183        1.1     pooka 				if (busypg != NULL)
   1184        1.1     pooka 					*busypg = pg;
   1185        1.1     pooka 				break;
   1186        1.1     pooka 			}
   1187        1.1     pooka 			if (pagedaemon) {
   1188        1.1     pooka 				/*
   1189        1.1     pooka 				 * someone has taken the page while we
   1190        1.1     pooka 				 * dropped the lock for fstrans_start.
   1191        1.1     pooka 				 */
   1192        1.1     pooka 				break;
   1193        1.1     pooka 			}
   1194        1.1     pooka 			if (by_list) {
   1195        1.8        ad 				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
   1196        1.1     pooka 				UVMHIST_LOG(ubchist, "curmp next %p",
   1197        1.8        ad 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1198        1.1     pooka 			}
   1199        1.1     pooka 			if (yld) {
   1200        1.2        ad 				mutex_exit(slock);
   1201        1.1     pooka 				preempt();
   1202        1.2        ad 				mutex_enter(slock);
   1203        1.1     pooka 			} else {
   1204        1.1     pooka 				pg->flags |= PG_WANTED;
   1205        1.1     pooka 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1206        1.2        ad 				mutex_enter(slock);
   1207        1.1     pooka 			}
   1208        1.1     pooka 			if (by_list) {
   1209        1.1     pooka 				UVMHIST_LOG(ubchist, "after next %p",
   1210        1.8        ad 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1211        1.8        ad 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1212        1.8        ad 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1213        1.1     pooka 			} else {
   1214        1.1     pooka 				pg = uvm_pagelookup(uobj, off);
   1215        1.1     pooka 			}
   1216        1.1     pooka 			continue;
   1217        1.1     pooka 		}
   1218        1.1     pooka 
   1219        1.1     pooka 		/*
   1220        1.1     pooka 		 * if we're freeing, remove all mappings of the page now.
    1221        1.1     pooka 		 * if we're cleaning, check if the page needs to be cleaned.
   1222        1.1     pooka 		 */
   1223        1.1     pooka 
   1224        1.1     pooka 		if (flags & PGO_FREE) {
   1225        1.1     pooka 			pmap_page_protect(pg, VM_PROT_NONE);
   1226        1.1     pooka 		} else if (flags & PGO_CLEANIT) {
   1227        1.1     pooka 
   1228        1.1     pooka 			/*
    1229        1.1     pooka 			 * if we still have some hope of pulling this vnode off
    1230        1.1     pooka 			 * the syncer queue, write-protect the page.
   1231        1.1     pooka 			 */
   1232        1.1     pooka 
   1233        1.1     pooka 			if (cleanall && wasclean &&
   1234        1.1     pooka 			    gp->g_dirtygen == dirtygen) {
   1235        1.1     pooka 
   1236        1.1     pooka 				/*
   1237        1.1     pooka 				 * uobj pages get wired only by uvm_fault
   1238        1.1     pooka 				 * where uobj is locked.
   1239        1.1     pooka 				 */
   1240        1.1     pooka 
   1241        1.1     pooka 				if (pg->wire_count == 0) {
   1242        1.1     pooka 					pmap_page_protect(pg,
   1243        1.1     pooka 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1244        1.1     pooka 				} else {
   1245        1.1     pooka 					cleanall = false;
   1246        1.1     pooka 				}
   1247        1.1     pooka 			}
   1248        1.1     pooka 		}
   1249        1.1     pooka 
   1250        1.1     pooka 		if (flags & PGO_CLEANIT) {
   1251        1.1     pooka 			needs_clean = pmap_clear_modify(pg) ||
   1252        1.1     pooka 			    (pg->flags & PG_CLEAN) == 0;
   1253        1.1     pooka 			pg->flags |= PG_CLEAN;
   1254        1.1     pooka 		} else {
   1255        1.1     pooka 			needs_clean = false;
   1256        1.1     pooka 		}
   1257        1.1     pooka 
   1258        1.1     pooka 		/*
   1259        1.1     pooka 		 * if we're cleaning, build a cluster.
   1260        1.1     pooka 		 * the cluster will consist of pages which are currently dirty,
   1261        1.1     pooka 		 * but they will be returned to us marked clean.
   1262        1.1     pooka 		 * if not cleaning, just operate on the one page.
   1263        1.1     pooka 		 */
   1264        1.1     pooka 
   1265        1.1     pooka 		if (needs_clean) {
   1266        1.1     pooka 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1267        1.1     pooka 			wasclean = false;
   1268        1.1     pooka 			memset(pgs, 0, sizeof(pgs));
   1269        1.1     pooka 			pg->flags |= PG_BUSY;
   1270        1.1     pooka 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1271        1.1     pooka 
   1272        1.1     pooka 			/*
   1273        1.1     pooka 			 * first look backward.
   1274        1.1     pooka 			 */
   1275        1.1     pooka 
   1276        1.1     pooka 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1277        1.1     pooka 			nback = npages;
   1278        1.1     pooka 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1279        1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
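                              			/*
                              			 * the backward scan deposits the nback pages it
                              			 * found in the last nback of the npages slots it
                              			 * was given; slide them to the front and zero the
                              			 * slots they vacated.
                              			 */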
   1280        1.1     pooka 			if (nback) {
   1281        1.1     pooka 				memmove(&pgs[0], &pgs[npages - nback],
   1282        1.1     pooka 				    nback * sizeof(pgs[0]));
   1283        1.1     pooka 				if (npages - nback < nback)
   1284        1.1     pooka 					memset(&pgs[nback], 0,
   1285        1.1     pooka 					    (npages - nback) * sizeof(pgs[0]));
   1286        1.1     pooka 				else
   1287        1.1     pooka 					memset(&pgs[npages - nback], 0,
   1288        1.1     pooka 					    nback * sizeof(pgs[0]));
   1289        1.1     pooka 			}
   1290        1.1     pooka 
   1291        1.1     pooka 			/*
   1292        1.1     pooka 			 * then plug in our page of interest.
   1293        1.1     pooka 			 */
   1294        1.1     pooka 
   1295        1.1     pooka 			pgs[nback] = pg;
   1296        1.1     pooka 
   1297        1.1     pooka 			/*
   1298        1.1     pooka 			 * then look forward to fill in the remaining space in
   1299        1.1     pooka 			 * the array of pages.
   1300        1.1     pooka 			 */
   1301        1.1     pooka 
   1302        1.1     pooka 			npages = maxpages - nback - 1;
   1303        1.1     pooka 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1304        1.1     pooka 			    &pgs[nback + 1],
   1305        1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1306        1.1     pooka 			npages += nback + 1;
   1307        1.1     pooka 		} else {
   1308        1.1     pooka 			pgs[0] = pg;
   1309        1.1     pooka 			npages = 1;
   1310        1.1     pooka 			nback = 0;
   1311        1.1     pooka 		}
   1312        1.1     pooka 
   1313        1.1     pooka 		/*
   1314        1.1     pooka 		 * apply FREE or DEACTIVATE options if requested.
   1315        1.1     pooka 		 */
   1316        1.1     pooka 
   1317        1.1     pooka 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1318        1.2        ad 			mutex_enter(&uvm_pageqlock);
   1319        1.1     pooka 		}
   1320        1.1     pooka 		for (i = 0; i < npages; i++) {
   1321        1.1     pooka 			tpg = pgs[i];
   1322        1.1     pooka 			KASSERT(tpg->uobject == uobj);
   1323        1.8        ad 			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
   1324        1.1     pooka 				pg = tpg;
   1325        1.1     pooka 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1326        1.1     pooka 				continue;
   1327        1.1     pooka 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1328        1.1     pooka 				uvm_pagedeactivate(tpg);
   1329        1.1     pooka 			} else if (flags & PGO_FREE) {
   1330        1.1     pooka 				pmap_page_protect(tpg, VM_PROT_NONE);
   1331        1.1     pooka 				if (tpg->flags & PG_BUSY) {
   1332        1.1     pooka 					tpg->flags |= freeflag;
   1333        1.1     pooka 					if (pagedaemon) {
   1334        1.2        ad 						uvm_pageout_start(1);
   1335        1.1     pooka 						uvm_pagedequeue(tpg);
   1336        1.1     pooka 					}
   1337        1.1     pooka 				} else {
   1338        1.1     pooka 
   1339        1.1     pooka 					/*
   1340        1.1     pooka 					 * ``page is not busy''
   1341        1.1     pooka 					 * implies that npages is 1
   1342        1.1     pooka 					 * and needs_clean is false.
   1343        1.1     pooka 					 */
   1344        1.1     pooka 
   1345        1.8        ad 					nextpg = TAILQ_NEXT(tpg, listq.queue);
   1346        1.1     pooka 					uvm_pagefree(tpg);
   1347        1.1     pooka 					if (pagedaemon)
   1348        1.1     pooka 						uvmexp.pdfreed++;
   1349        1.1     pooka 				}
   1350        1.1     pooka 			}
   1351        1.1     pooka 		}
   1352        1.1     pooka 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1353        1.2        ad 			mutex_exit(&uvm_pageqlock);
   1354        1.1     pooka 		}
   1355        1.1     pooka 		if (needs_clean) {
   1356        1.1     pooka 			modified = true;
   1357        1.1     pooka 
   1358        1.1     pooka 			/*
   1359        1.1     pooka 			 * start the i/o.  if we're traversing by list,
   1360        1.1     pooka 			 * keep our place in the list with a marker page.
   1361        1.1     pooka 			 */
   1362        1.1     pooka 
   1363        1.1     pooka 			if (by_list) {
   1364        1.1     pooka 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1365        1.8        ad 				    listq.queue);
   1366        1.1     pooka 			}
   1367        1.2        ad 			mutex_exit(slock);
   1368        1.1     pooka 			error = GOP_WRITE(vp, pgs, npages, flags);
   1369        1.2        ad 			mutex_enter(slock);
   1370        1.1     pooka 			if (by_list) {
   1371        1.8        ad 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1372        1.8        ad 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1373        1.1     pooka 			}
   1374        1.1     pooka 			if (error) {
   1375        1.1     pooka 				break;
   1376        1.1     pooka 			}
   1377        1.1     pooka 			if (by_list) {
   1378        1.1     pooka 				continue;
   1379        1.1     pooka 			}
   1380        1.1     pooka 		}
   1381        1.1     pooka 
   1382        1.1     pooka 		/*
   1383        1.1     pooka 		 * find the next page and continue if there was no error.
   1384        1.1     pooka 		 */
   1385        1.1     pooka 
   1386        1.1     pooka 		if (by_list) {
   1387        1.1     pooka 			if (nextpg) {
   1388        1.1     pooka 				pg = nextpg;
   1389        1.1     pooka 				nextpg = NULL;
   1390        1.1     pooka 			} else {
   1391        1.8        ad 				pg = TAILQ_NEXT(pg, listq.queue);
   1392        1.1     pooka 			}
   1393        1.1     pooka 		} else {
   1394        1.1     pooka 			off += (npages - nback) << PAGE_SHIFT;
   1395        1.1     pooka 			if (off < endoff) {
   1396        1.1     pooka 				pg = uvm_pagelookup(uobj, off);
   1397        1.1     pooka 			}
   1398        1.1     pooka 		}
   1399        1.1     pooka 	}
   1400        1.1     pooka 	if (by_list) {
   1401        1.8        ad 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
   1402        1.1     pooka 	}
   1403        1.1     pooka 
   1404        1.1     pooka 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1405        1.1     pooka 	    (vp->v_type != VBLK ||
   1406        1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1407        1.1     pooka 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1408        1.1     pooka 	}
   1409        1.1     pooka 
   1410        1.1     pooka 	/*
   1411        1.1     pooka 	 * if we're cleaning and there was nothing to clean,
   1412        1.1     pooka 	 * take us off the syncer list.  if we started any i/o
   1413        1.1     pooka 	 * and we're doing sync i/o, wait for all writes to finish.
   1414        1.1     pooka 	 */
   1415        1.1     pooka 
   1416        1.1     pooka 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1417        1.1     pooka 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1418        1.5      yamt #if defined(DEBUG)
   1419        1.8        ad 		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
   1420  1.36.2.21  uebayasi 			if ((pg->flags & PG_MARKER) != 0) {
   1421  1.36.2.21  uebayasi 				continue;
   1422  1.36.2.21  uebayasi 			}
   1423        1.5      yamt 			if ((pg->flags & PG_CLEAN) == 0) {
   1424        1.5      yamt 				printf("%s: %p: !CLEAN\n", __func__, pg);
   1425        1.5      yamt 			}
   1426        1.5      yamt 			if (pmap_is_modified(pg)) {
   1427        1.5      yamt 				printf("%s: %p: modified\n", __func__, pg);
   1428        1.5      yamt 			}
   1429        1.5      yamt 		}
   1430        1.5      yamt #endif /* defined(DEBUG) */
   1431        1.1     pooka 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1432        1.1     pooka 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1433        1.1     pooka 			vn_syncer_remove_from_worklist(vp);
   1434        1.1     pooka 	}
   1435        1.1     pooka 
   1436        1.1     pooka #if !defined(DEBUG)
   1437        1.1     pooka skip_scan:
   1438        1.1     pooka #endif /* !defined(DEBUG) */
   1439        1.2        ad 
   1440        1.2        ad 	/* Wait for output to complete. */
   1441        1.2        ad 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1442        1.2        ad 		while (vp->v_numoutput != 0)
   1443        1.2        ad 			cv_wait(&vp->v_cv, slock);
   1444        1.1     pooka 	}
   1445        1.4      yamt 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1446        1.2        ad 	mutex_exit(slock);
   1447        1.1     pooka 
   1448        1.4      yamt 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1449        1.4      yamt 		/*
    1450        1.4      yamt 		 * in the case of PGO_RECLAIM, make sure the vnode is clean.
   1451        1.4      yamt 		 * retrying is not a big deal because, in many cases,
   1452        1.4      yamt 		 * uobj->uo_npages is already 0 here.
   1453        1.4      yamt 		 */
   1454        1.4      yamt 		mutex_enter(slock);
   1455        1.4      yamt 		goto retry;
   1456        1.4      yamt 	}
   1457        1.4      yamt 
   1458       1.12   hannken 	if (has_trans) {
   1459       1.12   hannken 		if (need_wapbl)
   1460       1.12   hannken 			WAPBL_END(vp->v_mount);
   1461        1.6   hannken 		fstrans_done(vp->v_mount);
   1462       1.12   hannken 	}
   1463        1.6   hannken 
   1464        1.1     pooka 	return (error);
   1465        1.1     pooka }
   1466        1.1     pooka 
   1467  1.36.2.24  uebayasi #ifdef XIP
   1468  1.36.2.24  uebayasi int
   1469  1.36.2.24  uebayasi genfs_do_putpages_xip(struct vnode *vp, off_t startoff, off_t endoff,
   1470  1.36.2.24  uebayasi     int flags, struct vm_page **busypg)
   1471  1.36.2.24  uebayasi {
   1472  1.36.2.24  uebayasi 	struct uvm_object *uobj = &vp->v_uobj;
   1473  1.36.2.25  uebayasi #ifdef DIAGNOSTIC
   1474  1.36.2.24  uebayasi 	struct genfs_node * const gp = VTOG(vp);
   1475  1.36.2.25  uebayasi #endif
   1476  1.36.2.24  uebayasi 
   1477  1.36.2.24  uebayasi 	UVMHIST_FUNC("genfs_do_putpages_xip"); UVMHIST_CALLED(ubchist);
   1478  1.36.2.24  uebayasi 
   1479  1.36.2.24  uebayasi 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1480  1.36.2.24  uebayasi 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1481  1.36.2.24  uebayasi 	KASSERT(vp->v_numoutput == 0);
   1482  1.36.2.24  uebayasi 	KASSERT(gp->g_dirtygen == 0);
   1483  1.36.2.24  uebayasi 
   1484  1.36.2.24  uebayasi 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1485  1.36.2.24  uebayasi 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1486  1.36.2.24  uebayasi 
   1487  1.36.2.24  uebayasi 	/*
   1488  1.36.2.24  uebayasi 	 * XIP pages are read-only, and never become dirty.  They're also never
   1489  1.36.2.24  uebayasi 	 * queued.  PGO_DEACTIVATE and PGO_CLEANIT are meaningless for XIP
   1490  1.36.2.24  uebayasi 	 * pages, so we ignore them.
   1491  1.36.2.24  uebayasi 	 */
   1492  1.36.2.24  uebayasi 	if ((flags & PGO_FREE) == 0)
   1493  1.36.2.24  uebayasi 		goto done;
   1494  1.36.2.24  uebayasi 
   1495  1.36.2.24  uebayasi 	/*
   1496  1.36.2.24  uebayasi 	 * For PGO_FREE (or (PGO_CLEANIT | PGO_FREE)), we invalidate MMU
   1497  1.36.2.24  uebayasi 	 * mappings of both XIP pages and XIP zero pages.
   1498  1.36.2.24  uebayasi 	 *
    1499  1.36.2.24  uebayasi 	 * The zero page is freed when any one of its mapped offsets is
    1500  1.36.2.24  uebayasi 	 * freed, even if a single file (vnode) has many holes and maps its
    1501  1.36.2.24  uebayasi 	 * zero page at all of those hole offsets.
   1502  1.36.2.24  uebayasi 	 *
    1503  1.36.2.25  uebayasi 	 * We don't know which pages are currently mapped in the given vnode,
    1504  1.36.2.25  uebayasi 	 * because XIP pages are never added to the vnode.  What we can do is
    1505  1.36.2.25  uebayasi 	 * locate pages by querying the filesystem, as getpages does.  Call
   1506  1.36.2.40  uebayasi 	 * genfs_do_getpages_xip_io().
   1507  1.36.2.24  uebayasi 	 */
   1508  1.36.2.24  uebayasi 
   1509  1.36.2.24  uebayasi 	off_t off, eof;
   1510  1.36.2.24  uebayasi 
   1511  1.36.2.24  uebayasi 	off = trunc_page(startoff);
   1512  1.36.2.24  uebayasi 	if (endoff == 0 || (flags & PGO_ALLPAGES))
   1513  1.36.2.24  uebayasi 		GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_MEM);
   1514  1.36.2.24  uebayasi 	else
   1515  1.36.2.24  uebayasi 		eof = endoff;
   1516  1.36.2.24  uebayasi 
   1517  1.36.2.24  uebayasi 	while (off < eof) {
   1518  1.36.2.24  uebayasi 		int npages, orignpages, error, i;
   1519  1.36.2.24  uebayasi 		struct vm_page *pgs[maxpages], *pg;
   1520  1.36.2.24  uebayasi 
   1521  1.36.2.24  uebayasi 		npages = round_page(eof - off) >> PAGE_SHIFT;
   1522  1.36.2.24  uebayasi 		if (npages > maxpages)
   1523  1.36.2.24  uebayasi 			npages = maxpages;
   1524  1.36.2.24  uebayasi 
   1525  1.36.2.24  uebayasi 		orignpages = npages;
   1526  1.36.2.26  uebayasi 		KASSERT(mutex_owned(&uobj->vmobjlock));
   1527  1.36.2.39  uebayasi 		mutex_exit(&uobj->vmobjlock);
   1528  1.36.2.40  uebayasi 		error = genfs_do_getpages_xip_io(vp, off, pgs, &npages, 0,
   1529  1.36.2.36  uebayasi 		    VM_PROT_ALL, 0, 0);
   1530  1.36.2.24  uebayasi 		KASSERT(error == 0);
   1531  1.36.2.24  uebayasi 		KASSERT(npages == orignpages);
   1532  1.36.2.36  uebayasi 		mutex_enter(&uobj->vmobjlock);
   1533  1.36.2.24  uebayasi 		for (i = 0; i < npages; i++) {
   1534  1.36.2.24  uebayasi 			pg = pgs[i];
   1535  1.36.2.24  uebayasi 			if (pg == NULL || pg == PGO_DONTCARE)
   1536  1.36.2.24  uebayasi 				continue;
    1537  1.36.2.33  uebayasi 			/*
    1538  1.36.2.33  uebayasi 			 * Freeing normal XIP pages: just invalidate the mapping.
    1539  1.36.2.33  uebayasi 			 */
   1540  1.36.2.33  uebayasi 			pmap_page_protect(pg, VM_PROT_NONE);
   1541  1.36.2.33  uebayasi 			KASSERT((pg->flags & PG_BUSY) != 0);
   1542  1.36.2.33  uebayasi 			KASSERT((pg->flags & PG_RDONLY) != 0);
   1543  1.36.2.33  uebayasi 			KASSERT((pg->flags & PG_CLEAN) != 0);
   1544  1.36.2.33  uebayasi 			KASSERT((pg->flags & PG_FAKE) == 0);
   1545  1.36.2.33  uebayasi 			KASSERT((pg->flags & PG_DEVICE) != 0);
   1546  1.36.2.33  uebayasi 			pg->flags &= ~PG_BUSY;
   1547  1.36.2.24  uebayasi 		}
   1548  1.36.2.24  uebayasi 		off += npages << PAGE_SHIFT;
   1549  1.36.2.24  uebayasi 	}
   1550  1.36.2.24  uebayasi 
   1551  1.36.2.24  uebayasi 	KASSERT(uobj->uo_npages == 0);
   1552  1.36.2.24  uebayasi 
   1553  1.36.2.24  uebayasi done:
   1554  1.36.2.26  uebayasi 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1555  1.36.2.24  uebayasi 	mutex_exit(&uobj->vmobjlock);
   1556  1.36.2.24  uebayasi 	return 0;
   1557  1.36.2.24  uebayasi }
   1558  1.36.2.24  uebayasi #endif
   1559  1.36.2.24  uebayasi 
   1560        1.1     pooka int
   1561        1.1     pooka genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1562        1.1     pooka {
   1563        1.1     pooka 	off_t off;
   1564        1.1     pooka 	vaddr_t kva;
   1565        1.1     pooka 	size_t len;
   1566        1.1     pooka 	int error;
   1567        1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1568        1.1     pooka 
   1569        1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1570        1.1     pooka 	    vp, pgs, npages, flags);
   1571        1.1     pooka 
   1572        1.1     pooka 	off = pgs[0]->offset;
   1573        1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1574        1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1575        1.1     pooka 	len = npages << PAGE_SHIFT;
   1576        1.1     pooka 
   1577        1.1     pooka 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1578        1.1     pooka 			    uvm_aio_biodone);
   1579        1.1     pooka 
   1580        1.1     pooka 	return error;
   1581        1.1     pooka }
   1582        1.1     pooka 
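                              /*
                               * genfs_gop_write_rwmap: a variant of genfs_gop_write.  The pages are
                               * pager-mapped with UVMPAGER_MAPIN_READ instead of UVMPAGER_MAPIN_WRITE,
                               * which leaves the kernel window writable, so the strategy routine can
                               * still modify the buffer contents on the way out (apparently intended
                               * for filesystems, such as UDF, that rewrite blocks at write time).
                               */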
   1583        1.7   reinoud int
   1584        1.7   reinoud genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1585        1.7   reinoud {
   1586        1.7   reinoud 	off_t off;
   1587        1.7   reinoud 	vaddr_t kva;
   1588        1.7   reinoud 	size_t len;
   1589        1.7   reinoud 	int error;
   1590        1.7   reinoud 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1591        1.7   reinoud 
   1592        1.7   reinoud 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1593        1.7   reinoud 	    vp, pgs, npages, flags);
   1594        1.7   reinoud 
   1595        1.7   reinoud 	off = pgs[0]->offset;
   1596        1.7   reinoud 	kva = uvm_pagermapin(pgs, npages,
   1597        1.7   reinoud 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1598        1.7   reinoud 	len = npages << PAGE_SHIFT;
   1599        1.7   reinoud 
   1600        1.7   reinoud 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1601        1.7   reinoud 			    uvm_aio_biodone);
   1602        1.7   reinoud 
   1603        1.7   reinoud 	return error;
   1604        1.7   reinoud }
   1605        1.7   reinoud 
   1606        1.1     pooka /*
   1607        1.1     pooka  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1608        1.1     pooka  * and mapped into kernel memory.  Here we just look up the underlying
   1609        1.1     pooka  * device block addresses and call the strategy routine.
   1610        1.1     pooka  */
   1611        1.1     pooka 
   1612        1.1     pooka static int
   1613        1.1     pooka genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1614        1.1     pooka     enum uio_rw rw, void (*iodone)(struct buf *))
   1615        1.1     pooka {
   1616       1.36  uebayasi 	int s, error;
   1617        1.1     pooka 	int fs_bshift, dev_bshift;
   1618        1.1     pooka 	off_t eof, offset, startoffset;
   1619        1.1     pooka 	size_t bytes, iobytes, skipbytes;
   1620        1.1     pooka 	struct buf *mbp, *bp;
   1621       1.35  uebayasi 	const bool async = (flags & PGO_SYNCIO) == 0;
   1622       1.35  uebayasi 	const bool iowrite = rw == UIO_WRITE;
   1623       1.35  uebayasi 	const int brw = iowrite ? B_WRITE : B_READ;
   1624        1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1625        1.1     pooka 
   1626        1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1627        1.1     pooka 	    vp, kva, len, flags);
   1628        1.1     pooka 
   1629        1.1     pooka 	KASSERT(vp->v_size <= vp->v_writesize);
   1630        1.1     pooka 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1631        1.1     pooka 	if (vp->v_type != VBLK) {
   1632        1.1     pooka 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1633        1.1     pooka 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1634        1.1     pooka 	} else {
   1635        1.1     pooka 		fs_bshift = DEV_BSHIFT;
   1636        1.1     pooka 		dev_bshift = DEV_BSHIFT;
   1637        1.1     pooka 	}
   1638        1.1     pooka 	error = 0;
   1639        1.1     pooka 	startoffset = off;
   1640        1.1     pooka 	bytes = MIN(len, eof - startoffset);
   1641        1.1     pooka 	skipbytes = 0;
   1642        1.1     pooka 	KASSERT(bytes != 0);
   1643        1.1     pooka 
   1644       1.35  uebayasi 	if (iowrite) {
   1645        1.2        ad 		mutex_enter(&vp->v_interlock);
   1646        1.1     pooka 		vp->v_numoutput += 2;
   1647        1.2        ad 		mutex_exit(&vp->v_interlock);
   1648        1.1     pooka 	}
   1649        1.2        ad 	mbp = getiobuf(vp, true);
   1650        1.1     pooka 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1651        1.1     pooka 	    vp, mbp, vp->v_numoutput, bytes);
   1652        1.1     pooka 	mbp->b_bufsize = len;
   1653        1.1     pooka 	mbp->b_data = (void *)kva;
   1654        1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
   1655        1.2        ad 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1656        1.2        ad 	if (async) {
   1657        1.2        ad 		mbp->b_flags = brw | B_ASYNC;
   1658        1.2        ad 		mbp->b_iodone = iodone;
   1659        1.2        ad 	} else {
   1660        1.2        ad 		mbp->b_flags = brw;
   1661        1.2        ad 		mbp->b_iodone = NULL;
   1662        1.2        ad 	}
   1663        1.1     pooka 	if (curlwp == uvm.pagedaemon_lwp)
   1664        1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1665        1.1     pooka 	else if (async)
   1666        1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1667        1.1     pooka 	else
   1668        1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1669        1.1     pooka 
   1670        1.1     pooka 	bp = NULL;
   1671        1.1     pooka 	for (offset = startoffset;
   1672        1.1     pooka 	    bytes > 0;
   1673        1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
   1674       1.36  uebayasi 		int run;
   1675       1.36  uebayasi 		daddr_t lbn, blkno;
   1676       1.36  uebayasi 		struct vnode *devvp;
   1677       1.36  uebayasi 
   1678       1.36  uebayasi 		/*
    1679       1.36  uebayasi 		 * bmap the file to find out the blkno to transfer from/to
    1680       1.36  uebayasi 		 * and how much we can do in one i/o.  if bmap returns an error,
   1681       1.36  uebayasi 		 * skip the rest of the top-level i/o.
   1682       1.36  uebayasi 		 */
   1683       1.36  uebayasi 
   1684        1.1     pooka 		lbn = offset >> fs_bshift;
   1685        1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1686        1.1     pooka 		if (error) {
   1687       1.36  uebayasi 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
   1688       1.36  uebayasi 			    lbn,error,0,0);
   1689        1.1     pooka 			skipbytes += bytes;
   1690        1.1     pooka 			bytes = 0;
   1691       1.36  uebayasi 			goto loopdone;
   1692        1.1     pooka 		}
   1693        1.1     pooka 
   1694       1.36  uebayasi 		/*
    1695       1.36  uebayasi 		 * see how much we can transfer with this i/o: the size is
    1696       1.36  uebayasi 		 * limited by the length of the contiguous block run that
    1697       1.36  uebayasi 		 * bmap returned.
   1698       1.36  uebayasi 		 */
   1699       1.36  uebayasi 
   1700        1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1701        1.1     pooka 		    bytes);
   1702       1.36  uebayasi 
   1703       1.36  uebayasi 		/*
    1704       1.36  uebayasi 		 * if this block isn't allocated, there is nothing to do
    1705       1.36  uebayasi 		 * at the device level: zero the buffer for a read, and
    1706       1.36  uebayasi 		 * account the span as skipped either way.
   1707       1.36  uebayasi 		 */
   1708       1.36  uebayasi 
   1709        1.1     pooka 		if (blkno == (daddr_t)-1) {
   1710       1.35  uebayasi 			if (!iowrite) {
   1711        1.1     pooka 				memset((char *)kva + (offset - startoffset), 0,
   1712       1.36  uebayasi 				    iobytes);
   1713        1.1     pooka 			}
   1714        1.1     pooka 			skipbytes += iobytes;
   1715        1.1     pooka 			continue;
   1716        1.1     pooka 		}
   1717        1.1     pooka 
   1718       1.36  uebayasi 		/*
   1719       1.36  uebayasi 		 * allocate a sub-buf for this piece of the i/o
   1720       1.36  uebayasi 		 * (or just use mbp if there's only 1 piece),
   1721       1.36  uebayasi 		 * and start it going.
   1722       1.36  uebayasi 		 */
   1723       1.36  uebayasi 
   1724        1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
   1725        1.1     pooka 			bp = mbp;
   1726        1.1     pooka 		} else {
   1727        1.1     pooka 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1728        1.1     pooka 			    vp, bp, vp->v_numoutput, 0);
   1729        1.2        ad 			bp = getiobuf(vp, true);
   1730        1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1731        1.1     pooka 		}
   1732        1.1     pooka 		bp->b_lblkno = 0;
   1733        1.1     pooka 
   1734        1.1     pooka 		/* adjust physical blkno for partial blocks */
   1735        1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1736        1.1     pooka 		    dev_bshift);
   1737       1.36  uebayasi 
   1738        1.1     pooka 		UVMHIST_LOG(ubchist,
   1739       1.36  uebayasi 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1740       1.36  uebayasi 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1741        1.1     pooka 
   1742        1.1     pooka 		VOP_STRATEGY(devvp, bp);
   1743        1.1     pooka 	}
   1744       1.36  uebayasi 
   1745       1.36  uebayasi loopdone:
   1746        1.1     pooka 	if (skipbytes) {
   1747        1.1     pooka 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1748        1.1     pooka 	}
   1749        1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
   1750        1.1     pooka 	if (async) {
   1751        1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1752        1.1     pooka 		return (0);
   1753        1.1     pooka 	}
   1754        1.1     pooka 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1755        1.1     pooka 	error = biowait(mbp);
   1756        1.1     pooka 	s = splbio();
   1757        1.1     pooka 	(*iodone)(mbp);
   1758        1.1     pooka 	splx(s);
   1759        1.1     pooka 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1760        1.1     pooka 	return (error);
   1761        1.1     pooka }
   1762        1.1     pooka 
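                              /*
                               * genfs_compat_getpages: a fallback getpages implementation.  Pages are
                               * taken from the page cache as usual, but the PG_FAKE ones are filled
                               * by reading through VOP_READ into a temporary pager mapping rather
                               * than by block-level i/o.
                               */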
   1763        1.1     pooka int
   1764        1.1     pooka genfs_compat_getpages(void *v)
   1765        1.1     pooka {
   1766        1.1     pooka 	struct vop_getpages_args /* {
   1767        1.1     pooka 		struct vnode *a_vp;
   1768        1.1     pooka 		voff_t a_offset;
   1769        1.1     pooka 		struct vm_page **a_m;
   1770        1.1     pooka 		int *a_count;
   1771        1.1     pooka 		int a_centeridx;
   1772        1.1     pooka 		vm_prot_t a_access_type;
   1773        1.1     pooka 		int a_advice;
   1774        1.1     pooka 		int a_flags;
   1775        1.1     pooka 	} */ *ap = v;
   1776        1.1     pooka 
   1777        1.1     pooka 	off_t origoffset;
   1778        1.1     pooka 	struct vnode *vp = ap->a_vp;
   1779        1.1     pooka 	struct uvm_object *uobj = &vp->v_uobj;
   1780        1.1     pooka 	struct vm_page *pg, **pgs;
   1781        1.1     pooka 	vaddr_t kva;
   1782        1.1     pooka 	int i, error, orignpages, npages;
   1783        1.1     pooka 	struct iovec iov;
   1784        1.1     pooka 	struct uio uio;
   1785        1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1786       1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1787        1.1     pooka 
   1788        1.1     pooka 	error = 0;
   1789        1.1     pooka 	origoffset = ap->a_offset;
   1790        1.1     pooka 	orignpages = *ap->a_count;
   1791        1.1     pooka 	pgs = ap->a_m;
   1792        1.1     pooka 
   1793        1.1     pooka 	if (ap->a_flags & PGO_LOCKED) {
   1794        1.1     pooka 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1795       1.35  uebayasi 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1796        1.1     pooka 
   1797  1.36.2.21  uebayasi 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1798  1.36.2.21  uebayasi 		if (error == 0 && memwrite) {
   1799  1.36.2.21  uebayasi 			genfs_markdirty(vp);
   1800  1.36.2.21  uebayasi 		}
   1801  1.36.2.21  uebayasi 		return error;
   1802        1.1     pooka 	}
   1803        1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1804        1.2        ad 		mutex_exit(&uobj->vmobjlock);
   1805  1.36.2.21  uebayasi 		return EINVAL;
   1806        1.1     pooka 	}
   1807        1.1     pooka 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1808        1.2        ad 		mutex_exit(&uobj->vmobjlock);
   1809        1.1     pooka 		return 0;
   1810        1.1     pooka 	}
   1811        1.1     pooka 	npages = orignpages;
   1812        1.1     pooka 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1813        1.2        ad 	mutex_exit(&uobj->vmobjlock);
   1814        1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1815        1.1     pooka 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1816        1.1     pooka 	for (i = 0; i < npages; i++) {
   1817        1.1     pooka 		pg = pgs[i];
   1818        1.1     pooka 		if ((pg->flags & PG_FAKE) == 0) {
   1819        1.1     pooka 			continue;
   1820        1.1     pooka 		}
   1821        1.1     pooka 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1822        1.1     pooka 		iov.iov_len = PAGE_SIZE;
   1823        1.1     pooka 		uio.uio_iov = &iov;
   1824        1.1     pooka 		uio.uio_iovcnt = 1;
   1825        1.1     pooka 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1826        1.1     pooka 		uio.uio_rw = UIO_READ;
   1827        1.1     pooka 		uio.uio_resid = PAGE_SIZE;
   1828        1.1     pooka 		UIO_SETUP_SYSSPACE(&uio);
   1829        1.1     pooka 		/* XXX vn_lock */
   1830        1.1     pooka 		error = VOP_READ(vp, &uio, 0, cred);
   1831        1.1     pooka 		if (error) {
   1832        1.1     pooka 			break;
   1833        1.1     pooka 		}
   1834        1.1     pooka 		if (uio.uio_resid) {
   1835        1.1     pooka 			memset(iov.iov_base, 0, uio.uio_resid);
   1836        1.1     pooka 		}
   1837        1.1     pooka 	}
   1838        1.1     pooka 	uvm_pagermapout(kva, npages);
   1839        1.2        ad 	mutex_enter(&uobj->vmobjlock);
   1840        1.2        ad 	mutex_enter(&uvm_pageqlock);
   1841        1.1     pooka 	for (i = 0; i < npages; i++) {
   1842        1.1     pooka 		pg = pgs[i];
   1843        1.1     pooka 		if (error && (pg->flags & PG_FAKE) != 0) {
   1844        1.1     pooka 			pg->flags |= PG_RELEASED;
   1845        1.1     pooka 		} else {
   1846        1.1     pooka 			pmap_clear_modify(pg);
   1847        1.1     pooka 			uvm_pageactivate(pg);
   1848        1.1     pooka 		}
   1849        1.1     pooka 	}
   1850        1.1     pooka 	if (error) {
   1851        1.1     pooka 		uvm_page_unbusy(pgs, npages);
   1852        1.1     pooka 	}
   1853        1.2        ad 	mutex_exit(&uvm_pageqlock);
   1854  1.36.2.21  uebayasi 	if (error == 0 && memwrite) {
   1855  1.36.2.21  uebayasi 		genfs_markdirty(vp);
   1856  1.36.2.21  uebayasi 	}
   1857        1.2        ad 	mutex_exit(&uobj->vmobjlock);
   1858  1.36.2.21  uebayasi 	return error;
   1859        1.1     pooka }
   1860        1.1     pooka 
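                              /*
                               * genfs_compat_gop_write: companion to genfs_compat_getpages.  The pages
                               * are pushed out with a single VOP_WRITE, and a throwaway buf is then
                               * completed via uvm_aio_aiodone() so that the normal pageout accounting
                               * and page unbusying still take place.
                               */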
   1861        1.1     pooka int
   1862        1.1     pooka genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1863        1.1     pooka     int flags)
   1864        1.1     pooka {
   1865        1.1     pooka 	off_t offset;
   1866        1.1     pooka 	struct iovec iov;
   1867        1.1     pooka 	struct uio uio;
   1868        1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1869        1.1     pooka 	struct buf *bp;
   1870        1.1     pooka 	vaddr_t kva;
   1871        1.2        ad 	int error;
   1872        1.1     pooka 
   1873        1.1     pooka 	offset = pgs[0]->offset;
   1874        1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1875        1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1876        1.1     pooka 
   1877        1.1     pooka 	iov.iov_base = (void *)kva;
   1878        1.1     pooka 	iov.iov_len = npages << PAGE_SHIFT;
   1879        1.1     pooka 	uio.uio_iov = &iov;
   1880        1.1     pooka 	uio.uio_iovcnt = 1;
   1881        1.1     pooka 	uio.uio_offset = offset;
   1882        1.1     pooka 	uio.uio_rw = UIO_WRITE;
   1883        1.1     pooka 	uio.uio_resid = npages << PAGE_SHIFT;
   1884        1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
   1885        1.1     pooka 	/* XXX vn_lock */
   1886        1.1     pooka 	error = VOP_WRITE(vp, &uio, 0, cred);
   1887        1.1     pooka 
   1888        1.2        ad 	mutex_enter(&vp->v_interlock);
   1889        1.2        ad 	vp->v_numoutput++;
   1890        1.2        ad 	mutex_exit(&vp->v_interlock);
   1891        1.1     pooka 
   1892        1.2        ad 	bp = getiobuf(vp, true);
   1893        1.2        ad 	bp->b_cflags = BC_BUSY | BC_AGE;
   1894        1.1     pooka 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1895        1.1     pooka 	bp->b_data = (char *)kva;
   1896        1.1     pooka 	bp->b_bcount = npages << PAGE_SHIFT;
   1897        1.1     pooka 	bp->b_bufsize = npages << PAGE_SHIFT;
   1898        1.1     pooka 	bp->b_resid = 0;
   1899        1.1     pooka 	bp->b_error = error;
   1900        1.1     pooka 	uvm_aio_aiodone(bp);
   1901        1.1     pooka 	return (error);
   1902        1.1     pooka }
   1903        1.1     pooka 
   1904        1.1     pooka /*
   1905        1.1     pooka  * Process a uio using direct I/O.  If we reach a part of the request
   1906        1.1     pooka  * which cannot be processed in this fashion for some reason, just return.
   1907        1.1     pooka  * The caller must handle some additional part of the request using
   1908        1.1     pooka  * buffered I/O before trying direct I/O again.
   1909        1.1     pooka  */
   1910        1.1     pooka 
   1911        1.1     pooka void
   1912        1.1     pooka genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1913        1.1     pooka {
   1914        1.1     pooka 	struct vmspace *vs;
   1915        1.1     pooka 	struct iovec *iov;
   1916        1.1     pooka 	vaddr_t va;
   1917        1.1     pooka 	size_t len;
   1918        1.1     pooka 	const int mask = DEV_BSIZE - 1;
   1919        1.1     pooka 	int error;
   1920       1.16     joerg 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1921       1.16     joerg 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1922        1.1     pooka 
   1923        1.1     pooka 	/*
   1924        1.1     pooka 	 * We only support direct I/O to user space for now.
   1925        1.1     pooka 	 */
   1926        1.1     pooka 
   1927        1.1     pooka 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1928        1.1     pooka 		return;
   1929        1.1     pooka 	}
   1930        1.1     pooka 
   1931        1.1     pooka 	/*
   1932        1.1     pooka 	 * If the vnode is mapped, we would need to get the getpages lock
    1933        1.1     pooka 	 * to stabilize the bmap, but then we would get into trouble while
   1934        1.1     pooka 	 * locking the pages if the pages belong to this same vnode (or a
   1935        1.1     pooka 	 * multi-vnode cascade to the same effect).  Just fall back to
   1936        1.1     pooka 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1937        1.1     pooka 	 */
   1938        1.1     pooka 
   1939        1.1     pooka 	if (vp->v_vflag & VV_MAPPED) {
   1940        1.1     pooka 		return;
   1941        1.1     pooka 	}
   1942        1.1     pooka 
   1943       1.16     joerg 	if (need_wapbl) {
   1944       1.13   hannken 		error = WAPBL_BEGIN(vp->v_mount);
   1945       1.13   hannken 		if (error)
   1946       1.13   hannken 			return;
   1947       1.13   hannken 	}
   1948       1.13   hannken 
   1949        1.1     pooka 	/*
   1950        1.1     pooka 	 * Do as much of the uio as possible with direct I/O.
   1951        1.1     pooka 	 */
   1952        1.1     pooka 
   1953        1.1     pooka 	vs = uio->uio_vmspace;
   1954        1.1     pooka 	while (uio->uio_resid) {
   1955        1.1     pooka 		iov = uio->uio_iov;
   1956        1.1     pooka 		if (iov->iov_len == 0) {
   1957        1.1     pooka 			uio->uio_iov++;
   1958        1.1     pooka 			uio->uio_iovcnt--;
   1959        1.1     pooka 			continue;
   1960        1.1     pooka 		}
   1961        1.1     pooka 		va = (vaddr_t)iov->iov_base;
   1962        1.1     pooka 		len = MIN(iov->iov_len, genfs_maxdio);
   1963        1.1     pooka 		len &= ~mask;
   1964        1.1     pooka 
   1965        1.1     pooka 		/*
   1966        1.1     pooka 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1967        1.1     pooka 		 * the current EOF, then fall back to buffered I/O.
   1968        1.1     pooka 		 */
   1969        1.1     pooka 
   1970        1.1     pooka 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1971       1.13   hannken 			break;
   1972        1.1     pooka 		}
   1973        1.1     pooka 
   1974        1.1     pooka 		/*
   1975        1.1     pooka 		 * Check alignment.  The file offset must be at least
   1976        1.1     pooka 		 * sector-aligned.  The exact constraint on memory alignment
   1977        1.1     pooka 		 * is very hardware-dependent, but requiring sector-aligned
   1978        1.1     pooka 		 * addresses there too is safe.
   1979        1.1     pooka 		 */
   1980        1.1     pooka 
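                              		/*
                              		 * e.g. with the usual DEV_BSIZE of 512, mask is 511 and
                              		 * both values must be multiples of 512 to pass here.
                              		 */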
   1981        1.1     pooka 		if (uio->uio_offset & mask || va & mask) {
   1982       1.13   hannken 			break;
   1983        1.1     pooka 		}
   1984        1.1     pooka 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1985        1.1     pooka 					  uio->uio_rw);
   1986        1.1     pooka 		if (error) {
   1987        1.1     pooka 			break;
   1988        1.1     pooka 		}
   1989        1.1     pooka 		iov->iov_base = (char *)iov->iov_base + len;
   1990        1.1     pooka 		iov->iov_len -= len;
   1991        1.1     pooka 		uio->uio_offset += len;
   1992        1.1     pooka 		uio->uio_resid -= len;
   1993        1.1     pooka 	}
   1994       1.13   hannken 
   1995       1.16     joerg 	if (need_wapbl)
   1996       1.13   hannken 		WAPBL_END(vp->v_mount);
   1997        1.1     pooka }
   1998        1.1     pooka 
   1999        1.1     pooka /*
   2000        1.1     pooka  * Iodone routine for direct I/O.  We don't do much here since the request is
   2001        1.1     pooka  * always synchronous, so the caller will do most of the work after biowait().
   2002        1.1     pooka  */
   2003        1.1     pooka 
   2004        1.1     pooka static void
   2005        1.1     pooka genfs_dio_iodone(struct buf *bp)
   2006        1.1     pooka {
   2007        1.1     pooka 
   2008        1.1     pooka 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   2009        1.2        ad 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   2010        1.2        ad 		mutex_enter(bp->b_objlock);
   2011        1.1     pooka 		vwakeup(bp);
   2012        1.2        ad 		mutex_exit(bp->b_objlock);
   2013        1.1     pooka 	}
   2014        1.1     pooka 	putiobuf(bp);
   2015        1.1     pooka }
   2016        1.1     pooka 
   2017        1.1     pooka /*
   2018        1.1     pooka  * Process one chunk of a direct I/O request.
   2019        1.1     pooka  */
   2020        1.1     pooka 
   2021        1.1     pooka static int
   2022        1.1     pooka genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   2023        1.1     pooka     off_t off, enum uio_rw rw)
   2024        1.1     pooka {
   2025        1.1     pooka 	struct vm_map *map;
   2026        1.1     pooka 	struct pmap *upm, *kpm;
   2027        1.1     pooka 	size_t klen = round_page(uva + len) - trunc_page(uva);
   2028        1.1     pooka 	off_t spoff, epoff;
   2029        1.1     pooka 	vaddr_t kva, puva;
   2030        1.1     pooka 	paddr_t pa;
   2031        1.1     pooka 	vm_prot_t prot;
   2032        1.1     pooka 	int error, rv, poff, koff;
   2033       1.13   hannken 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   2034        1.1     pooka 		(rw == UIO_WRITE ? PGO_FREE : 0);
   2035        1.1     pooka 
   2036        1.1     pooka 	/*
   2037        1.1     pooka 	 * For writes, verify that this range of the file already has fully
   2038        1.1     pooka 	 * allocated backing store.  If there are any holes, just punt and
   2039        1.1     pooka 	 * make the caller take the buffered write path.
   2040        1.1     pooka 	 */
   2041        1.1     pooka 
   2042        1.1     pooka 	if (rw == UIO_WRITE) {
   2043        1.1     pooka 		daddr_t lbn, elbn, blkno;
   2044        1.1     pooka 		int bsize, bshift, run;
   2045        1.1     pooka 
   2046        1.1     pooka 		bshift = vp->v_mount->mnt_fs_bshift;
   2047        1.1     pooka 		bsize = 1 << bshift;
   2048        1.1     pooka 		lbn = off >> bshift;
   2049        1.1     pooka 		elbn = (off + len + bsize - 1) >> bshift;
   2050        1.1     pooka 		while (lbn < elbn) {
   2051        1.1     pooka 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   2052        1.1     pooka 			if (error) {
   2053        1.1     pooka 				return error;
   2054        1.1     pooka 			}
   2055        1.1     pooka 			if (blkno == (daddr_t)-1) {
   2056        1.1     pooka 				return ENOSPC;
   2057        1.1     pooka 			}
   2058        1.1     pooka 			lbn += 1 + run;
   2059        1.1     pooka 		}
   2060        1.1     pooka 	}
   2061        1.1     pooka 
   2062        1.1     pooka 	/*
   2063        1.1     pooka 	 * Flush any cached pages for parts of the file that we're about to
   2064        1.1     pooka 	 * access.  If we're writing, invalidate pages as well.
   2065        1.1     pooka 	 */
   2066        1.1     pooka 
   2067        1.1     pooka 	spoff = trunc_page(off);
   2068        1.1     pooka 	epoff = round_page(off + len);
   2069        1.2        ad 	mutex_enter(&vp->v_interlock);
   2070        1.1     pooka 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   2071        1.1     pooka 	if (error) {
   2072        1.1     pooka 		return error;
   2073        1.1     pooka 	}
   2074        1.1     pooka 
   2075        1.1     pooka 	/*
   2076        1.1     pooka 	 * Wire the user pages and remap them into kernel memory.
   2077        1.1     pooka 	 */
   2078        1.1     pooka 
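                              	/*
                              	 * Note the asymmetry below: a file read stores into the user's
                              	 * buffer, so its pages must be wired writable; a file write only
                              	 * fetches from the buffer, so read access suffices.
                              	 */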
   2079        1.1     pooka 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   2080        1.1     pooka 	error = uvm_vslock(vs, (void *)uva, len, prot);
   2081        1.1     pooka 	if (error) {
   2082        1.1     pooka 		return error;
   2083        1.1     pooka 	}
   2084        1.1     pooka 
   2085        1.1     pooka 	map = &vs->vm_map;
   2086        1.1     pooka 	upm = vm_map_pmap(map);
   2087        1.1     pooka 	kpm = vm_map_pmap(kernel_map);
   2088        1.1     pooka 	kva = uvm_km_alloc(kernel_map, klen, 0,
   2089        1.1     pooka 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   2090        1.1     pooka 	puva = trunc_page(uva);
   2091        1.1     pooka 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   2092        1.1     pooka 		rv = pmap_extract(upm, puva + poff, &pa);
   2093        1.1     pooka 		KASSERT(rv);
   2094        1.1     pooka 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   2095        1.1     pooka 	}
   2096        1.1     pooka 	pmap_update(kpm);
   2097        1.1     pooka 
   2098        1.1     pooka 	/*
   2099        1.1     pooka 	 * Do the I/O.
   2100        1.1     pooka 	 */
   2101        1.1     pooka 
   2102        1.1     pooka 	koff = uva - trunc_page(uva);
   2103        1.1     pooka 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   2104        1.1     pooka 			    genfs_dio_iodone);
   2105        1.1     pooka 
   2106        1.1     pooka 	/*
   2107        1.1     pooka 	 * Tear down the kernel mapping.
   2108        1.1     pooka 	 */
   2109        1.1     pooka 
   2110        1.1     pooka 	pmap_remove(kpm, kva, kva + klen);
   2111        1.1     pooka 	pmap_update(kpm);
   2112        1.1     pooka 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   2113        1.1     pooka 
   2114        1.1     pooka 	/*
   2115        1.1     pooka 	 * Unwire the user pages.
   2116        1.1     pooka 	 */
   2117        1.1     pooka 
   2118        1.1     pooka 	uvm_vsunlock(vs, (void *)uva, len);
   2119        1.1     pooka 	return error;
   2120        1.1     pooka }
   2121        1.2        ad 
   2122