/*	$NetBSD: genfs_io.c,v 1.83.2.2 2020/02/29 20:21:04 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.83.2.2 2020/02/29 20:21:04 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>
#include <uvm/uvm_page_array.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
    off_t, bool, bool, bool, bool);
static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, unsigned int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;

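/*
 * genfs_rel_pages: unbusy the given pages after a failed or abandoned
 * operation.  pages that are still PG_FAKE (i.e. were never filled with
 * valid data) are marked PG_RELEASED so that uvm_page_unbusy() frees them.
 */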
static void
genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		KASSERT(uvm_page_owner_locked_p(pg, true));
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_page_unbusy(pgs, npages);
}

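/*
 * genfs_markdirty: note that a vnode has been modified.  put the vnode on
 * the syncer's worklist so its pages are eventually flushed, and if it is
 * mapped writable but not yet flagged dirty, flag it so.
 */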
static void
genfs_markdirty(struct vnode *vp)
{

	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
	mutex_exit(vp->v_interlock);
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

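/*
 * Typical call, as a minimal sketch: bring in the page covering "off"
 * for a read and wait for the i/o.  the object lock is released on
 * return.
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 *	error = VOP_GETPAGES(vp, trunc_page(off), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 */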
int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool need_wapbl = (vp->v_mount->mnt_wapbl &&
			(flags & PGO_JOURNALLOCKED) == 0);
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	bool holds_wapbl = false;
	struct mount *trans_mount = NULL;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx/%jx count %jd",
	    (uintptr_t)vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(memwrite >= overwrite);
	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

#ifdef DIAGNOSTIC
	if ((flags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
		WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	mutex_enter(vp->v_interlock);
	error = vdead_check(vp, VDEAD_NOWAIT);
	mutex_exit(vp->v_interlock);
	if (error) {
		if ((flags & PGO_LOCKED) == 0)
			rw_exit(uobj->vmobjlock);
		return error;
	}

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			rw_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%jx count %jd goes past EOF 0x%jx",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, NULL,
		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		/*
		 * lock and unlock g_glock to ensure that no one is truncating
		 * the file behind us.
		 */
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];
				if (pg == NULL || pg == PGO_DONTCARE) {
					continue;
				}
				if (uvm_pagegetdirty(pg) ==
				    UVM_PAGE_STATUS_CLEAN) {
					uvm_pagemarkdirty(pg,
					    UVM_PAGE_STATUS_UNKNOWN);
				}
			}
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	rw_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
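	/*
	 * e.g. with 8kB filesystem blocks (fs_bshift == 13, blk_mask ==
	 * 0x1fff): trunc_blk(0x3800) == 0x2000 and round_blk(0x3800) ==
	 * 0x4000, so a request is widened to whole-block boundaries.
	 */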

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %jd npages %jd startoff %jd endoff %jd",
	    ridx, npages, startoffset, endoffset);

	if (trans_mount == NULL) {
		trans_mount = vp->v_mount;
		fstrans_start(trans_mount);
		/*
		 * check if this vnode is still valid.
		 */
		mutex_enter(vp->v_interlock);
		error = vdead_check(vp, 0);
		mutex_exit(vp->v_interlock);
		if (error)
			goto out_err_free;
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (blockalloc && need_wapbl) {
			error = WAPBL_BEGIN(trans_mount);
			if (error)
				goto out_err_free;
			holds_wapbl = true;
		}
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		rw_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			/*
			 * it's caller's responsibility to allocate blocks
			 * beforehand for the overwrite case.
			 */

			KASSERT((pg->flags & PG_RDONLY) == 0 || !blockalloc);
			pg->flags &= ~PG_RDONLY;

			/*
			 * mark the page DIRTY.
			 * otherwise another thread can do putpages and pull
			 * our vnode from syncer's queue before our caller does
			 * ubc_release.  note that putpages won't see CLEAN
			 * pages even if they are BUSY.
			 */

			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY) != 0)) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%jx end 0x%jx",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			rw_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	rw_exit(uobj->vmobjlock);
	error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
	    async, memwrite, blockalloc, glocked);
	if (!glocked) {
		genfs_node_unlock(vp);
	}
	if (error == 0 && async)
		goto out_err_free;
	rw_enter(uobj->vmobjlock, RW_WRITER);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		genfs_rel_pages(pgs, npages);
		rw_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %jd", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %jd", npages,0,0,0);
	error = 0;
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %#jx flags 0x%jx",
		    (uintptr_t)pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			/*
			 * we've read the page's contents from the backing
			 * storage.
			 *
			 * for a read fault, we keep them CLEAN; if we
			 * encountered a hole while reading, the pages may
			 * already have been dirtied with zeros.
			 */
			KASSERTMSG(blockalloc || uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			pg->flags &= ~PG_FAKE;
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %#jx offset 0x%jx",
			    (uintptr_t)pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pagelock(pg);
			uvm_pageenqueue(pg);
			uvm_pageunlock(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		} else if (memwrite && !overwrite &&
		    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * for a write fault, start dirtiness tracking of
			 * requested pages.
			 */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
		}
	}
	if (memwrite) {
		genfs_markdirty(vp);
	}
	rw_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (trans_mount != NULL) {
		if (holds_wapbl)
			WAPBL_END(trans_mount);
		fstrans_done(trans_mount);
	}
	return error;
}

/*
 * genfs_getpages_read: Read the pages in with VOP_BMAP/VOP_STRATEGY.
 *
 * "glocked" (which is currently not actually used) tells us not whether
 * the genfs_node is locked on entry (it always is) but whether it was
 * locked on entry to genfs_getpages.
 */
static int
genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
    off_t startoffset, off_t diskeof,
    bool async, bool memwrite, bool blockalloc, bool glocked)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;
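	/*
	 * e.g. for a 4-page (16kB with 4kB pages) request whose last two
	 * pages lie past the on-disk EOF: bytes == 8kB is read from disk
	 * and tailbytes == 8kB is zero-filled below.
	 */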

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
	if (kva == 0)
		return EBUSY;

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_aiodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %#jx 0x%jx 0x%jx",
			    (uintptr_t)kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%jx",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}
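		/*
		 * e.g. VOP_BMAP may report that many contiguous blocks are
		 * readable in one i/o, but if only the first few pages of
		 * the run are still PG_FAKE, iobytes is clamped to just
		 * those pages so the valid pages that follow are preserved.
		 */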

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%jx -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			if (!blockalloc) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				for (i = 0; i < holepages; i++) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
				rw_exit(uobj->vmobjlock);
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
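		/*
		 * e.g. with fs_bshift == 13 (8kB blocks) and dev_bshift == 9
		 * (512-byte sectors), an offset of 0x3000 in the middle of
		 * lbn 1 starts (0x3000 - 0x2000) >> 9 == 8 sectors into the
		 * block.
		 */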

		UVMHIST_LOG(ubchist,
		    "bp %#jx offset 0x%x bcount 0x%x blkno 0x%x",
		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
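	/*
	 * the bytes we skipped above (holes and already-resident pages)
	 * are accounted as done on the master buffer, so biowait() will
	 * not wait for i/o that was never issued.
	 */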
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		return 0;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%jx/0x%jx -> %jd",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			rw_enter(uobj->vmobjlock, RW_WRITER);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~PG_RDONLY;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				UVMHIST_LOG(ubchist, "mark dirty pg %#jx",
				    (uintptr_t)pg, 0, 0, 0);
			}
			rw_exit(uobj->vmobjlock);
		}
	}

	putiobuf(mbp);
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (i.e. it must be UVM_PAGE_STATUS_CLEAN)
 *	because no one has had a chance to modify it yet.  if the PG_BUSY page is
 *	being paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 */

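/*
 * Typical call, as a minimal sketch: synchronously flush and free all
 * pages of a vnode, e.g. before it is reclaimed.  the object lock is
 * released on return.
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 */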
int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	krwlock_t * const slock = uobj->vmobjlock;
	off_t nextoff;
	int i, error, npages, nback;
	int freeflag;
	/*
	 * This array is larger than it should be so that its size is
	 * constant.  The right size is MAXPAGES.
	 */
	struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
#define MAXPAGES (MAXPHYS / PAGE_SIZE)
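	/* e.g. 16 entries with the common MAXPHYS of 64kB and 4kB pages. */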
    882      1.78        ad 	struct vm_page *pg, *tpg;
    883      1.78        ad 	struct uvm_page_array a;
    884      1.78        ad 	bool wasclean, needs_clean;
    885       1.4      yamt 	bool async = (origflags & PGO_SYNCIO) == 0;
    886       1.1     pooka 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
    887      1.22  uebayasi 	struct lwp * const l = curlwp ? curlwp : &lwp0;
    888      1.65   hannken 	struct mount *trans_mp;
    889       1.4      yamt 	int flags;
    890  1.83.2.1        ad 	bool modified;		/* if we write out any pages */
    891      1.65   hannken 	bool holds_wapbl;
    892  1.83.2.1        ad 	bool cleanall;		/* try to pull off from the syncer's list */
    893       1.4      yamt 	bool onworklst;
    894  1.83.2.2        ad 	bool nodirty;
    895  1.83.2.1        ad 	const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;
    896       1.1     pooka 
    897       1.1     pooka 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
    898       1.1     pooka 
    899       1.4      yamt 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
    900       1.1     pooka 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
    901       1.1     pooka 	KASSERT(startoff < endoff || endoff == 0);
    902  1.83.2.2        ad 	KASSERT(rw_write_held(slock));
    903       1.1     pooka 
    904      1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pages %jd off 0x%jx len 0x%jx",
    905      1.71  pgoyette 	    (uintptr_t)vp, uobj->uo_npages, startoff, endoff - startoff);
    906       1.1     pooka 
    907      1.74  jdolecek #ifdef DIAGNOSTIC
    908      1.74  jdolecek 	if ((origflags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
    909      1.74  jdolecek                 WAPBL_JLOCK_ASSERT(vp->v_mount);
    910      1.74  jdolecek #endif
    911      1.74  jdolecek 
    912      1.65   hannken 	trans_mp = NULL;
    913      1.65   hannken 	holds_wapbl = false;
    914       1.6   hannken 
    915       1.4      yamt retry:
    916       1.4      yamt 	modified = false;
    917       1.4      yamt 	flags = origflags;
    918       1.1     pooka 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
    919       1.1     pooka 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
    920  1.83.2.1        ad 
    921  1.83.2.1        ad 	/*
    922  1.83.2.1        ad 	 * shortcut if we have no pages to process.
    923  1.83.2.1        ad 	 */
    924  1.83.2.1        ad 
    925  1.83.2.2        ad 	nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
    926  1.83.2.2        ad             UVM_PAGE_DIRTY_TAG);
    927  1.83.2.2        ad 	if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
    928  1.83.2.2        ad 		mutex_enter(vp->v_interlock);
    929       1.1     pooka 		if (vp->v_iflag & VI_ONWORKLST) {
    930       1.1     pooka 			vp->v_iflag &= ~VI_WRMAPDIRTY;
    931       1.1     pooka 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
    932       1.1     pooka 				vn_syncer_remove_from_worklist(vp);
    933       1.1     pooka 		}
    934  1.83.2.2        ad 		mutex_exit(vp->v_interlock);
    935      1.65   hannken 		if (trans_mp) {
    936      1.65   hannken 			if (holds_wapbl)
    937      1.65   hannken 				WAPBL_END(trans_mp);
    938      1.65   hannken 			fstrans_done(trans_mp);
    939      1.12   hannken 		}
    940  1.83.2.2        ad 		rw_exit(slock);
    941       1.1     pooka 		return (0);
    942       1.1     pooka 	}
    943       1.1     pooka 
    944       1.1     pooka 	/*
    945       1.1     pooka 	 * the vnode has pages, set up to process the request.
    946       1.1     pooka 	 */
    947       1.1     pooka 
    948      1.65   hannken 	if (trans_mp == NULL && (flags & PGO_CLEANIT) != 0) {
    949       1.1     pooka 		if (pagedaemon) {
    950      1.65   hannken 			/* Pagedaemon must not sleep here. */
    951      1.65   hannken 			trans_mp = vp->v_mount;
    952      1.69   hannken 			error = fstrans_start_nowait(trans_mp);
    953      1.12   hannken 			if (error) {
    954  1.83.2.2        ad 				rw_exit(slock);
    955      1.12   hannken 				return error;
    956      1.12   hannken 			}
    957      1.65   hannken 		} else {
    958      1.65   hannken 			/*
    959      1.65   hannken 			 * Cannot use vdeadcheck() here as this operation
    960      1.65   hannken 			 * usually gets used from VOP_RECLAIM().  Test for
    961      1.65   hannken 			 * change of v_mount instead and retry on change.
    962      1.65   hannken 			 */
    963  1.83.2.2        ad 			rw_exit(slock);
    964      1.65   hannken 			trans_mp = vp->v_mount;
    965      1.69   hannken 			fstrans_start(trans_mp);
    966      1.65   hannken 			if (vp->v_mount != trans_mp) {
    967      1.65   hannken 				fstrans_done(trans_mp);
    968      1.65   hannken 				trans_mp = NULL;
    969      1.65   hannken 			} else {
    970      1.65   hannken 				holds_wapbl = (trans_mp->mnt_wapbl &&
    971      1.65   hannken 				    (origflags & PGO_JOURNALLOCKED) == 0);
    972      1.65   hannken 				if (holds_wapbl) {
    973      1.65   hannken 					error = WAPBL_BEGIN(trans_mp);
    974      1.65   hannken 					if (error) {
    975      1.65   hannken 						fstrans_done(trans_mp);
    976      1.65   hannken 						return error;
    977      1.65   hannken 					}
    978      1.65   hannken 				}
    979      1.65   hannken 			}
    980  1.83.2.2        ad 			rw_enter(slock, RW_WRITER);
    981      1.65   hannken 			goto retry;
    982      1.12   hannken 		}
    983       1.1     pooka 	}
    984       1.1     pooka 
    985       1.1     pooka 	error = 0;
    986  1.83.2.2        ad 	wasclean = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
    987  1.83.2.2        ad             UVM_PAGE_WRITEBACK_TAG);
    988      1.78        ad 	nextoff = startoff;
    989       1.1     pooka 	if (endoff == 0 || flags & PGO_ALLPAGES) {
    990       1.1     pooka 		endoff = trunc_page(LLONG_MAX);
    991       1.1     pooka 	}
    992       1.1     pooka 
    993       1.1     pooka 	/*
    994       1.1     pooka 	 * if this vnode is known not to have dirty pages,
    995       1.1     pooka 	 * don't bother to clean it out.
    996       1.1     pooka 	 */
    997       1.1     pooka 
    998  1.83.2.2        ad 	if (nodirty) {
    999      1.48      matt #if !defined(DEBUG)
   1000  1.83.2.1        ad 		if (dirtyonly) {
   1001       1.1     pooka 			goto skip_scan;
   1002       1.1     pooka 		}
   1003      1.48      matt #endif /* !defined(DEBUG) */
   1004       1.1     pooka 		flags &= ~PGO_CLEANIT;
   1005       1.1     pooka 	}
   1006       1.1     pooka 
   1007       1.1     pooka 	/*
   1008      1.78        ad 	 * start the loop to scan pages.
   1009       1.1     pooka 	 */
   1010       1.1     pooka 
   1011  1.83.2.1        ad 	cleanall = true;
   1012       1.1     pooka 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1013      1.78        ad 	uvm_page_array_init(&a);
   1014      1.78        ad 	for (;;) {
   1015  1.83.2.1        ad 		bool pgprotected;
   1016  1.83.2.1        ad 
   1017      1.78        ad 		/*
   1018  1.83.2.1        ad 		 * if !dirtyonly, iterate over all resident pages in the range.
   1019  1.83.2.1        ad 		 *
   1020  1.83.2.1        ad 		 * if dirtyonly, only possibly dirty pages are interesting.
   1021  1.83.2.1        ad 		 * however, if we are asked to sync for integrity, we should
   1022  1.83.2.1        ad 		 * wait on pages being written back by other threads as well.
   1023      1.78        ad 		 */
   1024      1.78        ad 
   1025  1.83.2.1        ad 		pg = uvm_page_array_fill_and_peek(&a, uobj, nextoff, 0,
   1026  1.83.2.1        ad 		    dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
   1027  1.83.2.1        ad 		    (!async ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
   1028      1.78        ad 		if (pg == NULL) {
   1029      1.78        ad 			break;
   1030      1.78        ad 		}
   1031      1.78        ad 
   1032      1.78        ad 		KASSERT(pg->uobject == uobj);
   1033      1.78        ad 		KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1034      1.78        ad 		    (pg->flags & (PG_BUSY)) != 0);
   1035      1.78        ad 		KASSERT(pg->offset >= startoff);
   1036      1.78        ad 		KASSERT(pg->offset >= nextoff);
   1037  1.83.2.1        ad 		KASSERT(!dirtyonly ||
   1038  1.83.2.1        ad 		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
   1039  1.83.2.1        ad 		    radix_tree_get_tag(&uobj->uo_pages,
   1040  1.83.2.1        ad 			pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
   1041      1.78        ad 
   1042      1.78        ad 		if (pg->offset >= endoff) {
   1043      1.78        ad 			break;
   1044      1.78        ad 		}
   1045      1.78        ad 
   1046       1.1     pooka 		/*
    1047      1.78        ad 		 * a preemption point.
   1048       1.1     pooka 		 */
   1049       1.1     pooka 
   1050      1.78        ad 		if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
   1051      1.78        ad 		    != 0) {
   1052      1.78        ad 			nextoff = pg->offset; /* visit this page again */
   1053  1.83.2.2        ad 			rw_exit(slock);
   1054      1.78        ad 			preempt();
   1055      1.78        ad 			/*
   1056      1.78        ad 			 * as we dropped the object lock, our cached pages can
   1057      1.78        ad 			 * be stale.
   1058      1.78        ad 			 */
   1059      1.78        ad 			uvm_page_array_clear(&a);
   1060  1.83.2.2        ad 			rw_enter(slock, RW_WRITER);
   1061       1.1     pooka 			continue;
   1062       1.1     pooka 		}
   1063       1.1     pooka 
   1064       1.1     pooka 		/*
   1065  1.83.2.1        ad 		 * if the current page is busy, wait for it to become unbusy.
   1066       1.1     pooka 		 */
   1067       1.1     pooka 
   1068  1.83.2.1        ad 		if ((pg->flags & PG_BUSY) != 0) {
   1069      1.71  pgoyette 			UVMHIST_LOG(ubchist, "busy %#jx", (uintptr_t)pg,
   1070      1.71  pgoyette 			   0, 0, 0);
   1071  1.83.2.1        ad 			if ((pg->flags & (PG_RELEASED|PG_PAGEOUT)) != 0
   1072  1.83.2.1        ad 			    && (flags & PGO_BUSYFAIL) != 0) {
   1073      1.71  pgoyette 				UVMHIST_LOG(ubchist, "busyfail %#jx",
   1074      1.71  pgoyette 				    (uintptr_t)pg, 0, 0, 0);
   1075       1.1     pooka 				error = EDEADLK;
   1076       1.1     pooka 				if (busypg != NULL)
   1077       1.1     pooka 					*busypg = pg;
   1078       1.1     pooka 				break;
   1079       1.1     pooka 			}
   1080       1.1     pooka 			if (pagedaemon) {
   1081       1.1     pooka 				/*
   1082       1.1     pooka 				 * someone has taken the page while we
   1083       1.1     pooka 				 * dropped the lock for fstrans_start.
   1084       1.1     pooka 				 */
   1085       1.1     pooka 				break;
   1086       1.1     pooka 			}
   1087  1.83.2.1        ad 			/*
    1088  1.83.2.1        ad 			 * don't bother to wait on others' activities
   1089  1.83.2.1        ad 			 * unless we are asked to sync for integrity.
   1090  1.83.2.1        ad 			 */
   1091  1.83.2.1        ad 			if (!async && (flags & PGO_RECLAIM) == 0) {
   1092  1.83.2.1        ad 				wasclean = false;
   1093  1.83.2.1        ad 				nextoff = pg->offset + PAGE_SIZE;
   1094  1.83.2.1        ad 				uvm_page_array_advance(&a);
   1095  1.83.2.1        ad 				continue;
   1096  1.83.2.1        ad 			}
   1097      1.78        ad 			nextoff = pg->offset; /* visit this page again */
   1098      1.78        ad 			pg->flags |= PG_WANTED;
   1099  1.83.2.2        ad 			UVM_UNLOCK_AND_WAIT_RW(pg, slock, 0, "genput", 0);
   1100      1.78        ad 			/*
   1101      1.78        ad 			 * as we dropped the object lock, our cached pages can
   1102      1.78        ad 			 * be stale.
   1103      1.78        ad 			 */
   1104      1.78        ad 			uvm_page_array_clear(&a);
   1105  1.83.2.2        ad 			rw_enter(slock, RW_WRITER);
   1106       1.1     pooka 			continue;
   1107       1.1     pooka 		}
   1108       1.1     pooka 
   1109      1.78        ad 		nextoff = pg->offset + PAGE_SIZE;
   1110      1.78        ad 		uvm_page_array_advance(&a);
   1111      1.78        ad 
   1112       1.1     pooka 		/*
   1113       1.1     pooka 		 * if we're freeing, remove all mappings of the page now.
    1114       1.1     pooka 		 * if we're cleaning, check whether the page needs to be cleaned.
   1115       1.1     pooka 		 */
   1116       1.1     pooka 
   1117  1.83.2.1        ad 		pgprotected = false;
   1118       1.1     pooka 		if (flags & PGO_FREE) {
   1119       1.1     pooka 			pmap_page_protect(pg, VM_PROT_NONE);
   1120  1.83.2.1        ad 			pgprotected = true;
   1121       1.1     pooka 		} else if (flags & PGO_CLEANIT) {
   1122       1.1     pooka 
   1123       1.1     pooka 			/*
    1124       1.1     pooka 			 * if we still have some hope of pulling this vnode off
    1125       1.1     pooka 			 * the syncer queue, write-protect the page.
   1126       1.1     pooka 			 */
   1127       1.1     pooka 
   1128  1.83.2.1        ad 			if (cleanall && wasclean) {
   1129       1.1     pooka 
   1130       1.1     pooka 				/*
   1131       1.1     pooka 				 * uobj pages get wired only by uvm_fault
   1132       1.1     pooka 				 * where uobj is locked.
   1133       1.1     pooka 				 */
   1134       1.1     pooka 
   1135       1.1     pooka 				if (pg->wire_count == 0) {
   1136       1.1     pooka 					pmap_page_protect(pg,
   1137       1.1     pooka 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1138  1.83.2.1        ad 					pgprotected = true;
   1139       1.1     pooka 				} else {
   1140       1.1     pooka 					cleanall = false;
   1141       1.1     pooka 				}
   1142       1.1     pooka 			}
   1143       1.1     pooka 		}
   1144       1.1     pooka 
   1145       1.1     pooka 		if (flags & PGO_CLEANIT) {
   1146  1.83.2.1        ad 			needs_clean = uvm_pagecheckdirty(pg, pgprotected);
   1147       1.1     pooka 		} else {
   1148       1.1     pooka 			needs_clean = false;
   1149       1.1     pooka 		}
   1150       1.1     pooka 
   1151       1.1     pooka 		/*
   1152       1.1     pooka 		 * if we're cleaning, build a cluster.
   1153  1.83.2.1        ad 		 * the cluster will consist of pages which are currently dirty.
   1154       1.1     pooka 		 * if not cleaning, just operate on the one page.
   1155       1.1     pooka 		 */
   1156       1.1     pooka 
   1157       1.1     pooka 		if (needs_clean) {
   1158       1.1     pooka 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1159       1.1     pooka 			wasclean = false;
   1160       1.1     pooka 			memset(pgs, 0, sizeof(pgs));
   1161       1.1     pooka 			pg->flags |= PG_BUSY;
   1162       1.1     pooka 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1163       1.1     pooka 
   1164       1.1     pooka 			/*
   1165      1.72       chs 			 * let the fs constrain the offset range of the cluster.
   1166      1.72       chs 			 * we additionally constrain the range here such that
   1167      1.72       chs 			 * it fits in the "pgs" pages array.
   1168      1.72       chs 			 */
   1169      1.72       chs 
   1170      1.78        ad 			off_t fslo, fshi, genlo, lo, off = pg->offset;
   1171      1.72       chs 			GOP_PUTRANGE(vp, off, &fslo, &fshi);
   1172      1.72       chs 			KASSERT(fslo == trunc_page(fslo));
   1173      1.72       chs 			KASSERT(fslo <= off);
   1174      1.72       chs 			KASSERT(fshi == trunc_page(fshi));
   1175      1.72       chs 			KASSERT(fshi == 0 || off < fshi);
   1176      1.72       chs 
   1177      1.72       chs 			if (off > MAXPHYS / 2)
   1178      1.72       chs 				genlo = trunc_page(off - (MAXPHYS / 2));
   1179      1.72       chs 			else
   1180      1.72       chs 				genlo = 0;
   1181      1.72       chs 			lo = MAX(fslo, genlo);
   1182      1.72       chs 
   1183      1.72       chs 			/*
   1184       1.1     pooka 			 * first look backward.
   1185       1.1     pooka 			 */
   1186       1.1     pooka 
   1187      1.72       chs 			npages = (off - lo) >> PAGE_SHIFT;
   1188       1.1     pooka 			nback = npages;
   1189  1.83.2.1        ad 			uvn_findpages(uobj, off - PAGE_SIZE, &nback,
   1190  1.83.2.1        ad 			    &pgs[0], NULL,
   1191       1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1192       1.1     pooka 			if (nback) {
   1193       1.1     pooka 				memmove(&pgs[0], &pgs[npages - nback],
   1194       1.1     pooka 				    nback * sizeof(pgs[0]));
   1195       1.1     pooka 				if (npages - nback < nback)
   1196       1.1     pooka 					memset(&pgs[nback], 0,
   1197       1.1     pooka 					    (npages - nback) * sizeof(pgs[0]));
   1198       1.1     pooka 				else
   1199       1.1     pooka 					memset(&pgs[npages - nback], 0,
   1200       1.1     pooka 					    nback * sizeof(pgs[0]));
   1201       1.1     pooka 			}
   1202       1.1     pooka 
   1203       1.1     pooka 			/*
   1204       1.1     pooka 			 * then plug in our page of interest.
   1205       1.1     pooka 			 */
   1206       1.1     pooka 
   1207       1.1     pooka 			pgs[nback] = pg;
   1208       1.1     pooka 
   1209       1.1     pooka 			/*
   1210       1.1     pooka 			 * then look forward to fill in the remaining space in
   1211       1.1     pooka 			 * the array of pages.
   1212  1.83.2.1        ad 			 *
   1213  1.83.2.1        ad 			 * pass our cached array of pages so that hopefully
   1214  1.83.2.1        ad 			 * uvn_findpages can find some good pages in it.
    1215  1.83.2.1        ad 			 * the array a was filled above with one of the
    1216  1.83.2.1        ad 			 * following sets of flags:
   1217  1.83.2.1        ad 			 *	0
   1218  1.83.2.1        ad 			 *	UVM_PAGE_ARRAY_FILL_DIRTY
   1219  1.83.2.1        ad 			 *	UVM_PAGE_ARRAY_FILL_DIRTY|WRITEBACK
   1220       1.1     pooka 			 */
   1221       1.1     pooka 
   1222      1.62  christos 			npages = MAXPAGES - nback - 1;
   1223      1.72       chs 			if (fshi)
   1224      1.72       chs 				npages = MIN(npages,
   1225      1.72       chs 					     (fshi - off - 1) >> PAGE_SHIFT);
   1226       1.1     pooka 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1227  1.83.2.1        ad 			    &pgs[nback + 1], NULL,
   1228       1.1     pooka 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1229       1.1     pooka 			npages += nback + 1;
   1230       1.1     pooka 		} else {
   1231       1.1     pooka 			pgs[0] = pg;
   1232       1.1     pooka 			npages = 1;
   1233       1.1     pooka 			nback = 0;
   1234       1.1     pooka 		}
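                             		/*
                             		 * at this point pgs[0..npages-1] holds a contiguous run
                             		 * of pages (asserted below), with our page of interest
                             		 * at index nback.
                             		 */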
   1235       1.1     pooka 
   1236       1.1     pooka 		/*
   1237       1.1     pooka 		 * apply FREE or DEACTIVATE options if requested.
   1238       1.1     pooka 		 */
   1239       1.1     pooka 
   1240       1.1     pooka 		for (i = 0; i < npages; i++) {
   1241       1.1     pooka 			tpg = pgs[i];
   1242       1.1     pooka 			KASSERT(tpg->uobject == uobj);
   1243  1.83.2.1        ad 			KASSERT(i == 0 ||
   1244  1.83.2.1        ad 			    pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
   1245  1.83.2.1        ad 			KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
   1246  1.83.2.1        ad 			    UVM_PAGE_STATUS_DIRTY);
   1247  1.83.2.1        ad 			if (needs_clean) {
   1248  1.83.2.1        ad 				/*
   1249  1.83.2.1        ad 				 * mark pages as WRITEBACK so that concurrent
   1250  1.83.2.1        ad 				 * fsync can find and wait for our activities.
   1251  1.83.2.1        ad 				 */
   1252  1.83.2.1        ad 				radix_tree_set_tag(&uobj->uo_pages,
   1253  1.83.2.1        ad 				    pgs[i]->offset >> PAGE_SHIFT,
   1254  1.83.2.1        ad 				    UVM_PAGE_WRITEBACK_TAG);
   1255  1.83.2.1        ad 			}
   1256       1.1     pooka 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1257       1.1     pooka 				continue;
   1258       1.1     pooka 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1259      1.83        ad 				uvm_pagelock(tpg);
   1260       1.1     pooka 				uvm_pagedeactivate(tpg);
   1261      1.83        ad 				uvm_pageunlock(tpg);
   1262       1.1     pooka 			} else if (flags & PGO_FREE) {
   1263       1.1     pooka 				pmap_page_protect(tpg, VM_PROT_NONE);
   1264       1.1     pooka 				if (tpg->flags & PG_BUSY) {
   1265       1.1     pooka 					tpg->flags |= freeflag;
   1266       1.1     pooka 					if (pagedaemon) {
   1267       1.2        ad 						uvm_pageout_start(1);
   1268      1.83        ad 						uvm_pagelock(tpg);
   1269       1.1     pooka 						uvm_pagedequeue(tpg);
   1270      1.83        ad 						uvm_pageunlock(tpg);
   1271       1.1     pooka 					}
   1272       1.1     pooka 				} else {
   1273       1.1     pooka 
   1274       1.1     pooka 					/*
   1275       1.1     pooka 					 * ``page is not busy''
   1276       1.1     pooka 					 * implies that npages is 1
   1277       1.1     pooka 					 * and needs_clean is false.
   1278       1.1     pooka 					 */
   1279       1.1     pooka 
   1280      1.78        ad 					KASSERT(npages == 1);
   1281      1.78        ad 					KASSERT(!needs_clean);
   1282      1.78        ad 					KASSERT(pg == tpg);
   1283      1.78        ad 					KASSERT(nextoff ==
   1284      1.78        ad 					    tpg->offset + PAGE_SIZE);
   1285       1.1     pooka 					uvm_pagefree(tpg);
   1286       1.1     pooka 					if (pagedaemon)
   1287       1.1     pooka 						uvmexp.pdfreed++;
   1288       1.1     pooka 				}
   1289       1.1     pooka 			}
   1290       1.1     pooka 		}
   1291       1.1     pooka 		if (needs_clean) {
   1292       1.1     pooka 			modified = true;
   1293      1.78        ad 			KASSERT(nextoff == pg->offset + PAGE_SIZE);
   1294      1.78        ad 			KASSERT(nback < npages);
   1295      1.78        ad 			nextoff = pg->offset + ((npages - nback) << PAGE_SHIFT);
   1296      1.78        ad 			KASSERT(pgs[nback] == pg);
   1297      1.78        ad 			KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);
   1298       1.1     pooka 
   1299       1.1     pooka 			/*
   1300      1.78        ad 			 * start the i/o.
   1301       1.1     pooka 			 */
   1302  1.83.2.2        ad 			rw_exit(slock);
   1303       1.1     pooka 			error = GOP_WRITE(vp, pgs, npages, flags);
   1304      1.78        ad 			/*
   1305      1.78        ad 			 * as we dropped the object lock, our cached pages can
   1306      1.78        ad 			 * be stale.
   1307      1.78        ad 			 */
   1308      1.78        ad 			uvm_page_array_clear(&a);
   1309  1.83.2.2        ad 			rw_enter(slock, RW_WRITER);
   1310       1.1     pooka 			if (error) {
   1311       1.1     pooka 				break;
   1312       1.1     pooka 			}
   1313       1.1     pooka 		}
   1314       1.1     pooka 	}
   1315      1.78        ad 	uvm_page_array_fini(&a);
   1316       1.1     pooka 
   1317  1.83.2.1        ad 	/*
    1318  1.83.2.1        ad 	 * update ctime/mtime if the modification we started writing out might
    1319  1.83.2.1        ad 	 * be from an mmap'ed write.
    1320  1.83.2.1        ad 	 *
    1321  1.83.2.1        ad 	 * this is necessary when an application keeps a file mmapped and
    1322  1.83.2.1        ad 	 * repeatedly modifies it through the mapping.  note that, because we
   1323  1.83.2.1        ad 	 * don't always write-protect pages when cleaning, such modifications
   1324  1.83.2.1        ad 	 * might not involve any page faults.
   1325  1.83.2.1        ad 	 */
   1326  1.83.2.1        ad 
   1327  1.83.2.2        ad 	mutex_enter(vp->v_interlock);
   1328       1.1     pooka 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1329       1.1     pooka 	    (vp->v_type != VBLK ||
   1330       1.1     pooka 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1331       1.1     pooka 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1332       1.1     pooka 	}
   1333       1.1     pooka 
   1334       1.1     pooka 	/*
   1335  1.83.2.1        ad 	 * if we no longer have any possibly dirty pages, take us off the
   1336  1.83.2.1        ad 	 * syncer list.
   1337       1.1     pooka 	 */
   1338       1.1     pooka 
   1339  1.83.2.1        ad 	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
   1340  1.83.2.1        ad 	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
   1341  1.83.2.1        ad 	    UVM_PAGE_DIRTY_TAG)) {
   1342       1.1     pooka 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1343       1.1     pooka 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1344       1.1     pooka 			vn_syncer_remove_from_worklist(vp);
   1345       1.1     pooka 	}
   1346       1.1     pooka 
   1347       1.1     pooka #if !defined(DEBUG)
   1348       1.1     pooka skip_scan:
   1349       1.1     pooka #endif /* !defined(DEBUG) */
   1350       1.2        ad 
   1351       1.2        ad 	/* Wait for output to complete. */
   1352  1.83.2.2        ad 	rw_exit(slock);
   1353       1.2        ad 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1354       1.2        ad 		while (vp->v_numoutput != 0)
   1355  1.83.2.2        ad 			cv_wait(&vp->v_cv, vp->v_interlock);
   1356       1.1     pooka 	}
   1357       1.4      yamt 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1358  1.83.2.2        ad 	mutex_exit(vp->v_interlock);
   1359       1.1     pooka 
   1360       1.4      yamt 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1361       1.4      yamt 		/*
    1362       1.4      yamt 		 * in the case of PGO_RECLAIM, make sure the vnode ends up clean.
   1363       1.4      yamt 		 * retrying is not a big deal because, in many cases,
   1364       1.4      yamt 		 * uobj->uo_npages is already 0 here.
   1365       1.4      yamt 		 */
   1366  1.83.2.2        ad 		rw_enter(slock, RW_WRITER);
   1367       1.4      yamt 		goto retry;
   1368       1.4      yamt 	}
   1369       1.4      yamt 
   1370      1.65   hannken 	if (trans_mp) {
   1371      1.65   hannken 		if (holds_wapbl)
   1372      1.65   hannken 			WAPBL_END(trans_mp);
   1373      1.65   hannken 		fstrans_done(trans_mp);
   1374      1.12   hannken 	}
   1375       1.6   hannken 
   1376       1.1     pooka 	return (error);
   1377       1.1     pooka }
   1378       1.1     pooka 
   1379      1.72       chs /*
   1380      1.72       chs  * Default putrange method for file systems that do not care
   1381      1.72       chs  * how many pages are given to one GOP_WRITE() call.
   1382      1.72       chs  */
   1383      1.72       chs void
   1384      1.72       chs genfs_gop_putrange(struct vnode *vp, off_t off, off_t *lop, off_t *hip)
   1385      1.72       chs {
   1386      1.72       chs 
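                             	/*
                             	 * a zero *hip is interpreted by genfs_putpages as "no upper
                             	 * bound" when it sizes a cluster.
                             	 */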
   1387      1.72       chs 	*lop = 0;
   1388      1.72       chs 	*hip = 0;
   1389      1.72       chs }
   1390      1.72       chs 
   1391       1.1     pooka int
   1392       1.1     pooka genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1393       1.1     pooka {
   1394       1.1     pooka 	off_t off;
   1395       1.1     pooka 	vaddr_t kva;
   1396       1.1     pooka 	size_t len;
   1397       1.1     pooka 	int error;
   1398       1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1399       1.1     pooka 
   1400      1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
   1401      1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
   1402       1.1     pooka 
   1403       1.1     pooka 	off = pgs[0]->offset;
   1404       1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1405       1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1406       1.1     pooka 	len = npages << PAGE_SHIFT;
   1407       1.1     pooka 
   1408       1.1     pooka 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1409  1.83.2.2        ad 			    uvm_aio_aiodone);
   1410       1.1     pooka 
   1411       1.1     pooka 	return error;
   1412       1.1     pooka }
   1413       1.1     pooka 
   1414      1.78        ad /*
   1415      1.78        ad  * genfs_gop_write_rwmap:
   1416      1.78        ad  *
   1417      1.78        ad  * a variant of genfs_gop_write.  it's used by UDF for its directory buffers.
    1418      1.78        ad  * this maps pages with PROT_WRITE so that VOP_STRATEGY can modify
    1419      1.78        ad  * the contents before writing them out to the underlying storage.
   1420      1.78        ad  */
   1421      1.78        ad 
   1422       1.7   reinoud int
   1423      1.78        ad genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
   1424      1.78        ad     int flags)
   1425       1.7   reinoud {
   1426       1.7   reinoud 	off_t off;
   1427       1.7   reinoud 	vaddr_t kva;
   1428       1.7   reinoud 	size_t len;
   1429       1.7   reinoud 	int error;
   1430       1.7   reinoud 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1431       1.7   reinoud 
   1432      1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
   1433      1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
   1434       1.7   reinoud 
   1435       1.7   reinoud 	off = pgs[0]->offset;
   1436       1.7   reinoud 	kva = uvm_pagermapin(pgs, npages,
   1437       1.7   reinoud 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
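                             	/*
                             	 * note: UVMPAGER_MAPIN_READ asks for a mapping the i/o can
                             	 * write through (as when reading into the pages); that is what
                             	 * makes the page contents modifiable via kva here.
                             	 */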
   1438       1.7   reinoud 	len = npages << PAGE_SHIFT;
   1439       1.7   reinoud 
   1440       1.7   reinoud 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1441  1.83.2.2        ad 			    uvm_aio_aiodone);
   1442       1.7   reinoud 
   1443       1.7   reinoud 	return error;
   1444       1.7   reinoud }
   1445       1.7   reinoud 
   1446       1.1     pooka /*
   1447       1.1     pooka  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1448       1.1     pooka  * and mapped into kernel memory.  Here we just look up the underlying
   1449       1.1     pooka  * device block addresses and call the strategy routine.
   1450       1.1     pooka  */
   1451       1.1     pooka 
   1452       1.1     pooka static int
   1453       1.1     pooka genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1454       1.1     pooka     enum uio_rw rw, void (*iodone)(struct buf *))
   1455       1.1     pooka {
   1456      1.36  uebayasi 	int s, error;
   1457       1.1     pooka 	int fs_bshift, dev_bshift;
   1458       1.1     pooka 	off_t eof, offset, startoffset;
   1459       1.1     pooka 	size_t bytes, iobytes, skipbytes;
   1460       1.1     pooka 	struct buf *mbp, *bp;
   1461      1.35  uebayasi 	const bool async = (flags & PGO_SYNCIO) == 0;
    1462      1.54       chs 	const bool lazy = (flags & PGO_LAZY) != 0;
   1463      1.35  uebayasi 	const bool iowrite = rw == UIO_WRITE;
   1464      1.35  uebayasi 	const int brw = iowrite ? B_WRITE : B_READ;
   1465       1.1     pooka 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1466       1.1     pooka 
   1467      1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx kva %#jx len 0x%jx flags 0x%jx",
   1468      1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)kva, len, flags);
   1469       1.1     pooka 
   1470       1.1     pooka 	KASSERT(vp->v_size <= vp->v_writesize);
   1471       1.1     pooka 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1472       1.1     pooka 	if (vp->v_type != VBLK) {
   1473       1.1     pooka 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1474       1.1     pooka 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1475       1.1     pooka 	} else {
   1476       1.1     pooka 		fs_bshift = DEV_BSHIFT;
   1477       1.1     pooka 		dev_bshift = DEV_BSHIFT;
   1478       1.1     pooka 	}
   1479       1.1     pooka 	error = 0;
   1480       1.1     pooka 	startoffset = off;
   1481       1.1     pooka 	bytes = MIN(len, eof - startoffset);
   1482       1.1     pooka 	skipbytes = 0;
   1483       1.1     pooka 	KASSERT(bytes != 0);
   1484       1.1     pooka 
   1485      1.35  uebayasi 	if (iowrite) {
   1486      1.78        ad 		/*
   1487      1.78        ad 		 * why += 2?
   1488      1.78        ad 		 * 1 for biodone, 1 for uvm_aio_aiodone.
   1489      1.78        ad 		 */
   1490      1.49     rmind 		mutex_enter(vp->v_interlock);
   1491       1.1     pooka 		vp->v_numoutput += 2;
   1492      1.49     rmind 		mutex_exit(vp->v_interlock);
   1493       1.1     pooka 	}
   1494       1.2        ad 	mbp = getiobuf(vp, true);
   1495      1.71  pgoyette 	UVMHIST_LOG(ubchist, "vp %#jx mbp %#jx num now %jd bytes 0x%jx",
   1496      1.71  pgoyette 	    (uintptr_t)vp, (uintptr_t)mbp, vp->v_numoutput, bytes);
   1497       1.1     pooka 	mbp->b_bufsize = len;
   1498       1.1     pooka 	mbp->b_data = (void *)kva;
   1499       1.1     pooka 	mbp->b_resid = mbp->b_bcount = bytes;
   1500       1.2        ad 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1501       1.2        ad 	if (async) {
   1502       1.2        ad 		mbp->b_flags = brw | B_ASYNC;
   1503       1.2        ad 		mbp->b_iodone = iodone;
   1504       1.2        ad 	} else {
   1505       1.2        ad 		mbp->b_flags = brw;
   1506       1.2        ad 		mbp->b_iodone = NULL;
   1507       1.2        ad 	}
   1508       1.1     pooka 	if (curlwp == uvm.pagedaemon_lwp)
   1509       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1510      1.54       chs 	else if (async || lazy)
   1511       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1512       1.1     pooka 	else
   1513       1.1     pooka 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1514       1.1     pooka 
   1515       1.1     pooka 	bp = NULL;
   1516       1.1     pooka 	for (offset = startoffset;
   1517       1.1     pooka 	    bytes > 0;
   1518       1.1     pooka 	    offset += iobytes, bytes -= iobytes) {
   1519      1.36  uebayasi 		int run;
   1520      1.36  uebayasi 		daddr_t lbn, blkno;
   1521      1.36  uebayasi 		struct vnode *devvp;
   1522      1.36  uebayasi 
   1523      1.36  uebayasi 		/*
    1524      1.36  uebayasi 		 * bmap the file to find out the device block to transfer to/from
    1525      1.36  uebayasi 		 * and how much we can do in one i/o.  if bmap returns an error,
   1526      1.36  uebayasi 		 * skip the rest of the top-level i/o.
   1527      1.36  uebayasi 		 */
   1528      1.36  uebayasi 
   1529       1.1     pooka 		lbn = offset >> fs_bshift;
   1530       1.1     pooka 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1531       1.1     pooka 		if (error) {
   1532      1.71  pgoyette 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
   1533      1.71  pgoyette 			    lbn, error, 0, 0);
   1534       1.1     pooka 			skipbytes += bytes;
   1535       1.1     pooka 			bytes = 0;
   1536      1.36  uebayasi 			goto loopdone;
   1537       1.1     pooka 		}
   1538       1.1     pooka 
   1539      1.36  uebayasi 		/*
    1540      1.36  uebayasi 		 * see how much can be transferred in this one i/o: don't
    1541      1.36  uebayasi 		 * go past the end of the contiguous block run reported by
    1542      1.36  uebayasi 		 * VOP_BMAP, and don't exceed what remains of the request.
   1543      1.36  uebayasi 		 */
   1544      1.36  uebayasi 
   1545       1.1     pooka 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1546       1.1     pooka 		    bytes);
   1547      1.36  uebayasi 
   1548      1.36  uebayasi 		/*
    1549      1.36  uebayasi 		 * if this block isn't allocated, skip the transfer;
    1550      1.36  uebayasi 		 * for reads, zero the region in the mapping instead of
    1551      1.36  uebayasi 		 * reading it from the device.
   1552      1.36  uebayasi 		 */
   1553      1.36  uebayasi 
   1554       1.1     pooka 		if (blkno == (daddr_t)-1) {
   1555      1.35  uebayasi 			if (!iowrite) {
   1556       1.1     pooka 				memset((char *)kva + (offset - startoffset), 0,
   1557      1.36  uebayasi 				    iobytes);
   1558       1.1     pooka 			}
   1559       1.1     pooka 			skipbytes += iobytes;
   1560       1.1     pooka 			continue;
   1561       1.1     pooka 		}
   1562       1.1     pooka 
   1563      1.36  uebayasi 		/*
   1564      1.36  uebayasi 		 * allocate a sub-buf for this piece of the i/o
   1565      1.36  uebayasi 		 * (or just use mbp if there's only 1 piece),
   1566      1.36  uebayasi 		 * and start it going.
   1567      1.36  uebayasi 		 */
   1568      1.36  uebayasi 
   1569       1.1     pooka 		if (offset == startoffset && iobytes == bytes) {
   1570       1.1     pooka 			bp = mbp;
   1571       1.1     pooka 		} else {
   1572      1.71  pgoyette 			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
   1573      1.71  pgoyette 			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
   1574       1.2        ad 			bp = getiobuf(vp, true);
   1575       1.1     pooka 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1576       1.1     pooka 		}
   1577       1.1     pooka 		bp->b_lblkno = 0;
   1578       1.1     pooka 
   1579       1.1     pooka 		/* adjust physical blkno for partial blocks */
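                             		/*
                             		 * e.g. with 16KB fs blocks and 512-byte device blocks
                             		 * (fs_bshift 14, dev_bshift 9), starting 4KB into the
                             		 * fs block adds 4096 >> 9 == 8 to blkno.
                             		 */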
   1580       1.1     pooka 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1581       1.1     pooka 		    dev_bshift);
   1582      1.36  uebayasi 
   1583       1.1     pooka 		UVMHIST_LOG(ubchist,
   1584      1.71  pgoyette 		    "bp %#jx offset 0x%jx bcount 0x%jx blkno 0x%jx",
   1585      1.71  pgoyette 		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);
   1586       1.1     pooka 
   1587       1.1     pooka 		VOP_STRATEGY(devvp, bp);
   1588       1.1     pooka 	}
   1589      1.36  uebayasi 
   1590      1.36  uebayasi loopdone:
   1591       1.1     pooka 	if (skipbytes) {
   1592      1.71  pgoyette 		UVMHIST_LOG(ubchist, "skipbytes %jd", skipbytes, 0,0,0);
   1593       1.1     pooka 	}
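                             	/*
                             	 * account any skipped bytes against the master buffer; if those
                             	 * were the last outstanding bytes, this completes the master i/o.
                             	 */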
   1594       1.1     pooka 	nestiobuf_done(mbp, skipbytes, error);
   1595       1.1     pooka 	if (async) {
   1596       1.1     pooka 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1597       1.1     pooka 		return (0);
   1598       1.1     pooka 	}
   1599      1.71  pgoyette 	UVMHIST_LOG(ubchist, "waiting for mbp %#jx", (uintptr_t)mbp, 0, 0, 0);
   1600       1.1     pooka 	error = biowait(mbp);
   1601       1.1     pooka 	s = splbio();
   1602       1.1     pooka 	(*iodone)(mbp);
   1603       1.1     pooka 	splx(s);
   1604      1.71  pgoyette 	UVMHIST_LOG(ubchist, "returning, error %jd", error, 0, 0, 0);
   1605       1.1     pooka 	return (error);
   1606       1.1     pooka }
   1607       1.1     pooka 
   1608       1.1     pooka int
   1609       1.1     pooka genfs_compat_getpages(void *v)
   1610       1.1     pooka {
   1611       1.1     pooka 	struct vop_getpages_args /* {
   1612       1.1     pooka 		struct vnode *a_vp;
   1613       1.1     pooka 		voff_t a_offset;
   1614       1.1     pooka 		struct vm_page **a_m;
   1615       1.1     pooka 		int *a_count;
   1616       1.1     pooka 		int a_centeridx;
   1617       1.1     pooka 		vm_prot_t a_access_type;
   1618       1.1     pooka 		int a_advice;
   1619       1.1     pooka 		int a_flags;
   1620       1.1     pooka 	} */ *ap = v;
   1621       1.1     pooka 
   1622       1.1     pooka 	off_t origoffset;
   1623       1.1     pooka 	struct vnode *vp = ap->a_vp;
   1624       1.1     pooka 	struct uvm_object *uobj = &vp->v_uobj;
   1625       1.1     pooka 	struct vm_page *pg, **pgs;
   1626       1.1     pooka 	vaddr_t kva;
   1627       1.1     pooka 	int i, error, orignpages, npages;
   1628       1.1     pooka 	struct iovec iov;
   1629       1.1     pooka 	struct uio uio;
   1630       1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1631      1.35  uebayasi 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1632       1.1     pooka 
   1633       1.1     pooka 	error = 0;
   1634       1.1     pooka 	origoffset = ap->a_offset;
   1635       1.1     pooka 	orignpages = *ap->a_count;
   1636       1.1     pooka 	pgs = ap->a_m;
   1637       1.1     pooka 
   1638       1.1     pooka 	if (ap->a_flags & PGO_LOCKED) {
   1639  1.83.2.1        ad 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, NULL,
    1640      1.35  uebayasi 		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
   1641       1.1     pooka 
   1642      1.38       chs 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1643      1.38       chs 		if (error == 0 && memwrite) {
   1644      1.38       chs 			genfs_markdirty(vp);
   1645      1.38       chs 		}
   1646      1.38       chs 		return error;
   1647       1.1     pooka 	}
   1648       1.1     pooka 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1649  1.83.2.2        ad 		rw_exit(uobj->vmobjlock);
   1650      1.38       chs 		return EINVAL;
   1651       1.1     pooka 	}
   1652       1.1     pooka 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1653  1.83.2.2        ad 		rw_exit(uobj->vmobjlock);
   1654       1.1     pooka 		return 0;
   1655       1.1     pooka 	}
   1656       1.1     pooka 	npages = orignpages;
   1657  1.83.2.1        ad 	uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
   1658  1.83.2.2        ad 	rw_exit(uobj->vmobjlock);
   1659       1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1660       1.1     pooka 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
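                             	/*
                             	 * fill each page that uvn_findpages returned without valid
                             	 * contents (PG_FAKE) by doing an ordinary VOP_READ through
                             	 * the pager mapping.
                             	 */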
   1661       1.1     pooka 	for (i = 0; i < npages; i++) {
   1662       1.1     pooka 		pg = pgs[i];
   1663       1.1     pooka 		if ((pg->flags & PG_FAKE) == 0) {
   1664       1.1     pooka 			continue;
   1665       1.1     pooka 		}
   1666       1.1     pooka 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1667       1.1     pooka 		iov.iov_len = PAGE_SIZE;
   1668       1.1     pooka 		uio.uio_iov = &iov;
   1669       1.1     pooka 		uio.uio_iovcnt = 1;
   1670       1.1     pooka 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1671       1.1     pooka 		uio.uio_rw = UIO_READ;
   1672       1.1     pooka 		uio.uio_resid = PAGE_SIZE;
   1673       1.1     pooka 		UIO_SETUP_SYSSPACE(&uio);
   1674       1.1     pooka 		/* XXX vn_lock */
   1675       1.1     pooka 		error = VOP_READ(vp, &uio, 0, cred);
   1676       1.1     pooka 		if (error) {
   1677       1.1     pooka 			break;
   1678       1.1     pooka 		}
   1679       1.1     pooka 		if (uio.uio_resid) {
   1680       1.1     pooka 			memset(iov.iov_base, 0, uio.uio_resid);
   1681       1.1     pooka 		}
   1682       1.1     pooka 	}
   1683       1.1     pooka 	uvm_pagermapout(kva, npages);
   1684  1.83.2.2        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
   1685       1.1     pooka 	for (i = 0; i < npages; i++) {
   1686       1.1     pooka 		pg = pgs[i];
   1687       1.1     pooka 		if (error && (pg->flags & PG_FAKE) != 0) {
   1688       1.1     pooka 			pg->flags |= PG_RELEASED;
   1689       1.1     pooka 		} else {
   1690  1.83.2.1        ad 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
   1691      1.83        ad 			uvm_pagelock(pg);
   1692       1.1     pooka 			uvm_pageactivate(pg);
   1693      1.83        ad 			uvm_pageunlock(pg);
   1694       1.1     pooka 		}
   1695       1.1     pooka 	}
   1696       1.1     pooka 	if (error) {
   1697       1.1     pooka 		uvm_page_unbusy(pgs, npages);
   1698       1.1     pooka 	}
   1699      1.38       chs 	if (error == 0 && memwrite) {
   1700      1.38       chs 		genfs_markdirty(vp);
   1701      1.38       chs 	}
   1702  1.83.2.2        ad 	rw_exit(uobj->vmobjlock);
   1703      1.38       chs 	return error;
   1704       1.1     pooka }
   1705       1.1     pooka 
   1706       1.1     pooka int
   1707       1.1     pooka genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1708       1.1     pooka     int flags)
   1709       1.1     pooka {
   1710       1.1     pooka 	off_t offset;
   1711       1.1     pooka 	struct iovec iov;
   1712       1.1     pooka 	struct uio uio;
   1713       1.1     pooka 	kauth_cred_t cred = curlwp->l_cred;
   1714       1.1     pooka 	struct buf *bp;
   1715       1.1     pooka 	vaddr_t kva;
   1716       1.2        ad 	int error;
   1717       1.1     pooka 
   1718       1.1     pooka 	offset = pgs[0]->offset;
   1719       1.1     pooka 	kva = uvm_pagermapin(pgs, npages,
   1720       1.1     pooka 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1721       1.1     pooka 
   1722       1.1     pooka 	iov.iov_base = (void *)kva;
   1723       1.1     pooka 	iov.iov_len = npages << PAGE_SHIFT;
   1724       1.1     pooka 	uio.uio_iov = &iov;
   1725       1.1     pooka 	uio.uio_iovcnt = 1;
   1726       1.1     pooka 	uio.uio_offset = offset;
   1727       1.1     pooka 	uio.uio_rw = UIO_WRITE;
   1728       1.1     pooka 	uio.uio_resid = npages << PAGE_SHIFT;
   1729       1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
   1730       1.1     pooka 	/* XXX vn_lock */
   1731       1.1     pooka 	error = VOP_WRITE(vp, &uio, 0, cred);
   1732       1.1     pooka 
   1733      1.49     rmind 	mutex_enter(vp->v_interlock);
   1734       1.2        ad 	vp->v_numoutput++;
   1735      1.49     rmind 	mutex_exit(vp->v_interlock);
   1736       1.1     pooka 
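                             	/*
                             	 * fake up a buf describing the transfer so that the pages can
                             	 * be unbusied and the write accounted for through the normal
                             	 * uvm_aio_aiodone() path, even though the i/o was really done
                             	 * by VOP_WRITE() above.
                             	 */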
   1737       1.2        ad 	bp = getiobuf(vp, true);
   1738       1.2        ad 	bp->b_cflags = BC_BUSY | BC_AGE;
   1739       1.1     pooka 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1740       1.1     pooka 	bp->b_data = (char *)kva;
   1741       1.1     pooka 	bp->b_bcount = npages << PAGE_SHIFT;
   1742       1.1     pooka 	bp->b_bufsize = npages << PAGE_SHIFT;
   1743       1.1     pooka 	bp->b_resid = 0;
   1744       1.1     pooka 	bp->b_error = error;
   1745       1.1     pooka 	uvm_aio_aiodone(bp);
   1746       1.1     pooka 	return (error);
   1747       1.1     pooka }
   1748       1.1     pooka 
   1749       1.1     pooka /*
   1750       1.1     pooka  * Process a uio using direct I/O.  If we reach a part of the request
   1751       1.1     pooka  * which cannot be processed in this fashion for some reason, just return.
   1752       1.1     pooka  * The caller must handle some additional part of the request using
   1753       1.1     pooka  * buffered I/O before trying direct I/O again.
   1754       1.1     pooka  */
   1755       1.1     pooka 
   1756       1.1     pooka void
   1757       1.1     pooka genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1758       1.1     pooka {
   1759       1.1     pooka 	struct vmspace *vs;
   1760       1.1     pooka 	struct iovec *iov;
   1761       1.1     pooka 	vaddr_t va;
   1762       1.1     pooka 	size_t len;
   1763       1.1     pooka 	const int mask = DEV_BSIZE - 1;
   1764       1.1     pooka 	int error;
   1765      1.16     joerg 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1766      1.16     joerg 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1767       1.1     pooka 
   1768      1.74  jdolecek #ifdef DIAGNOSTIC
   1769      1.74  jdolecek 	if ((ioflag & IO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
    1770      1.74  jdolecek 		WAPBL_JLOCK_ASSERT(vp->v_mount);
   1771      1.74  jdolecek #endif
   1772      1.74  jdolecek 
   1773       1.1     pooka 	/*
   1774       1.1     pooka 	 * We only support direct I/O to user space for now.
   1775       1.1     pooka 	 */
   1776       1.1     pooka 
   1777       1.1     pooka 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1778       1.1     pooka 		return;
   1779       1.1     pooka 	}
   1780       1.1     pooka 
   1781       1.1     pooka 	/*
   1782       1.1     pooka 	 * If the vnode is mapped, we would need to get the getpages lock
   1783      1.53      yamt 	 * to stabilize the bmap, but then we would get into trouble while
   1784       1.1     pooka 	 * locking the pages if the pages belong to this same vnode (or a
   1785       1.1     pooka 	 * multi-vnode cascade to the same effect).  Just fall back to
   1786       1.1     pooka 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1787       1.1     pooka 	 */
   1788       1.1     pooka 
   1789       1.1     pooka 	if (vp->v_vflag & VV_MAPPED) {
   1790       1.1     pooka 		return;
   1791       1.1     pooka 	}
   1792       1.1     pooka 
   1793      1.16     joerg 	if (need_wapbl) {
   1794      1.13   hannken 		error = WAPBL_BEGIN(vp->v_mount);
   1795      1.13   hannken 		if (error)
   1796      1.13   hannken 			return;
   1797      1.13   hannken 	}
   1798      1.13   hannken 
   1799       1.1     pooka 	/*
   1800       1.1     pooka 	 * Do as much of the uio as possible with direct I/O.
   1801       1.1     pooka 	 */
   1802       1.1     pooka 
   1803       1.1     pooka 	vs = uio->uio_vmspace;
   1804       1.1     pooka 	while (uio->uio_resid) {
   1805       1.1     pooka 		iov = uio->uio_iov;
   1806       1.1     pooka 		if (iov->iov_len == 0) {
   1807       1.1     pooka 			uio->uio_iov++;
   1808       1.1     pooka 			uio->uio_iovcnt--;
   1809       1.1     pooka 			continue;
   1810       1.1     pooka 		}
   1811       1.1     pooka 		va = (vaddr_t)iov->iov_base;
   1812       1.1     pooka 		len = MIN(iov->iov_len, genfs_maxdio);
   1813       1.1     pooka 		len &= ~mask;
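                             		/*
                             		 * e.g. with DEV_BSIZE 512, mask is 0x1ff and the
                             		 * "len &= ~mask" above rounds len down to a multiple
                             		 * of 512, so a 1000-byte chunk becomes 512 bytes.
                             		 */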
   1814       1.1     pooka 
   1815       1.1     pooka 		/*
   1816       1.1     pooka 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1817       1.1     pooka 		 * the current EOF, then fall back to buffered I/O.
   1818       1.1     pooka 		 */
   1819       1.1     pooka 
   1820       1.1     pooka 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1821      1.13   hannken 			break;
   1822       1.1     pooka 		}
   1823       1.1     pooka 
   1824       1.1     pooka 		/*
   1825       1.1     pooka 		 * Check alignment.  The file offset must be at least
   1826       1.1     pooka 		 * sector-aligned.  The exact constraint on memory alignment
   1827       1.1     pooka 		 * is very hardware-dependent, but requiring sector-aligned
   1828       1.1     pooka 		 * addresses there too is safe.
   1829       1.1     pooka 		 */
   1830       1.1     pooka 
   1831       1.1     pooka 		if (uio->uio_offset & mask || va & mask) {
   1832      1.13   hannken 			break;
   1833       1.1     pooka 		}
   1834       1.1     pooka 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1835       1.1     pooka 					  uio->uio_rw);
   1836       1.1     pooka 		if (error) {
   1837       1.1     pooka 			break;
   1838       1.1     pooka 		}
   1839       1.1     pooka 		iov->iov_base = (char *)iov->iov_base + len;
   1840       1.1     pooka 		iov->iov_len -= len;
   1841       1.1     pooka 		uio->uio_offset += len;
   1842       1.1     pooka 		uio->uio_resid -= len;
   1843       1.1     pooka 	}
   1844      1.13   hannken 
   1845      1.16     joerg 	if (need_wapbl)
   1846      1.13   hannken 		WAPBL_END(vp->v_mount);
   1847       1.1     pooka }
   1848       1.1     pooka 
   1849       1.1     pooka /*
   1850       1.1     pooka  * Iodone routine for direct I/O.  We don't do much here since the request is
   1851       1.1     pooka  * always synchronous, so the caller will do most of the work after biowait().
   1852       1.1     pooka  */
   1853       1.1     pooka 
   1854       1.1     pooka static void
   1855       1.1     pooka genfs_dio_iodone(struct buf *bp)
   1856       1.1     pooka {
   1857       1.1     pooka 
   1858       1.1     pooka 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1859       1.2        ad 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1860       1.2        ad 		mutex_enter(bp->b_objlock);
   1861       1.1     pooka 		vwakeup(bp);
   1862       1.2        ad 		mutex_exit(bp->b_objlock);
   1863       1.1     pooka 	}
   1864       1.1     pooka 	putiobuf(bp);
   1865       1.1     pooka }
   1866       1.1     pooka 
   1867       1.1     pooka /*
   1868       1.1     pooka  * Process one chunk of a direct I/O request.
   1869       1.1     pooka  */
   1870       1.1     pooka 
   1871       1.1     pooka static int
   1872       1.1     pooka genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1873       1.1     pooka     off_t off, enum uio_rw rw)
   1874       1.1     pooka {
   1875       1.1     pooka 	struct vm_map *map;
   1876      1.56    martin 	struct pmap *upm, *kpm __unused;
   1877       1.1     pooka 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1878       1.1     pooka 	off_t spoff, epoff;
   1879       1.1     pooka 	vaddr_t kva, puva;
   1880       1.1     pooka 	paddr_t pa;
   1881       1.1     pooka 	vm_prot_t prot;
   1882      1.58    martin 	int error, rv __diagused, poff, koff;
   1883      1.13   hannken 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1884       1.1     pooka 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1885       1.1     pooka 
   1886       1.1     pooka 	/*
   1887       1.1     pooka 	 * For writes, verify that this range of the file already has fully
   1888       1.1     pooka 	 * allocated backing store.  If there are any holes, just punt and
   1889       1.1     pooka 	 * make the caller take the buffered write path.
   1890       1.1     pooka 	 */
   1891       1.1     pooka 
   1892       1.1     pooka 	if (rw == UIO_WRITE) {
   1893       1.1     pooka 		daddr_t lbn, elbn, blkno;
   1894       1.1     pooka 		int bsize, bshift, run;
   1895       1.1     pooka 
   1896       1.1     pooka 		bshift = vp->v_mount->mnt_fs_bshift;
   1897       1.1     pooka 		bsize = 1 << bshift;
   1898       1.1     pooka 		lbn = off >> bshift;
   1899       1.1     pooka 		elbn = (off + len + bsize - 1) >> bshift;
   1900       1.1     pooka 		while (lbn < elbn) {
   1901       1.1     pooka 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1902       1.1     pooka 			if (error) {
   1903       1.1     pooka 				return error;
   1904       1.1     pooka 			}
   1905       1.1     pooka 			if (blkno == (daddr_t)-1) {
   1906       1.1     pooka 				return ENOSPC;
   1907       1.1     pooka 			}
   1908       1.1     pooka 			lbn += 1 + run;
   1909       1.1     pooka 		}
   1910       1.1     pooka 	}
   1911       1.1     pooka 
   1912       1.1     pooka 	/*
   1913       1.1     pooka 	 * Flush any cached pages for parts of the file that we're about to
   1914       1.1     pooka 	 * access.  If we're writing, invalidate pages as well.
   1915       1.1     pooka 	 */
   1916       1.1     pooka 
   1917       1.1     pooka 	spoff = trunc_page(off);
   1918       1.1     pooka 	epoff = round_page(off + len);
   1919  1.83.2.2        ad 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1920       1.1     pooka 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1921       1.1     pooka 	if (error) {
   1922       1.1     pooka 		return error;
   1923       1.1     pooka 	}
   1924       1.1     pooka 
   1925       1.1     pooka 	/*
   1926       1.1     pooka 	 * Wire the user pages and remap them into kernel memory.
   1927       1.1     pooka 	 */
   1928       1.1     pooka 
   1929       1.1     pooka 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1930       1.1     pooka 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1931       1.1     pooka 	if (error) {
   1932       1.1     pooka 		return error;
   1933       1.1     pooka 	}
   1934       1.1     pooka 
   1935       1.1     pooka 	map = &vs->vm_map;
   1936       1.1     pooka 	upm = vm_map_pmap(map);
   1937       1.1     pooka 	kpm = vm_map_pmap(kernel_map);
   1938       1.1     pooka 	puva = trunc_page(uva);
   1939      1.51      matt 	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
   1940      1.51      matt 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
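                             	/*
                             	 * UVM_KMF_COLORMATCH makes the kernel va share the user va's
                             	 * cache color, avoiding aliasing on virtually-indexed caches.
                             	 */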
   1941       1.1     pooka 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1942       1.1     pooka 		rv = pmap_extract(upm, puva + poff, &pa);
   1943       1.1     pooka 		KASSERT(rv);
   1944      1.51      matt 		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
   1945       1.1     pooka 	}
   1946       1.1     pooka 	pmap_update(kpm);
   1947       1.1     pooka 
   1948       1.1     pooka 	/*
   1949       1.1     pooka 	 * Do the I/O.
   1950       1.1     pooka 	 */
   1951       1.1     pooka 
   1952       1.1     pooka 	koff = uva - trunc_page(uva);
   1953       1.1     pooka 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1954       1.1     pooka 			    genfs_dio_iodone);
   1955       1.1     pooka 
   1956       1.1     pooka 	/*
   1957       1.1     pooka 	 * Tear down the kernel mapping.
   1958       1.1     pooka 	 */
   1959       1.1     pooka 
   1960      1.51      matt 	pmap_kremove(kva, klen);
   1961       1.1     pooka 	pmap_update(kpm);
   1962       1.1     pooka 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1963       1.1     pooka 
   1964       1.1     pooka 	/*
   1965       1.1     pooka 	 * Unwire the user pages.
   1966       1.1     pooka 	 */
   1967       1.1     pooka 
   1968       1.1     pooka 	uvm_vsunlock(vs, (void *)uva, len);
   1969       1.1     pooka 	return error;
   1970       1.1     pooka }
   1971